• R/O
  • HTTP
  • SSH
  • HTTPS

tomoyo-test1: 提交

This is a test repository.


Commit MetaInfo

修订版 ca85855bdcae8f84f1512e88b4c75009ea17ea2f (tree)
时间 2022-07-19 09:16:22
作者 Linus Torvalds <torvalds@linu...>
Committer Linus Torvalds

Log Message

Second v5.19 rc pull request

Two bug fixes for irdma:

- x722 does not support 1GB pages, trying to configure them will corrupt
the dma mapping

- Fix a sleep while holding a spinlock
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQRRRCHOFoQz/8F5bUaFwuHvBreFYQUCYtXkFgAKCRCFwuHvBreF
YUYpAQCyJax6IA7UKZr48gFDCtjQvn75JGtGE0yeD1Ag8trqQQEAtaErEgUVn4LG
zLKOn8F5nbdrtBgql7c5ZUZCNbsn2QM=
=IN55
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:

"Two bug fixes for irdma:
- x722 does not support 1GB pages, trying to configure them will
corrupt the dma mapping
- Fix a sleep while holding a spinlock"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:

RDMA/irdma: Fix sleep from invalid context BUG
RDMA/irdma: Do not advertise 1GB page size for x722

更改概述

差异

--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -4231,10 +4231,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
42314231 struct irdma_cm_node *cm_node;
42324232 struct list_head teardown_list;
42334233 struct ib_qp_attr attr;
4234- struct irdma_sc_vsi *vsi = &iwdev->vsi;
4235- struct irdma_sc_qp *sc_qp;
4236- struct irdma_qp *qp;
4237- int i;
42384234
42394235 INIT_LIST_HEAD(&teardown_list);
42404236
@@ -4251,52 +4247,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
42514247 irdma_cm_disconn(cm_node->iwqp);
42524248 irdma_rem_ref_cm_node(cm_node);
42534249 }
4254- if (!iwdev->roce_mode)
4255- return;
4256-
4257- INIT_LIST_HEAD(&teardown_list);
4258- for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
4259- mutex_lock(&vsi->qos[i].qos_mutex);
4260- list_for_each_safe (list_node, list_core_temp,
4261- &vsi->qos[i].qplist) {
4262- u32 qp_ip[4];
4263-
4264- sc_qp = container_of(list_node, struct irdma_sc_qp,
4265- list);
4266- if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
4267- continue;
4268-
4269- qp = sc_qp->qp_uk.back_qp;
4270- if (!disconnect_all) {
4271- if (nfo->ipv4)
4272- qp_ip[0] = qp->udp_info.local_ipaddr[3];
4273- else
4274- memcpy(qp_ip,
4275- &qp->udp_info.local_ipaddr[0],
4276- sizeof(qp_ip));
4277- }
4278-
4279- if (disconnect_all ||
4280- (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) &&
4281- !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
4282- spin_lock(&iwdev->rf->qptable_lock);
4283- if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
4284- irdma_qp_add_ref(&qp->ibqp);
4285- list_add(&qp->teardown_entry,
4286- &teardown_list);
4287- }
4288- spin_unlock(&iwdev->rf->qptable_lock);
4289- }
4290- }
4291- mutex_unlock(&vsi->qos[i].qos_mutex);
4292- }
4293-
4294- list_for_each_safe (list_node, list_core_temp, &teardown_list) {
4295- qp = container_of(list_node, struct irdma_qp, teardown_entry);
4296- attr.qp_state = IB_QPS_ERR;
4297- irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
4298- irdma_qp_rem_ref(&qp->ibqp);
4299- }
43004250 }
43014251
43024252 /**
--- a/drivers/infiniband/hw/irdma/i40iw_hw.c
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
@@ -201,6 +201,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
201201 dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
202202 dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
203203 dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
204+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M;
204205 dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
205206 dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
206207 dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
--- a/drivers/infiniband/hw/irdma/icrdma_hw.c
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.c
@@ -139,6 +139,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
139139 dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
140140 dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
141141 dev->irq_ops = &icrdma_irq_ops;
142+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
142143 dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
143144 dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
144145 dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
--- a/drivers/infiniband/hw/irdma/irdma.h
+++ b/drivers/infiniband/hw/irdma/irdma.h
@@ -127,6 +127,7 @@ struct irdma_hw_attrs {
127127 u64 max_hw_outbound_msg_size;
128128 u64 max_hw_inbound_msg_size;
129129 u64 max_mr_size;
130+ u64 page_size_cap;
130131 u32 min_hw_qp_id;
131132 u32 min_hw_aeq_size;
132133 u32 max_hw_aeq_size;
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -32,7 +32,7 @@ static int irdma_query_device(struct ib_device *ibdev,
3232 props->vendor_part_id = pcidev->device;
3333
3434 props->hw_ver = rf->pcidev->revision;
35- props->page_size_cap = SZ_4K | SZ_2M | SZ_1G;
35+ props->page_size_cap = hw_attrs->page_size_cap;
3636 props->max_mr_size = hw_attrs->max_mr_size;
3737 props->max_qp = rf->max_qp - rf->used_qps;
3838 props->max_qp_wr = hw_attrs->max_qp_wr;
@@ -2781,7 +2781,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
27812781
27822782 if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
27832783 iwmr->page_size = ib_umem_find_best_pgsz(region,
2784- SZ_4K | SZ_2M | SZ_1G,
2784+ iwdev->rf->sc_dev.hw_attrs.page_size_cap,
27852785 virt);
27862786 if (unlikely(!iwmr->page_size)) {
27872787 kfree(iwmr);
Show on old repository browser