Mirror of https://github.com/coolsnowwolf/lede.git

From 652935ba05860eadaa19ac9efe7aea61fb7a3aef Mon Sep 17 00:00:00 2001
From: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Date: Wed, 17 Apr 2024 12:32:53 +0530
Subject: [PATCH] PCI: qcom: Use devm_clk_bulk_get_all() API
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There is no need for the device drivers to validate the clocks defined in
Devicetree. The validation should be performed by the DT schema and the
drivers should just get all the clocks from DT. Right now the driver
hardcodes the clock info and validates them against DT which is redundant.

So use devm_clk_bulk_get_all() that just gets all the clocks defined in DT
and get rid of all static clocks info from the driver. This simplifies the
driver.
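
For reference, the devm_clk_bulk_get_all() pattern the diff below switches to
has roughly this shape (a minimal sketch with illustrative "foo" names, not
code taken from this patch):

    #include <linux/clk.h>
    #include <linux/device.h>

    struct foo_resources {
            struct clk_bulk_data *clks;   /* array allocated by the devm call */
            int num_clks;                 /* count returned by the same call */
    };

    static int foo_get_and_enable_clocks(struct device *dev,
                                         struct foo_resources *res)
    {
            /* Fetch every clock listed in the device's DT node in one call. */
            res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
            if (res->num_clks < 0)
                    return res->num_clks;   /* negative errno on failure */

            /* The DT-derived count replaces the old hardcoded clock tables. */
            return clk_bulk_prepare_enable(res->num_clks, res->clks);
    }

No clock names are spelled out in the driver any more; both the clk_bulk_data
array and its length come from whatever the device's DT node lists.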

Link: https://lore.kernel.org/linux-pci/20240417-pci-qcom-clk-bulk-v1-1-52ca19b3d6b2@linaro.org
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
---
 drivers/pci/controller/dwc/pcie-qcom.c | 177 ++++++++-----------------
 1 file changed, 58 insertions(+), 119 deletions(-)

diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 14772edcf0d3..3d2eeff9a876 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -154,58 +154,56 @@
 #define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
 		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
 
-#define QCOM_PCIE_1_0_0_MAX_CLOCKS	4
 struct qcom_pcie_resources_1_0_0 {
-	struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS];
+	struct clk_bulk_data *clks;
+	int num_clks;
 	struct reset_control *core;
 	struct regulator *vdda;
 };
 
-#define QCOM_PCIE_2_1_0_MAX_CLOCKS	5
 #define QCOM_PCIE_2_1_0_MAX_RESETS	6
 #define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
 struct qcom_pcie_resources_2_1_0 {
-	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
+	struct clk_bulk_data *clks;
+	int num_clks;
 	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
 	int num_resets;
 	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
 };
 
-#define QCOM_PCIE_2_3_2_MAX_CLOCKS	4
 #define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
 struct qcom_pcie_resources_2_3_2 {
-	struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];
+	struct clk_bulk_data *clks;
+	int num_clks;
 	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
 };
 
-#define QCOM_PCIE_2_3_3_MAX_CLOCKS	5
 #define QCOM_PCIE_2_3_3_MAX_RESETS	7
 struct qcom_pcie_resources_2_3_3 {
-	struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS];
+	struct clk_bulk_data *clks;
+	int num_clks;
 	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
 };
 
-#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
 #define QCOM_PCIE_2_4_0_MAX_RESETS	12
 struct qcom_pcie_resources_2_4_0 {
-	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+	struct clk_bulk_data *clks;
 	int num_clks;
 	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
 	int num_resets;
 };
 
-#define QCOM_PCIE_2_7_0_MAX_CLOCKS	15
 #define QCOM_PCIE_2_7_0_MAX_SUPPLIES	2
 struct qcom_pcie_resources_2_7_0 {
-	struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];
+	struct clk_bulk_data *clks;
 	int num_clks;
 	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
 	struct reset_control *rst;
 };
 
-#define QCOM_PCIE_2_9_0_MAX_CLOCKS	5
 struct qcom_pcie_resources_2_9_0 {
-	struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
+	struct clk_bulk_data *clks;
+	int num_clks;
 	struct reset_control *rst;
 };
 
@@ -337,21 +335,11 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
 	if (ret)
 		return ret;
 
-	res->clks[0].id = "iface";
-	res->clks[1].id = "core";
-	res->clks[2].id = "phy";
-	res->clks[3].id = "aux";
-	res->clks[4].id = "ref";
-
-	/* iface, core, phy are required */
-	ret = devm_clk_bulk_get(dev, 3, res->clks);
-	if (ret < 0)
-		return ret;
-
-	/* aux, ref are optional */
-	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
-	if (ret < 0)
-		return ret;
+	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+	if (res->num_clks < 0) {
+		dev_err(dev, "Failed to get clocks\n");
+		return res->num_clks;
+	}
 
 	res->resets[0].id = "pci";
 	res->resets[1].id = "axi";
@@ -373,7 +361,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
 
-	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+	clk_bulk_disable_unprepare(res->num_clks, res->clks);
 	reset_control_bulk_assert(res->num_resets, res->resets);
 
 	writel(1, pcie->parf + PARF_PHY_CTRL);
@@ -425,7 +413,7 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
 	val &= ~PHY_TEST_PWR_DOWN;
 	writel(val, pcie->parf + PARF_PHY_CTRL);
 
-	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
 	if (ret)
 		return ret;
 
@@ -476,20 +464,16 @@ static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
 	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
 	struct dw_pcie *pci = pcie->pci;
 	struct device *dev = pci->dev;
-	int ret;
 
 	res->vdda = devm_regulator_get(dev, "vdda");
 	if (IS_ERR(res->vdda))
 		return PTR_ERR(res->vdda);
 
-	res->clks[0].id = "iface";
-	res->clks[1].id = "aux";
-	res->clks[2].id = "master_bus";
-	res->clks[3].id = "slave_bus";
-
-	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
-	if (ret < 0)
-		return ret;
+	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+	if (res->num_clks < 0) {
+		dev_err(dev, "Failed to get clocks\n");
+		return res->num_clks;
+	}
 
 	res->core = devm_reset_control_get_exclusive(dev, "core");
 	return PTR_ERR_OR_ZERO(res->core);
@@ -500,7 +484,7 @@ static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
 	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
 
 	reset_control_assert(res->core);
-	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+	clk_bulk_disable_unprepare(res->num_clks, res->clks);
 	regulator_disable(res->vdda);
 }
 
@@ -517,7 +501,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
 		return ret;
 	}
 
-	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
 	if (ret) {
 		dev_err(dev, "cannot prepare/enable clocks\n");
 		goto err_assert_reset;
@@ -532,7 +516,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
 	return 0;
 
 err_disable_clks:
-	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+	clk_bulk_disable_unprepare(res->num_clks, res->clks);
 err_assert_reset:
 	reset_control_assert(res->core);
 
@@ -580,14 +564,11 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
 	if (ret)
 		return ret;
 
-	res->clks[0].id = "aux";
-	res->clks[1].id = "cfg";
-	res->clks[2].id = "bus_master";
-	res->clks[3].id = "bus_slave";
-
-	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
-	if (ret < 0)
-		return ret;
+	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+	if (res->num_clks < 0) {
+		dev_err(dev, "Failed to get clocks\n");
+		return res->num_clks;
+	}
 
 	return 0;
 }
@@ -596,7 +577,7 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
 
-	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+	clk_bulk_disable_unprepare(res->num_clks, res->clks);
 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
 }
 
@@ -613,7 +594,7 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
 		return ret;
 	}
 
-	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
 	if (ret) {
 		dev_err(dev, "cannot prepare/enable clocks\n");
 		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
@@ -661,17 +642,11 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
 	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
 	int ret;
 
-	res->clks[0].id = "aux";
-	res->clks[1].id = "master_bus";
-	res->clks[2].id = "slave_bus";
-	res->clks[3].id = "iface";
-
-	/* qcom,pcie-ipq4019 is defined without "iface" */
-	res->num_clks = is_ipq ? 3 : 4;
-
-	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
-	if (ret < 0)
-		return ret;
+	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+	if (res->num_clks < 0) {
+		dev_err(dev, "Failed to get clocks\n");
+		return res->num_clks;
+	}
 
 	res->resets[0].id = "axi_m";
 	res->resets[1].id = "axi_s";
@@ -742,15 +717,11 @@ static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
 	struct device *dev = pci->dev;
 	int ret;
 
-	res->clks[0].id = "iface";
-	res->clks[1].id = "axi_m";
-	res->clks[2].id = "axi_s";
-	res->clks[3].id = "ahb";
-	res->clks[4].id = "aux";
-
-	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
-	if (ret < 0)
-		return ret;
+	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+	if (res->num_clks < 0) {
+		dev_err(dev, "Failed to get clocks\n");
+		return res->num_clks;
+	}
 
 	res->rst[0].id = "axi_m";
 	res->rst[1].id = "axi_s";
@@ -771,7 +742,7 @@ static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
 
-	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+	clk_bulk_disable_unprepare(res->num_clks, res->clks);
 }
 
 static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
@@ -801,7 +772,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
 	 */
 	usleep_range(2000, 2500);
 
-	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
 	if (ret) {
 		dev_err(dev, "cannot prepare/enable clocks\n");
 		goto err_assert_resets;
@@ -862,8 +833,6 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
 	struct dw_pcie *pci = pcie->pci;
 	struct device *dev = pci->dev;
-	unsigned int num_clks, num_opt_clks;
-	unsigned int idx;
 	int ret;
 
 	res->rst = devm_reset_control_array_get_exclusive(dev);
@@ -877,36 +846,11 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
 	if (ret)
 		return ret;
 
-	idx = 0;
-	res->clks[idx++].id = "aux";
-	res->clks[idx++].id = "cfg";
-	res->clks[idx++].id = "bus_master";
-	res->clks[idx++].id = "bus_slave";
-	res->clks[idx++].id = "slave_q2a";
-
-	num_clks = idx;
-
-	ret = devm_clk_bulk_get(dev, num_clks, res->clks);
-	if (ret < 0)
-		return ret;
-
-	res->clks[idx++].id = "tbu";
-	res->clks[idx++].id = "ddrss_sf_tbu";
-	res->clks[idx++].id = "aggre0";
-	res->clks[idx++].id = "aggre1";
-	res->clks[idx++].id = "noc_aggr";
-	res->clks[idx++].id = "noc_aggr_4";
-	res->clks[idx++].id = "noc_aggr_south_sf";
-	res->clks[idx++].id = "cnoc_qx";
-	res->clks[idx++].id = "sleep";
-	res->clks[idx++].id = "cnoc_sf_axi";
-
-	num_opt_clks = idx - num_clks;
-	res->num_clks = idx;
-
-	ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
-	if (ret < 0)
-		return ret;
+	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+	if (res->num_clks < 0) {
+		dev_err(dev, "Failed to get clocks\n");
+		return res->num_clks;
+	}
 
 	return 0;
 }
@@ -1101,17 +1045,12 @@ static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
 	struct dw_pcie *pci = pcie->pci;
 	struct device *dev = pci->dev;
-	int ret;
-
-	res->clks[0].id = "iface";
-	res->clks[1].id = "axi_m";
-	res->clks[2].id = "axi_s";
-	res->clks[3].id = "axi_bridge";
-	res->clks[4].id = "rchng";
 
-	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
-	if (ret < 0)
-		return ret;
+	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+	if (res->num_clks < 0) {
+		dev_err(dev, "Failed to get clocks\n");
+		return res->num_clks;
+	}
 
 	res->rst = devm_reset_control_array_get_exclusive(dev);
 	if (IS_ERR(res->rst))
@@ -1124,7 +1063,7 @@ static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
 {
 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
 
-	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+	clk_bulk_disable_unprepare(res->num_clks, res->clks);
 }
 
 static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
@@ -1153,7 +1092,7 @@ static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
 
 	usleep_range(2000, 2500);
 
-	return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+	return clk_bulk_prepare_enable(res->num_clks, res->clks);
 }
 
 static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
-- 
2.47.1