author	Jonathan Gray <jsg@cvs.openbsd.org>	2023-07-26 06:45:31 +0000
committer	Jonathan Gray <jsg@cvs.openbsd.org>	2023-07-26 06:45:31 +0000
commit	2cf24dfdfce5929139ca5b8f43b99bf41b4293c5 (patch)
tree	063274573049e11b41fc1459e3c165b89b727c33 /sys/dev/pci
parent	46d42b9429b9602101c32d165c3a746ddf3eb377 (diff)
drm/amd/pm: conditionally disable pcie lane/speed switching for SMU13
From Mario Limonciello
bd8cd38d3ac6b6410ac4e7401ef3dca057a9b285 in linux-6.1.y/6.1.40
31c7a3b378a136adc63296a2ff17645896fcf303 in mainline linux
Diffstat (limited to 'sys/dev/pci')
-rw-r--r--	sys/dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0.c	46
1 files changed, 43 insertions, 3 deletions
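
Before the diff itself, a minimal standalone sketch of the logic this patch adds to smu_v13_0_update_pcie_parameters(). It is not part of the patch: struct pcie_table, MAX_LINK_LEVELS, dynamic_switching_supported() and update_pcie_parameters() below are simplified stand-ins for the driver's struct smu_13_0_pcie_table and smu_v13_0_* helpers, kept only to show the clamp-and-force behaviour and the smu_pcie_arg packing seen in the hunk.

/*
 * Sketch only: when dynamic switching is not supported, clamp the caps to
 * what the table's highest link level offers and force every level to the
 * same gen/width; otherwise cap each level independently (the old code).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LINK_LEVELS	8	/* illustrative bound, not the driver's */

struct pcie_table {
	uint8_t	pcie_gen[MAX_LINK_LEVELS];
	uint8_t	pcie_lane[MAX_LINK_LEVELS];
	int	num_of_link_levels;
};

/* Stand-in for smu_v13_0_is_pcie_dynamic_switching_supported(). */
static bool
dynamic_switching_supported(void)
{
	return false;	/* e.g. an Intel host, per the patch's comment */
}

static void
update_pcie_parameters(struct pcie_table *t, uint32_t pcie_gen_cap,
    uint32_t pcie_width_cap)
{
	int last = t->num_of_link_levels - 1;
	int i;

	if (!dynamic_switching_supported()) {
		/* Never ask for more than the top link level offers. */
		if (t->pcie_gen[last] < pcie_gen_cap)
			pcie_gen_cap = t->pcie_gen[last];
		if (t->pcie_lane[last] < pcie_width_cap)
			pcie_width_cap = t->pcie_lane[last];

		/* Force all levels to use the same settings. */
		for (i = 0; i < t->num_of_link_levels; i++) {
			t->pcie_gen[i] = pcie_gen_cap;
			t->pcie_lane[i] = pcie_width_cap;
		}
	} else {
		/* Previous behaviour: clamp each level on its own. */
		for (i = 0; i < t->num_of_link_levels; i++) {
			if (t->pcie_gen[i] > pcie_gen_cap)
				t->pcie_gen[i] = pcie_gen_cap;
			if (t->pcie_lane[i] > pcie_width_cap)
				t->pcie_lane[i] = pcie_width_cap;
		}
	}

	/* Each level is packed as (level << 16 | gen << 8 | lane), matching
	 * the smu_pcie_arg lines at the end of the second hunk. */
	for (i = 0; i < t->num_of_link_levels; i++)
		printf("level %d: arg 0x%06x\n", i,
		    (unsigned)(i << 16 | t->pcie_gen[i] << 8 |
		    t->pcie_lane[i]));
}

int
main(void)
{
	struct pcie_table t = {
		.pcie_gen  = { 1, 3 },	/* example values only */
		.pcie_lane = { 5, 6 },
		.num_of_link_levels = 2,
	};

	update_pcie_parameters(&t, 3, 6);
	return 0;
}

Forcing every DPM level to one gen/width means the link never has to renegotiate speed or width at runtime, which is the point of the workaround for the Intel hosts named in the patch's comment.
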
diff --git a/sys/dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/sys/dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 644b6408e00..ee8b010038f 100644
--- a/sys/dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/sys/dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2490,6 +2490,29 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
 	return ret;
 }
 
+/*
+ * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
+ * speed switching. Until we have confirmation from Intel that a specific host
+ * supports it, it's safer that we keep it disabled for all.
+ *
+ * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
+ */
+static bool smu_v13_0_is_pcie_dynamic_switching_supported(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+#ifdef __linux__
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_vendor == X86_VENDOR_INTEL)
+#else
+	if (strcmp(cpu_vendor, "GenuineIntel") == 0)
+#endif
+		return false;
+#endif
+	return true;
+}
+
 int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
 				     uint32_t pcie_gen_cap,
 				     uint32_t pcie_width_cap)
@@ -2497,15 +2520,32 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
 	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
 	struct smu_13_0_pcie_table *pcie_table =
 				&dpm_context->dpm_tables.pcie_table;
+	int num_of_levels = pcie_table->num_of_link_levels;
 	uint32_t smu_pcie_arg;
 	int ret, i;
 
-	for (i = 0; i < pcie_table->num_of_link_levels; i++) {
-		if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+	if (!smu_v13_0_is_pcie_dynamic_switching_supported()) {
+		if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+			pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+		if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+			pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+		/* Force all levels to use the same settings */
+		for (i = 0; i < num_of_levels; i++) {
 			pcie_table->pcie_gen[i] = pcie_gen_cap;
-		if (pcie_table->pcie_lane[i] > pcie_width_cap)
 			pcie_table->pcie_lane[i] = pcie_width_cap;
+		}
+	} else {
+		for (i = 0; i < num_of_levels; i++) {
+			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+				pcie_table->pcie_gen[i] = pcie_gen_cap;
+			if (pcie_table->pcie_lane[i] > pcie_width_cap)
+				pcie_table->pcie_lane[i] = pcie_width_cap;
+		}
+	}
+	for (i = 0; i < num_of_levels; i++) {
 		smu_pcie_arg = i << 16;
 		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
 		smu_pcie_arg |= pcie_table->pcie_lane[i];