diff --git a/Documentation/arch/x86/microcode.rst b/Documentation/arch/x86/microcode.rst index b627c6f36bcf5a966b9f7ff7e34059f44ffe8f38..69c04052861df91fd452c3c172175199de37d837 100644 --- a/Documentation/arch/x86/microcode.rst +++ b/Documentation/arch/x86/microcode.rst @@ -35,6 +35,8 @@ on Intel: kernel/x86/microcode/GenuineIntel.bin on AMD : kernel/x86/microcode/AuthenticAMD.bin +on Hygon: + kernel/x86/microcode/HygonGenuine.bin During BSP (BootStrapping Processor) boot (pre-SMP), the kernel scans the microcode file in the initrd. If microcode matching the @@ -69,6 +71,10 @@ here for future reference only). cd $TMPDIR mkdir -p $DSTDIR + if [ -d /lib/firmware/hygon-ucode ]; then + cat /lib/firmware/hygon-ucode/microcode_hygon*.bin > $DSTDIR/HygonGenuine.bin + fi + if [ -d /lib/firmware/amd-ucode ]; then cat /lib/firmware/amd-ucode/microcode_amd*.bin > $DSTDIR/AuthenticAMD.bin fi @@ -217,7 +223,8 @@ currently supported. Here's an example:: - CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin" + CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 \ + amd-ucode/microcode_amd_fam15h.bin hygon-ucode/microcode_hygon_fam18h.bin" CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware" This basically means, you have the following tree structure locally:: @@ -227,6 +234,10 @@ This basically means, you have the following tree structure locally:: ... | |-- microcode_amd_fam15h.bin ... + |-- hygon-ucode + ... + | |-- microcode_hygon_fam18h.bin + ... |-- intel-ucode ... | |-- 06-3a-09 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cadea3f3161ef9f89e16c0f2a479e7d567b642b5..c64546d981b6b717339b308407307b52ee595660 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1311,7 +1311,7 @@ config X86_REBOOTFIXUPS config MICROCODE def_bool y - depends on CPU_SUP_AMD || CPU_SUP_INTEL + depends on CPU_SUP_AMD || CPU_SUP_INTEL || CPU_SUP_HYGON config MICROCODE_INITRD32 def_bool y diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 83f15fe411b3f4834b20ea588146dfe913a850d0..5100469fef323206881b62c9963c8f7f17ec31ee 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -196,10 +196,21 @@ static void amd_uncore_del(struct perf_event *event, int flags) */ static u64 l3_thread_slice_mask(u64 config) { - if (boot_cpu_data.x86 <= 0x18) + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 <= 0x18) return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + if (boot_cpu_data.x86_model == 0x6) + return ((config & HYGON_L3_SLICE_MASK) ? : HYGON_L3_SLICE_MASK) | + ((config & HYGON_L3_THREAD_MASK) ? : HYGON_L3_THREAD_MASK); + else + return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | + ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); + } + /* * If the user doesn't specify a threadmask, they're not trying to * count core 0, so we enable all cores & threads. @@ -268,6 +279,13 @@ amd_f17h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) attr->mode : 0; } +static umode_t +hygon_f18h_m6h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + return boot_cpu_data.x86 == 0x18 && boot_cpu_data.x86_model == 0x6 ? 
+		attr->mode : 0;
+}
+
 static umode_t
 amd_f19h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i)
 {
@@ -325,6 +343,8 @@ DEFINE_UNCORE_FORMAT_ATTR(threadmask2, threadmask, "config:56-57"); /* F19h L
 DEFINE_UNCORE_FORMAT_ATTR(enallslices, enallslices, "config:46"); /* F19h L3 */
 DEFINE_UNCORE_FORMAT_ATTR(enallcores, enallcores, "config:47"); /* F19h L3 */
 DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50"); /* F19h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(slicemask4, slicemask, "config:28-31"); /* F18h L3 */
+DEFINE_UNCORE_FORMAT_ATTR(threadmask32, threadmask, "config:32-63"); /* F18h L3 */
 
 /* Common DF and NB attributes */
 static struct attribute *amd_uncore_df_format_attr[] = {
@@ -347,6 +367,12 @@ static struct attribute *amd_f17h_uncore_l3_format_attr[] = {
 	NULL,
 };
 
+/* F18h M06h unique L3 attributes */
+static struct attribute *hygon_f18h_m6h_uncore_l3_format_attr[] = {
+	&format_attr_slicemask4.attr, /* slicemask */
+	NULL,
+};
+
 /* F19h unique L3 attributes */
 static struct attribute *amd_f19h_uncore_l3_format_attr[] = {
 	&format_attr_coreid.attr, /* coreid */
@@ -372,6 +398,12 @@ static struct attribute_group amd_f17h_uncore_l3_format_group = {
 	.is_visible = amd_f17h_uncore_is_visible,
 };
 
+static struct attribute_group hygon_f18h_m6h_uncore_l3_format_group = {
+	.name = "format",
+	.attrs = hygon_f18h_m6h_uncore_l3_format_attr,
+	.is_visible = hygon_f18h_m6h_uncore_is_visible,
+};
+
 static struct attribute_group amd_f19h_uncore_l3_format_group = {
 	.name = "format",
 	.attrs = amd_f19h_uncore_l3_format_attr,
@@ -396,6 +428,11 @@ static const struct attribute_group *amd_uncore_l3_attr_update[] = {
 	NULL,
 };
 
+static const struct attribute_group *hygon_uncore_l3_attr_update[] = {
+	&hygon_f18h_m6h_uncore_l3_format_group,
+	NULL,
+};
+
 static struct pmu amd_nb_pmu = {
 	.task_ctx_nr	= perf_invalid_context,
 	.attr_groups	= amd_uncore_df_attr_groups,
@@ -709,10 +746,21 @@ static int __init amd_uncore_init(void)
 		*l3_attr++ = &format_attr_event8.attr;
 		*l3_attr++ = &format_attr_umask8.attr;
 		*l3_attr++ = &format_attr_threadmask2.attr;
-	} else if (boot_cpu_data.x86 >= 0x17) {
+	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+		   boot_cpu_data.x86 >= 0x17) {
 		*l3_attr++ = &format_attr_event8.attr;
 		*l3_attr++ = &format_attr_umask8.attr;
 		*l3_attr++ = &format_attr_threadmask8.attr;
+	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+		   boot_cpu_data.x86 == 0x18) {
+		*l3_attr++ = &format_attr_event8.attr;
+		*l3_attr++ = &format_attr_umask8.attr;
+		if (boot_cpu_data.x86_model == 0x6) {
+			*l3_attr++ = &format_attr_threadmask32.attr;
+			amd_llc_pmu.attr_update = hygon_uncore_l3_attr_update;
+		} else {
+			*l3_attr++ = &format_attr_threadmask8.attr;
+		}
 	}
 
 	amd_uncore_llc = alloc_percpu(struct amd_uncore *);
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index ed0eaf65c43721ebfcf4f4ee74f827c6d91ec5d1..4230a80a5a9de2264ba53a96e06aea1ea1709a95 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -82,6 +82,10 @@ u16 amd_nb_num(void);
 bool amd_nb_has_feature(unsigned int feature);
 struct amd_northbridge *node_to_amd_nb(int node);
 
+bool hygon_f18h_m4h(void);
+u16 hygon_nb_num(void);
+int get_df_id(struct pci_dev *misc, u8 *id);
+
 static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
 {
 	struct pci_dev *misc;
@@ -119,6 +123,10 @@ static inline bool amd_gart_present(void)
 #define node_to_amd_nb(x)	NULL
 #define amd_gart_present(x)	false
 
+#define hygon_f18h_m4h()	false
+#define hygon_nb_num(x)		0
+#define get_df_id(x, y)		(-ENODEV)
+
#endif diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 4decedf2c0779e71d15d2c2684e12996c8d93761..9450594f1709e9e9ceee0ca18083c490c8373570 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -61,6 +61,14 @@ #define INTEL_ARCH_EVENT_MASK \ (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT) +#define HYGON_L3_SLICE_SHIFT 28 +#define HYGON_L3_SLICE_MASK \ + (0xFULL << HYGON_L3_SLICE_SHIFT) + +#define HYGON_L3_THREAD_SHIFT 32 +#define HYGON_L3_THREAD_MASK \ + (0xFFFFFFFFULL << HYGON_L3_THREAD_SHIFT) + #define AMD64_L3_SLICE_SHIFT 48 #define AMD64_L3_SLICE_MASK \ (0xFULL << AMD64_L3_SLICE_SHIFT) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index cab4d8b1535d61371e785a7cd551f299cb663232..9f759c7bf0370b3a86c0ba52acf55f43bdd696f7 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -44,10 +44,17 @@ #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4 #define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4 +#define PCI_DEVICE_ID_HYGON_18H_M05H_ROOT 0x14a0 +#define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1 0x14b1 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4 0x14b4 +#define PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5 0x14b5 + /* Protect the PCI config register pairs used for SMN. */ static DEFINE_MUTEX(smn_mutex); static u32 *flush_words; +static u16 nb_num; static const struct pci_device_id amd_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, @@ -123,16 +130,22 @@ static const struct pci_device_id amd_nb_link_ids[] = { static const struct pci_device_id hygon_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_ROOT) }, {} }; static const struct pci_device_id hygon_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, {} }; static const struct pci_device_id hygon_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4) }, {} }; @@ -219,6 +232,226 @@ int amd_smn_write(u16 node, u32 address, u32 value) } EXPORT_SYMBOL_GPL(amd_smn_write); +bool hygon_f18h_m4h(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + + if (boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(hygon_f18h_m4h); + +u16 hygon_nb_num(void) +{ + return nb_num; +} +EXPORT_SYMBOL_GPL(hygon_nb_num); + +static int get_df_register(struct pci_dev *misc, u8 func, int offset, u32 *value) +{ + struct pci_dev *df_func = NULL; + u32 device; + int err; + + if (func == 1) { + switch (boot_cpu_data.x86_model) { + case 0x4: + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x5: + if (misc->device == PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + else + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x6: + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + break; + default: + return -ENODEV; + } + } else if (func == 5) { + switch (boot_cpu_data.x86_model) { + case 0x6: + device = 
PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5; + break; + default: + return -ENODEV; + } + } else { + return -ENODEV; + } + + while ((df_func = pci_get_device(misc->vendor, device, df_func))) + if (pci_domain_nr(df_func->bus) == pci_domain_nr(misc->bus) && + df_func->bus->number == misc->bus->number && + PCI_SLOT(df_func->devfn) == PCI_SLOT(misc->devfn)) + break; + + if (!df_func) { + pr_warn("Error getting DF F%d device.\n", func); + return -ENODEV; + } + + err = pci_read_config_dword(df_func, offset, value); + if (err) + pr_warn("Error reading DF F%d register.\n", func); + + return err; +} + +int get_df_id(struct pci_dev *misc, u8 *id) +{ + u32 value; + int ret; + + if (boot_cpu_data.x86_model == 0x6) { + /* F5x180[19:16]: DF ID */ + ret = get_df_register(misc, 5, 0x180, &value); + *id = (value >> 16) & 0xf; + } else { + /* F1x200[23:20]: DF ID */ + ret = get_df_register(misc, 1, 0x200, &value); + *id = (value >> 20) & 0xf; + } + + return ret; +} +EXPORT_SYMBOL_GPL(get_df_id); + +static u8 get_socket_num(struct pci_dev *misc) +{ + u32 value; + int ret; + + /* F1x200[7:0]: Which socket is present. */ + ret = get_df_register(misc, 1, 0x200, &value); + + return ret ? 0 : hweight8(value & 0xff); +} + +static int northbridge_init_f18h_m4h(const struct pci_device_id *root_ids, + const struct pci_device_id *misc_ids, + const struct pci_device_id *link_ids) +{ + struct pci_dev *root, *misc, *link; + struct pci_dev *root_first = NULL; + struct amd_northbridge *nb; + u16 roots_per_socket = 0; + u16 miscs_per_socket = 0; + u16 socket_num = 0; + u16 root_count = 0; + u16 misc_count = 0; + int err = -ENODEV; + u8 i, j, m, n; + u8 id; + + pr_info("Hygon Fam%xh Model%xh NB driver.\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + + misc = next_northbridge(NULL, misc_ids); + if (misc != NULL) { + socket_num = get_socket_num(misc); + pr_info("Socket number: %d\n", socket_num); + if (!socket_num) { + err = -ENODEV; + goto ret; + } + } else { + err = -ENODEV; + goto ret; + } + + misc = NULL; + while ((misc = next_northbridge(misc, misc_ids)) != NULL) + misc_count++; + + root = NULL; + while ((root = next_northbridge(root, root_ids)) != NULL) + root_count++; + + if (!root_count || !misc_count) { + err = -ENODEV; + goto ret; + } + + /* + * There should be _exactly_ N roots for each DF/SMN + * interface, and M DF/SMN interfaces in one socket. + */ + roots_per_socket = root_count / socket_num; + miscs_per_socket = misc_count / socket_num; + + if (!roots_per_socket || !miscs_per_socket) { + err = -ENODEV; + goto ret; + } + + nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL); + if (!nb) { + err = -ENOMEM; + goto ret; + } + + amd_northbridges.nb = nb; + amd_northbridges.num = misc_count; + + link = misc = root = NULL; + j = m = n = 0; + for (i = 0; i < amd_northbridges.num; i++) { + misc = next_northbridge(misc, misc_ids); + link = next_northbridge(link, link_ids); + + /* Only save the first PCI root device for each socket. */ + if (!(i % miscs_per_socket)) { + root_first = next_northbridge(root, root_ids); + root = root_first; + j = 1; + } + + if (get_df_id(misc, &id)) { + err = -ENODEV; + goto err; + } + pr_info("DF ID: %d\n", id); + + if (id < 4) { + /* Add the devices with id<4 from the tail. 
*/
+			node_to_amd_nb(misc_count - m - 1)->misc = misc;
+			node_to_amd_nb(misc_count - m - 1)->link = link;
+			node_to_amd_nb(misc_count - m - 1)->root = root_first;
+			m++;
+		} else {
+			node_to_amd_nb(n)->misc = misc;
+			node_to_amd_nb(n)->link = link;
+			node_to_amd_nb(n)->root = root_first;
+			n++;
+		}
+
+		/* Skip the redundant PCI root devices per socket. */
+		while (j < roots_per_socket) {
+			root = next_northbridge(root, root_ids);
+			j++;
+		}
+	}
+	nb_num = n;
+
+	return 0;
+
+err:
+	kfree(nb);
+	amd_northbridges.nb = NULL;
+
+ret:
+	pr_err("Hygon Fam%xh Model%xh northbridge init failed(%d)!\n",
+	       boot_cpu_data.x86, boot_cpu_data.x86_model, err);
+	return err;
+}
 
 static int amd_cache_northbridges(void)
 {
@@ -239,6 +472,11 @@ static int amd_cache_northbridges(void)
 		root_ids = hygon_root_ids;
 		misc_ids = hygon_nb_misc_ids;
 		link_ids = hygon_nb_link_ids;
+
+		if (boot_cpu_data.x86_model >= 0x4 &&
+		    boot_cpu_data.x86_model <= 0xf)
+			return northbridge_init_f18h_m4h(root_ids,
+					misc_ids, link_ids);
 	}
 
 	misc = NULL;
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 8f86eacf69f7c965a92996ddf669c02b2d62b751..7c4ce361c728cc53ae0a8e98eb97ab1ca1cc21db 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -708,11 +708,30 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu)
 	if (!cpuid_edx(0x80000006))
 		return;
 
-	/*
-	 * LLC is at the core complex level.
-	 * Core complex ID is ApicId[3] for these processors.
-	 */
-	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+	if (c->x86_model < 0x5) {
+		/*
+		 * LLC is at the core complex level.
+		 * Core complex ID is ApicId[3] for these processors.
+		 */
+		per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+	} else {
+		/*
+		 * LLC ID is calculated from the number of threads
+		 * sharing the cache.
+		 */
+		u32 eax, ebx, ecx, edx, num_sharing_cache = 0;
+		u32 llc_index = find_num_cache_leaves(c) - 1;
+
+		cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx);
+		if (eax)
+			num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
+
+		if (num_sharing_cache) {
+			int bits = get_count_order(num_sharing_cache);
+
+			per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
+		}
+	}
 }
 
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index a7b3ef4c4de91e500e1aad56d6337e893f73999d..bf13027bf6b494bed28308ead4a0ae8646cf8c58 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -15,6 +15,7 @@
 #include <asm/cacheinfo.h>
 #include <asm/spec-ctrl.h>
 #include <asm/delay.h>
+#include <asm/resctrl.h>
 
 #include "cpu.h"
 
@@ -80,12 +81,14 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
 		c->x86_max_cores /= smp_num_siblings;
 
 	/*
-	 * In case leaf B is available, use it to derive
+	 * From model 0x4, leaf B is available, so use it to derive
 	 * topology information.
	 */
 	err = detect_extended_topology(c);
-	if (!err)
+	if (!err) {
 		c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+		__max_die_per_package = nodes_per_socket;
+	}
 
 	/*
 	 * Socket ID is ApicId[6] for the processors with model <= 0x3
@@ -240,6 +243,7 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c)
 			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
 		}
 	}
+	resctrl_cpu_detect(c);
 }
 
 static void early_init_hygon(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 9373ec01c5ae1730784cb4d0c7dfa9f24f39d58b..aad4c2958bc7f8ae4f33adfa731075de82a80896 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -477,15 +477,18 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
 
 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 {
-	char fw_name[36] = "amd-ucode/microcode_amd.bin";
+	char fw_name[40] = "amd-ucode/microcode_amd.bin";
 	struct firmware fw;
 
 	if (IS_ENABLED(CONFIG_X86_32))
 		return false;
 
-	if (family >= 0x15)
+	if (x86_cpuid_vendor() == X86_VENDOR_AMD && family >= 0x15)
 		snprintf(fw_name, sizeof(fw_name),
 			 "amd-ucode/microcode_amd_fam%02hhxh.bin", family);
+	else if (x86_cpuid_vendor() == X86_VENDOR_HYGON)
+		snprintf(fw_name, sizeof(fw_name),
+			 "hygon-ucode/microcode_hygon_fam%02hhxh.bin", family);
 
 	if (firmware_request_builtin(&fw, fw_name)) {
 		cp->size = fw.size;
@@ -530,7 +533,9 @@ static int __init save_microcode_in_initrd(void)
 	enum ucode_state ret;
 	struct cpio_data cp;
 
-	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+	if (dis_ucode_ldr ||
+	    ((c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) &&
+	     (c->x86_vendor != X86_VENDOR_HYGON)))
 		return 0;
 
 	find_blobs_in_containers(cpuid_1_eax, &cp);
@@ -883,7 +888,7 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
  */
 static enum ucode_state request_microcode_amd(int cpu, struct device *device)
 {
-	char fw_name[36] = "amd-ucode/microcode_amd.bin";
+	char fw_name[40] = "amd-ucode/microcode_amd.bin";
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	enum ucode_state ret = UCODE_NFOUND;
 	const struct firmware *fw;
@@ -891,8 +896,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
 	if (force_minrev)
 		return UCODE_NFOUND;
 
-	if (c->x86 >= 0x15)
-		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
+	if (x86_cpuid_vendor() == X86_VENDOR_AMD && c->x86 >= 0x15)
+		snprintf(fw_name, sizeof(fw_name),
+			 "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
+	else if (x86_cpuid_vendor() == X86_VENDOR_HYGON)
+		snprintf(fw_name, sizeof(fw_name),
+			 "hygon-ucode/microcode_hygon_fam%.2xh.bin", c->x86);
 
 	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
 		pr_debug("failed to load file %s\n", fw_name);
@@ -943,6 +952,25 @@ struct microcode_ops * __init init_amd_microcode(void)
 	return &microcode_amd_ops;
 }
 
+#ifdef CONFIG_CPU_SUP_HYGON
+struct microcode_ops * __init init_hygon_microcode(void)
+{
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+
+	if (c->x86_vendor != X86_VENDOR_HYGON)
+		return NULL;
+
+	strscpy((char *)ucode_path, "kernel/x86/microcode/HygonGenuine.bin",
+		sizeof(ucode_path));
+
+	if (ucode_new_rev)
+		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
+			     ucode_new_rev);
+
+	return &microcode_amd_ops;
+}
+#endif
+
 void __exit exit_amd_microcode(void)
 {
 	cleanup();
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 
666d25bbc5ad200ef9c8fe4b6f69c1accc5b2fb9..3b28bb30a7402145a37255b6f8bcc6d42821dc8d 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -114,7 +114,8 @@ static bool __init check_loader_disabled_bsp(void) if (native_cpuid_ecx(1) & BIT(31)) return true; - if (x86_cpuid_vendor() == X86_VENDOR_AMD) { + if (x86_cpuid_vendor() == X86_VENDOR_AMD || + x86_cpuid_vendor() == X86_VENDOR_HYGON) { if (amd_check_current_patch_level()) return true; } @@ -147,6 +148,10 @@ void __init load_ucode_bsp(void) intel = false; break; + case X86_VENDOR_HYGON: + intel = false; + break; + default: return; } @@ -178,6 +183,9 @@ void load_ucode_ap(void) if (x86_family(cpuid_1_eax) >= 0x10) load_ucode_amd_ap(cpuid_1_eax); break; + case X86_VENDOR_HYGON: + load_ucode_amd_ap(cpuid_1_eax); + break; default: break; } @@ -237,6 +245,9 @@ static void reload_early_microcode(unsigned int cpu) if (family >= 0x10) reload_ucode_amd(cpu); break; + case X86_VENDOR_HYGON: + reload_ucode_amd(cpu); + break; default: break; } @@ -822,6 +833,8 @@ static int __init microcode_init(void) microcode_ops = init_intel_microcode(); else if (c->x86_vendor == X86_VENDOR_AMD) microcode_ops = init_amd_microcode(); + else if (c->x86_vendor == X86_VENDOR_HYGON) + microcode_ops = init_hygon_microcode(); else pr_err("no support for this CPU vendor\n"); diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index f8047b12329a9749bd069d230dcbce6752acd525..8e7ae298158a94ef4096cb00a9e9d41e84e881c2 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -49,6 +49,9 @@ struct cpio_data find_microcode_in_initrd(const char *path); #define CPUID_AMD1 QCHAR('A', 'u', 't', 'h') #define CPUID_AMD2 QCHAR('e', 'n', 't', 'i') #define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D') +#define CPUID_HYGON1 QCHAR('H', 'y', 'g', 'o') +#define CPUID_HYGON2 QCHAR('n', 'G', 'e', 'n') +#define CPUID_HYGON3 QCHAR('u', 'i', 'n', 'e') #define CPUID_IS(a, b, c, ebx, ecx, edx) \ (!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c)))) @@ -75,6 +78,9 @@ static inline int x86_cpuid_vendor(void) if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx)) return X86_VENDOR_AMD; + if (CPUID_IS(CPUID_HYGON1, CPUID_HYGON2, CPUID_HYGON3, ebx, ecx, edx)) + return X86_VENDOR_HYGON; + return X86_VENDOR_UNKNOWN; } @@ -107,6 +113,12 @@ static inline struct microcode_ops *init_amd_microcode(void) { return NULL; } static inline void exit_amd_microcode(void) { } #endif /* !CONFIG_CPU_SUP_AMD */ +#ifdef CONFIG_CPU_SUP_HYGON +struct microcode_ops *init_hygon_microcode(void); +#else /* CONFIG_CPU_SUP_HYGON */ +static inline struct microcode_ops *init_hygon_microcode(void) { return NULL; } +#endif /* !CONFIG_CPU_SUP_HYGON */ + #ifdef CONFIG_CPU_SUP_INTEL void load_ucode_intel_bsp(void); void load_ucode_intel_ap(void); diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 19e0681f04356d6b184014003e23cc5cec3980f6..73b6d52454196e6ff5115b2803d3ffb75ab2a456 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -761,7 +761,8 @@ static __init bool get_mem_config(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) return __get_mem_config_intel(&hw_res->r_resctrl); - else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) return __rdt_get_mem_config_amd(&hw_res->r_resctrl); return false; @@ -912,7 +913,8 @@ static 
__init void rdt_init_res_defs(void) { if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) rdt_init_res_defs_intel(); - else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) rdt_init_res_defs_amd(); } @@ -943,7 +945,9 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c) c->x86_cache_occ_scale = ebx; c->x86_cache_mbm_width_offset = eax & 0xff; - if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset) + if ((c->x86_vendor == X86_VENDOR_AMD || + c->x86_vendor == X86_VENDOR_HYGON) && + !c->x86_cache_mbm_width_offset) c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD; } } diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 9b6642d0087130ca7450c8838878b525686e68c0..f02b70c911f19951ffc694b46aaeeb8313227b8a 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -96,6 +96,17 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, return err; } +static u32 get_umc_base_f18h_m4h(u16 node, u8 channel) +{ + struct pci_dev *f3 = node_to_amd_nb(node)->misc; + u8 df_id; + + get_df_id(f3, &df_id); + df_id -= 4; + + return get_umc_base(channel) + (0x80000000 + (0x10000000 * df_id)); +} + /* * Select DCT to which PCI cfg accesses are routed */ @@ -1135,8 +1146,11 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr ctx.nid = nid; ctx.inst_id = umc; - /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */ - if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp)) + /* Read DramOffset, check if base 1 is used. */ + if (hygon_f18h_m4h() && + df_indirect_read_instance(nid, 0, 0x214, umc, &ctx.tmp)) + goto out_err; + else if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp)) goto out_err; /* Remove HiAddrOffset from normalized address, if enabled: */ @@ -1160,6 +1174,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr goto out_err; } + intlv_num_sockets = 0; + if (hygon_f18h_m4h()) + intlv_num_sockets = (ctx.tmp >> 2) & 0x3; lgcy_mmio_hole_en = ctx.tmp & BIT(1); intlv_num_chan = (ctx.tmp >> 4) & 0xF; intlv_addr_sel = (ctx.tmp >> 8) & 0x7; @@ -1176,7 +1193,8 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp)) goto out_err; - intlv_num_sockets = (ctx.tmp >> 8) & 0x1; + if (!hygon_f18h_m4h()) + intlv_num_sockets = (ctx.tmp >> 8) & 0x1; intlv_num_dies = (ctx.tmp >> 10) & 0x3; dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0); @@ -1194,6 +1212,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr hash_enabled = true; break; default: + if (hygon_f18h_m4h() && boot_cpu_data.x86_model == 0x4 && + intlv_num_chan == 2) + break; pr_err("%s: Invalid number of interleaved channels %d.\n", __func__, intlv_num_chan); goto out_err; @@ -1212,8 +1233,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr /* Add a bit if sockets are interleaved. */ num_intlv_bits += intlv_num_sockets; - /* Assert num_intlv_bits <= 4 */ - if (num_intlv_bits > 4) { + /* Assert num_intlv_bits in the correct range. 
*/ + if ((hygon_f18h_m4h() && num_intlv_bits > 7) || + (!hygon_f18h_m4h() && num_intlv_bits > 4)) { pr_err("%s: Invalid interleave bits %d.\n", __func__, num_intlv_bits); goto out_err; @@ -1232,7 +1254,10 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp)) goto out_err; - cs_fabric_id = (ctx.tmp >> 8) & 0xFF; + if (hygon_f18h_m4h()) + cs_fabric_id = (ctx.tmp >> 8) & 0x7FF; + else + cs_fabric_id = (ctx.tmp >> 8) & 0xFF; die_id_bit = 0; /* If interleaved over more than 1 channel: */ @@ -1252,8 +1277,13 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr /* If interleaved over more than 1 die. */ if (intlv_num_dies) { sock_id_bit = die_id_bit + intlv_num_dies; - die_id_shift = (ctx.tmp >> 24) & 0xF; - die_id_mask = (ctx.tmp >> 8) & 0xFF; + if (hygon_f18h_m4h()) { + die_id_shift = (ctx.tmp >> 12) & 0xF; + die_id_mask = ctx.tmp & 0x7FF; + } else { + die_id_shift = (ctx.tmp >> 24) & 0xF; + die_id_mask = (ctx.tmp >> 8) & 0xFF; + } cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit; } @@ -1261,7 +1291,10 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr /* If interleaved over more than 1 socket. */ if (intlv_num_sockets) { socket_id_shift = (ctx.tmp >> 28) & 0xF; - socket_id_mask = (ctx.tmp >> 16) & 0xFF; + if (hygon_f18h_m4h()) + socket_id_mask = (ctx.tmp >> 16) & 0x7FF; + else + socket_id_mask = (ctx.tmp >> 16) & 0xFF; cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit; } @@ -1608,7 +1641,10 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt) u32 i, tmp, umc_base; for_each_umc(i) { - umc_base = get_umc_base(i); + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); umc = &pvt->umc[i]; edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg); @@ -1717,11 +1753,17 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) u32 mask_reg, mask_reg_sec; u32 *base, *base_sec; u32 *mask, *mask_sec; + u32 umc_base; int cs, umc; for_each_umc(umc) { - umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR; - umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC; + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, umc); + else + umc_base = get_umc_base(umc); + + umc_base_reg = umc_base + UMCCH_BASE_ADDR; + umc_base_reg_sec = umc_base + UMCCH_BASE_ADDR_SEC; for_each_chip_select(cs, umc, pvt) { base = &pvt->csels[umc].csbases[cs]; @@ -1739,8 +1781,8 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) umc, cs, *base_sec, base_reg_sec); } - umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK; - umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC); + umc_mask_reg = umc_base + UMCCH_ADDR_MASK; + umc_mask_reg_sec = umc_base + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC); for_each_chip_select_mask(cs, umc, pvt) { mask = &pvt->csels[umc].csmasks[cs]; @@ -1823,7 +1865,8 @@ static void umc_determine_memory_type(struct amd64_pvt *pvt) * Check if the system supports the "DDR Type" field in UMC Config * and has DDR5 DIMMs in use. 
*/ - if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { + if ((pvt->flags.zn_regs_v2 || hygon_f18h_m4h()) && + ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { if (umc->dimm_cfg & BIT(5)) umc->dram_type = MEM_LRDDR5; else if (umc->dimm_cfg & BIT(4)) @@ -3057,7 +3100,11 @@ static inline void decode_bus_error(int node_id, struct mce *m) */ static void umc_get_err_info(struct mce *m, struct err_info *err) { - err->channel = (m->ipid & GENMASK(31, 0)) >> 20; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) + err->channel = (m->ipid & GENMASK(23, 0)) >> 20; + else + err->channel = (m->ipid & GENMASK(31, 0)) >> 20; err->csrow = m->synd & 0x7; } @@ -3068,6 +3115,7 @@ static void decode_umc_error(int node_id, struct mce *m) struct amd64_pvt *pvt; struct err_info err; u64 sys_addr; + u8 umc; node_id = fixup_node_id(node_id, m); @@ -3098,7 +3146,12 @@ static void decode_umc_error(int node_id, struct mce *m) pvt->ops->get_err_info(m, &err); - if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) { + if (hygon_f18h_m4h() && boot_cpu_data.x86_model == 0x6) + umc = err.channel << 1; + else + umc = err.channel; + + if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, umc, &sys_addr)) { err.err_code = ERR_NORM_ADDR; goto log_error; } @@ -3172,8 +3225,11 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt) /* Read registers from each UMC */ for_each_umc(i) { + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); - umc_base = get_umc_base(i); umc = &pvt->umc[i]; amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg); @@ -4104,6 +4160,18 @@ static int per_family_init(struct amd64_pvt *pvt) break; case 0x18: + if (pvt->model == 0x4) { + pvt->ctl_name = "F18h_M04h"; + pvt->max_mcs = 3; + break; + } else if (pvt->model == 0x5) { + pvt->ctl_name = "F18h_M05h"; + pvt->max_mcs = 1; + break; + } else if (pvt->model == 0x6) { + pvt->ctl_name = "F18h_M06h"; + break; + } pvt->ctl_name = "F18h"; break; @@ -4367,6 +4435,7 @@ static int __init amd64_edac_init(void) { const char *owner; int err = -ENODEV; + u16 instance_num; int i; if (ghes_get_devices()) @@ -4384,8 +4453,13 @@ static int __init amd64_edac_init(void) opstate_init(); + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + err = -ENOMEM; - ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL); + ecc_stngs = kcalloc(instance_num, sizeof(ecc_stngs[0]), GFP_KERNEL); if (!ecc_stngs) goto err_free; @@ -4393,7 +4467,7 @@ static int __init amd64_edac_init(void) if (!msrs) goto err_free; - for (i = 0; i < amd_nb_num(); i++) { + for (i = 0; i < instance_num; i++) { err = probe_one_instance(i); if (err) { /* unwind properly */ @@ -4438,6 +4512,7 @@ static int __init amd64_edac_init(void) static void __exit amd64_edac_exit(void) { + u16 instance_num; int i; if (pci_ctl) @@ -4449,7 +4524,12 @@ static void __exit amd64_edac_exit(void) else amd_unregister_ecc_decoder(decode_bus_error); - for (i = 0; i < amd_nb_num(); i++) + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + + for (i = 0; i < instance_num; i++) remove_one_instance(i); kfree(ecc_stngs); diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 9215c06783df5f19dd64cb07b2f275f9ff8ee118..06e29d2b51d1ed32071d0371369bb1e9de2a3ba1 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -1187,8 +1187,13 @@ static void 
decode_smca_error(struct mce *m)
 	pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]);
 
 	if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) &&
-	    xec == 0 && decode_dram_ecc)
-		decode_dram_ecc(topology_die_id(m->extcpu), m);
+	    xec == 0 && decode_dram_ecc) {
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+		    boot_cpu_data.x86 == 0x18)
+			decode_dram_ecc(topology_logical_die_id(m->extcpu), m);
+		else
+			decode_dram_ecc(topology_die_id(m->extcpu), m);
+	}
 }
 
 static inline void amd_decode_err_code(u16 ec)
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index bae0becfa24be9c9c774a90712449aacdbf3484a..faf3955a311f792e7e9fcd6d4586e9bd6c3f700a 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -84,6 +84,11 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
  */
 #define AMD_I3255_STR	"3255"
 
+struct hygon_private {
+	u32 index_2nd;
+	u32 offset_2nd;
+};
+
 struct k10temp_data {
 	struct pci_dev *pdev;
 	void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
@@ -94,6 +99,7 @@ struct k10temp_data {
 	bool is_zen;
 	u32 ccd_offset;
 	bool disp_negative;
+	void *priv;
 };
 
 #define TCTL_BIT	0
@@ -201,6 +207,23 @@ static int k10temp_read_labels(struct device *dev,
 	return 0;
 }
 
+static void hygon_read_temp(struct k10temp_data *data, int channel,
+			    u32 *regval)
+{
+	struct hygon_private *h_priv;
+
+	h_priv = (struct hygon_private *)data->priv;
+	if ((channel - 2) < h_priv->index_2nd)
+		amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+			     ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
+			     regval);
+	else
+		amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+			     ZEN_CCD_TEMP(h_priv->offset_2nd,
+					  channel - 2 - h_priv->index_2nd),
+			     regval);
+}
+
 static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
 			     long *val)
 {
@@ -221,7 +244,10 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
 		*val = 0;
 		break;
 	case 2 ... 13:		/* Tccd{1-12} */
-		amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+		if (hygon_f18h_m4h())
+			hygon_read_temp(data, channel, &regval);
+		else
+			amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
 			     ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
 			     &regval);
 		*val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
@@ -388,14 +414,48 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev,
 	}
 }
 
+static void k10temp_get_ccd_support_2nd(struct pci_dev *pdev,
+					struct k10temp_data *data, int limit)
+{
+	struct hygon_private *h_priv;
+	u32 regval;
+	int i;
+
+	h_priv = (struct hygon_private *)data->priv;
+	for (i = h_priv->index_2nd; i < limit; i++) {
+		amd_smn_read(amd_pci_dev_to_node_id(pdev),
+			     ZEN_CCD_TEMP(h_priv->offset_2nd,
+					  i - h_priv->index_2nd),
+			     &regval);
+		if (regval & ZEN_CCD_TEMP_VALID)
+			data->show_temp |= BIT(TCCD_BIT(i));
+	}
+}
+
 static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int unreliable = has_erratum_319(pdev);
 	struct device *dev = &pdev->dev;
+	struct hygon_private *h_priv;
 	struct k10temp_data *data;
 	struct device *hwmon_dev;
+	u8 df_id;
 	int i;
 
+	if (hygon_f18h_m4h()) {
+		if (get_df_id(pdev, &df_id)) {
+			pr_err("Get DF ID failed.\n");
+			return -ENODEV;
+		}
+
+		/*
+		 * The temperature should be read from the devices
+		 * with id < 4.
+ */ + if (df_id >= 4) + return 0; + } + if (unreliable) { if (!force) { dev_err(dev, @@ -423,7 +483,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) (boot_cpu_data.x86_model & 0xf0) == 0x70)) { data->read_htcreg = read_htcreg_nb_f15; data->read_tempreg = read_tempreg_nb_f15; - } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { + } else if (boot_cpu_data.x86 == 0x17) { data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_zen; data->is_zen = true; @@ -448,6 +508,25 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) k10temp_get_ccd_support(pdev, data, 8); break; } + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; + data->read_tempreg = read_tempreg_nb_zen; + data->is_zen = true; + + if (boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) { + data->ccd_offset = 0x154; + data->priv = devm_kzalloc(dev, sizeof(*h_priv), + GFP_KERNEL); + if (!data->priv) + return -ENOMEM; + h_priv = (struct hygon_private *)data->priv; + h_priv->offset_2nd = 0x2f8; + h_priv->index_2nd = 3; + k10temp_get_ccd_support(pdev, data, h_priv->index_2nd); + k10temp_get_ccd_support_2nd(pdev, data, 8); + } } else if (boot_cpu_data.x86 == 0x19) { data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_zen; @@ -528,6 +607,8 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 809fbd014cd6833749a677bba4b6845854459d3b..cc170c114e1090ddfd1a6600fca310961711ac6f 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -1043,8 +1043,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) bool notify_imc = false; is_sb800 = true; - if ((dev->vendor == PCI_VENDOR_ID_AMD || - dev->vendor == PCI_VENDOR_ID_HYGON) && + if (dev->vendor == PCI_VENDOR_ID_AMD && dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) { u8 imc; diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 45efb7e5d725460b39de534c67b5fb5be0d31d1d..9d24ddfc2d7e7a2abab341d7b86ddbfa4be9855f 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3001,6 +3001,9 @@ static void __init free_iommu_resources(void) /* SB IOAPIC is always on this device in AMD systems */ #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) +/* SB IOAPIC for Hygon family 18h model 4h is on the device 0xb */ +#define IOAPIC_SB_DEVID_FAM18H_M4H ((0x00 << 8) | PCI_DEVFN(0xb, 0)) + static bool __init check_ioapic_information(void) { const char *fw_bug = FW_BUG; @@ -3026,7 +3029,12 @@ static bool __init check_ioapic_information(void) pr_err("%s: IOAPIC[%d] not in IVRS table\n", fw_bug, id); ret = false; - } else if (devid == IOAPIC_SB_DEVID) { + } else if (devid == IOAPIC_SB_DEVID || + (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf && + devid == IOAPIC_SB_DEVID_FAM18H_M4H)) { has_sb_ioapic = true; ret = true; } diff --git 
a/include/linux/pci_ids.h b/include/linux/pci_ids.h index fe4a3589bb3fdae60fafb782a427fcb5c49e9346..3fb4124dca0e35260fa5b4934d6e0185d91fb410 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2596,6 +2596,8 @@ #define PCI_VENDOR_ID_ZHAOXIN 0x1d17 #define PCI_VENDOR_ID_HYGON 0x1d94 +#define PCI_DEVICE_ID_HYGON_18H_M05H_HDA 0x14a9 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3 0x14b3 #define PCI_VENDOR_ID_FUNGIBLE 0x1dad diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 32c59053b48edca72dcf57cda55b674c3a9dd5e3..101183b8d3bcd897191fcdae216bbc624248d488 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -350,6 +350,7 @@ struct hdac_bus { bool needs_damn_long_delay:1; bool not_use_interrupts:1; /* prohibiting the RIRB IRQ */ bool access_sdnctl_in_dword:1; /* accessing the sdnctl register by dword */ + bool hygon_dword_access:1; int poll_count; diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index 7f3a000fab0ce0f9827f215156fb35cffc6c49b6..df37a85cf27cc79d938406c2b54a778d3945b66f 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c @@ -410,7 +410,10 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus) { unsigned long timeout; - snd_hdac_chip_updateb(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); + if (bus->hygon_dword_access) + snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); + else + snd_hdac_chip_updateb(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); timeout = jiffies + msecs_to_jiffies(100); while (!snd_hdac_chip_readb(bus, GCTL) && time_before(jiffies, timeout)) @@ -475,7 +478,10 @@ static void azx_int_disable(struct hdac_bus *bus) /* disable interrupts in stream descriptor */ list_for_each_entry(azx_dev, &bus->stream_list, list) - snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0); + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL, SD_INT_MASK, 0); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0); /* disable SIE for all streams & disable controller CIE and GIE */ snd_hdac_chip_writel(bus, INTCTL, 0); diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c index 214a0680524b0b487b6803cdd0667966ab3a0bbd..1bf30b8f4bff258c61d1c1751d213ed4bacb3a71 100644 --- a/sound/hda/hdac_stream.c +++ b/sound/hda/hdac_stream.c @@ -146,11 +146,15 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev) stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream); else stripe_ctl = 0; - snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, + stripe_ctl); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, stripe_ctl); } /* set DMA start and interrupt mask */ - if (bus->access_sdnctl_in_dword) + if (bus->access_sdnctl_in_dword || bus->hygon_dword_access) snd_hdac_stream_updatel(azx_dev, SD_CTL, 0, SD_CTL_DMA_START | SD_INT_MASK); else @@ -166,11 +170,21 @@ EXPORT_SYMBOL_GPL(snd_hdac_stream_start); */ static void snd_hdac_stream_clear(struct hdac_stream *azx_dev) { - snd_hdac_stream_updateb(azx_dev, SD_CTL, - SD_CTL_DMA_START | SD_INT_MASK, 0); - snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ - if (azx_dev->stripe) - snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); + struct hdac_bus *bus = azx_dev->bus; + + if (bus->hygon_dword_access) { + snd_hdac_stream_updatel(azx_dev, SD_CTL, + SD_CTL_DMA_START | SD_INT_MASK, 0); + snd_hdac_stream_writeb(azx_dev, SD_STS, 
SD_INT_MASK); /* to be sure */ + if (azx_dev->stripe) + snd_hdac_stream_updatel(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); + } else { + snd_hdac_stream_updateb(azx_dev, SD_CTL, + SD_CTL_DMA_START | SD_INT_MASK, 0); + snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ + if (azx_dev->stripe) + snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); + } azx_dev->running = false; } @@ -225,12 +239,16 @@ void snd_hdac_stream_reset(struct hdac_stream *azx_dev) { unsigned char val; int dma_run_state; + struct hdac_bus *bus = azx_dev->bus; snd_hdac_stream_clear(azx_dev); dma_run_state = snd_hdac_stream_readb(azx_dev, SD_CTL) & SD_CTL_DMA_START; - snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); /* wait for hardware to report that the stream entered reset */ snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val, (val & SD_CTL_STREAM_RESET), 3, 300); @@ -238,7 +256,10 @@ void snd_hdac_stream_reset(struct hdac_stream *azx_dev) if (azx_dev->bus->dma_stop_delay && dma_run_state) udelay(azx_dev->bus->dma_stop_delay); - snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0); + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0); /* wait for hardware to report that the stream is out of reset */ snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val, !(val & SD_CTL_STREAM_RESET), 3, 300); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 75148485b75538008b24385ac1fa8d7b9d2b93a2..962e0bd90ec07d1ba8d5cd48c603fcd79386af7d 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -238,6 +238,7 @@ enum { AZX_DRIVER_CMEDIA, AZX_DRIVER_ZHAOXIN, AZX_DRIVER_LOONGSON, + AZX_DRIVER_HYGON, AZX_DRIVER_GENERIC, AZX_NUM_DRIVERS, /* keep this as last entry */ }; @@ -350,6 +351,7 @@ static const char * const driver_short_names[] = { [AZX_DRIVER_CMEDIA] = "HDA C-Media", [AZX_DRIVER_ZHAOXIN] = "HDA Zhaoxin", [AZX_DRIVER_LOONGSON] = "HDA Loongson", + [AZX_DRIVER_HYGON] = "HDA Hygon", [AZX_DRIVER_GENERIC] = "HD-Audio Generic", }; @@ -1876,6 +1878,10 @@ static int azx_first_init(struct azx *chip) bus->access_sdnctl_in_dword = 1; } + if (chip->driver_type == AZX_DRIVER_HYGON && + chip->pci->device == PCI_DEVICE_ID_HYGON_18H_M05H_HDA) + bus->hygon_dword_access = 1; + err = pcim_iomap_regions(pci, 1 << 0, "ICH HD audio"); if (err < 0) return err; @@ -2749,6 +2755,9 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_LOONGSON }, { PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDMI), .driver_data = AZX_DRIVER_LOONGSON }, + /* Hygon HDAudio */ + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_HDA), + .driver_data = AZX_DRIVER_HYGON | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_NO_MSI }, { 0, } }; MODULE_DEVICE_TABLE(pci, azx_ids);
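
---
Three short user-space sketches follow for reviewers; they are not part
of the patch and only illustrate the arithmetic the patch relies on.

First, the F18h M06h L3 PMC layout added in the perf_event.h hunk
(slicemask in config[31:28], threadmask in config[63:32]). The helper
mirrors the F18h M06h branch of l3_thread_slice_mask(): a field the user
leaves at zero is widened to "count everything". The constants are
copied from the patch, and the GNU "?:" shorthand is used just as the
kernel code does, so build this with gcc.

	#include <stdint.h>
	#include <stdio.h>

	#define HYGON_L3_SLICE_SHIFT	28
	#define HYGON_L3_SLICE_MASK	(0xFULL << HYGON_L3_SLICE_SHIFT)
	#define HYGON_L3_THREAD_SHIFT	32
	#define HYGON_L3_THREAD_MASK	(0xFFFFFFFFULL << HYGON_L3_THREAD_SHIFT)

	/* mirror of the F18h M06h branch of l3_thread_slice_mask() */
	static uint64_t f18h_m6h_slice_mask(uint64_t config)
	{
		return ((config & HYGON_L3_SLICE_MASK) ? : HYGON_L3_SLICE_MASK) |
		       ((config & HYGON_L3_THREAD_MASK) ? : HYGON_L3_THREAD_MASK);
	}

	int main(void)
	{
		/* no slice/thread bits given: all 4 slices, all 32 threads */
		printf("default: %#llx\n",
		       (unsigned long long)f18h_m6h_slice_mask(0));
		/* slice 0 only, threads 0 and 1 only */
		printf("narrow:  %#llx\n",
		       (unsigned long long)f18h_m6h_slice_mask((1ULL << 28) |
								(3ULL << 32)));
		return 0;
	}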
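
Second, the LLC-ID derivation used for model >= 0x5 in the cacheinfo.c
hunk: CPUID leaf 0x8000001d, indexed at the last cache level, reports
the number of logical CPUs sharing that cache in EAX[25:14], and the
LLC ID is the APIC ID shifted right by log2 of that count. The EAX
value and APIC ID below are made up for illustration.

	#include <stdio.h>

	/* same rounding as the kernel's get_count_order(): ceil(log2(n)) */
	static int get_count_order(unsigned int n)
	{
		int order = 0;

		while ((1U << order) < n)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned int eax = 31U << 14;	/* NumSharingCache - 1 = 31 */
		unsigned int num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
		unsigned int apicid = 0x42;	/* hypothetical APIC ID */

		printf("llc_id = %u\n",
		       apicid >> get_count_order(num_sharing_cache));
		return 0;
	}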
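
Last, the per-DF UMC SMN base computed by get_umc_base_f18h_m4h() in the
amd64_edac hunk: the UMC-carrying DF nodes have id >= 4, and each one's
register block sits 0x10000000 * (df_id - 4) above 0x80000000.
GET_UMC_BASE below is an assumed stand-in for the driver's
get_umc_base() helper using the usual Zen layout (0x50000 +
(channel << 20)); check amd64_edac.h before relying on that detail.

	#include <stdint.h>
	#include <stdio.h>

	/* assumed stand-in for amd64_edac's get_umc_base() */
	#define GET_UMC_BASE(ch)	(0x50000u + ((uint32_t)(ch) << 20))

	static uint32_t umc_base_f18h_m4h(uint8_t df_id, uint8_t channel)
	{
		return GET_UMC_BASE(channel) +
		       (0x80000000u + 0x10000000u * (uint32_t)(df_id - 4));
	}

	int main(void)
	{
		printf("df 4, ch 0: %#x\n", umc_base_f18h_m4h(4, 0)); /* 0x80050000 */
		printf("df 5, ch 1: %#x\n", umc_base_f18h_m4h(5, 1)); /* 0x90150000 */
		return 0;
	}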