linux/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/*
 * Although these are defined in each ASIC's specific header file,
 * they share the same definitions and values. That makes common
 * APIs for issuing SMC messages to all ASICs possible.
 */
#define mmMP1_SMN_C2PMSG_66                  0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX         0

#define mmMP1_SMN_C2PMSG_82                  0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX         0

#define mmMP1_SMN_C2PMSG_90                  0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX         0

#define MP1_C2PMSG_90__CONTENT_MASK          0xFFFFFFFFL

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)   #type
static const char *__smu_message_names[] = {
        SMU_MESSAGE_TYPES
};

static const char *smu_get_message_name(struct smu_context *smu,
                                        enum smu_message_type type)
{
        if (type < 0 || type >= SMU_MSG_MAX_COUNT)
                return "unknown smu message";

        return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
                             uint32_t *arg)
{
        struct amdgpu_device *adev = smu->adev;

        *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

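/*
 * Poll the response register (MP1 C2PMSG_90) until the SMU writes a
 * non-zero value or the timeout expires. Returns the raw register value
 * once a response is seen, or -ETIME on timeout.
 */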
int smu_cmn_wait_for_response(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t cur_value, i, timeout = adev->usec_timeout * 20;

        for (i = 0; i < timeout; i++) {
                cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
                if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
                        return cur_value;

                udelay(1);
        }

        /* timeout means wrong logic */
        if (i == timeout)
                return -ETIME;

        return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

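/*
 * Check that the SMU has acknowledged the previous message, then clear the
 * response register and write the parameter and message registers. Does not
 * wait for the new message to complete.
 */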
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
                                     uint16_t msg, uint32_t param)
{
        struct amdgpu_device *adev = smu->adev;
        int ret;

        ret = smu_cmn_wait_for_response(smu);
        if (ret != 0x1) {
                dev_err(adev->dev, "Msg issuing pre-check failed (0x%x) and "
                       "SMU may not be in the right state!\n", ret);
                if (ret != -ETIME)
                        ret = -EIO;
                return ret;
        }

        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
        WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

        return 0;
}

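/*
 * Translate the common message index to the ASIC-specific one, send the
 * message and its parameter while holding message_lock, wait for the SMU
 * response and optionally read back the argument register. Returns 0 on
 * success or a negative error code.
 */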
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
                                    enum smu_message_type msg,
                                    uint32_t param,
                                    uint32_t *read_arg)
{
        struct amdgpu_device *adev = smu->adev;
        int ret = 0, index = 0;

        if (smu->adev->no_hw_access)
                return 0;

        index = smu_cmn_to_asic_specific_index(smu,
                                               CMN2ASIC_MAPPING_MSG,
                                               msg);
        if (index < 0)
                return index == -EACCES ? 0 : index;

        mutex_lock(&smu->message_lock);
        ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, param);
        if (ret)
                goto out;

        ret = smu_cmn_wait_for_response(smu);
        if (ret != 0x1) {
                if (ret == -ETIME) {
                        dev_err(adev->dev, "message: %15s (%d) \tparam: 0x%08x timed out (no response)\n",
                                smu_get_message_name(smu, msg), index, param);
                } else {
                        dev_err(adev->dev, "failed to send message: %15s (%d) \tparam: 0x%08x, response %#x\n",
                                smu_get_message_name(smu, msg), index, param,
                                ret);
                        ret = -EIO;
                }
                goto out;
        }

        if (read_arg)
                smu_cmn_read_arg(smu, read_arg);

        ret = 0; /* 0 as driver return value */
out:
        mutex_unlock(&smu->message_lock);
        return ret;
}

int smu_cmn_send_smc_msg(struct smu_context *smu,
                         enum smu_message_type msg,
                         uint32_t *read_arg)
{
        return smu_cmn_send_smc_msg_with_param(smu,
                                               msg,
                                               0,
                                               read_arg);
}

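/*
 * Map a common (CMN2ASIC) message, clock, feature, table, power source or
 * workload index to the ASIC-specific value recorded in the corresponding
 * mapping table. Returns the mapped value, -EINVAL for an invalid index or
 * missing map, and -EACCES for messages not allowed under an SR-IOV VF.
 */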
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
                                   enum smu_cmn2asic_mapping_type type,
                                   uint32_t index)
{
        struct cmn2asic_msg_mapping msg_mapping;
        struct cmn2asic_mapping mapping;

        switch (type) {
        case CMN2ASIC_MAPPING_MSG:
                if (index >= SMU_MSG_MAX_COUNT ||
                    !smu->message_map)
                        return -EINVAL;

                msg_mapping = smu->message_map[index];
                if (!msg_mapping.valid_mapping)
                        return -EINVAL;

                if (amdgpu_sriov_vf(smu->adev) &&
                    !msg_mapping.valid_in_vf)
                        return -EACCES;

                return msg_mapping.map_to;

        case CMN2ASIC_MAPPING_CLK:
                if (index >= SMU_CLK_COUNT ||
                    !smu->clock_map)
                        return -EINVAL;

                mapping = smu->clock_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_FEATURE:
                if (index >= SMU_FEATURE_COUNT ||
                    !smu->feature_map)
                        return -EINVAL;

                mapping = smu->feature_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_TABLE:
                if (index >= SMU_TABLE_COUNT ||
                    !smu->table_map)
                        return -EINVAL;

                mapping = smu->table_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_PWR:
                if (index >= SMU_POWER_SOURCE_COUNT ||
                    !smu->pwr_src_map)
                        return -EINVAL;

                mapping = smu->pwr_src_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_WORKLOAD:
                if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
                    !smu->workload_map)
                        return -EINVAL;

                mapping = smu->workload_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        default:
                return -EINVAL;
        }
}

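/*
 * Test whether the given feature is marked as supported in the cached
 * feature->supported bitmap. Returns non-zero if supported, 0 otherwise.
 */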
int smu_cmn_feature_is_supported(struct smu_context *smu,
                                 enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        int feature_id;
        int ret = 0;

        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
                                                    mask);
        if (feature_id < 0)
                return 0;

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->supported);
        mutex_unlock(&feature->mutex);

        return ret;
}

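/*
 * Test whether the given feature is currently enabled according to the
 * cached feature->enabled bitmap. On APUs of families older than
 * AMDGPU_FAMILY_VGH the feature is always reported as enabled.
 */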
int smu_cmn_feature_is_enabled(struct smu_context *smu,
                               enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        struct amdgpu_device *adev = smu->adev;
        int feature_id;
        int ret = 0;

        if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
                return 1;

        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
                                                    mask);
        if (feature_id < 0)
                return 0;

        WARN_ON(feature_id > feature->feature_num);

        mutex_lock(&feature->mutex);
        ret = test_bit(feature_id, feature->enabled);
        mutex_unlock(&feature->mutex);

        return ret;
}

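/*
 * Report whether DPM is enabled for the given clock domain by checking the
 * matching DPM feature bit. Clock types without a mapped feature are
 * treated as always enabled.
 */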
bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
                                enum smu_clk_type clk_type)
{
        enum smu_feature_mask feature_id = 0;

        switch (clk_type) {
        case SMU_MCLK:
        case SMU_UCLK:
                feature_id = SMU_FEATURE_DPM_UCLK_BIT;
                break;
        case SMU_GFXCLK:
        case SMU_SCLK:
                feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
                break;
        case SMU_SOCCLK:
                feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
                break;
        default:
                return true;
        }

        if (!smu_cmn_feature_is_enabled(smu, feature_id))
                return false;

        return true;
}

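/*
 * Fill feature_mask[0]/[1] with the 64-bit enabled-feature mask. If the
 * cached bitmap is empty, query the SMU through the
 * GetEnabledSmuFeaturesLow/High messages; otherwise copy the cached bitmap.
 */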
int smu_cmn_get_enabled_mask(struct smu_context *smu,
                             uint32_t *feature_mask,
                             uint32_t num)
{
        uint32_t feature_mask_high = 0, feature_mask_low = 0;
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;

        if (!feature_mask || num < 2)
                return -EINVAL;

        if (bitmap_empty(feature->enabled, feature->feature_num)) {
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
                if (ret)
                        return ret;

                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
                if (ret)
                        return ret;

                feature_mask[0] = feature_mask_low;
                feature_mask[1] = feature_mask_high;
        } else {
                bitmap_copy((unsigned long *)feature_mask, feature->enabled,
                            feature->feature_num);
        }

        return ret;
}

int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
                                     uint32_t *feature_mask,
                                     uint32_t num)
{
        uint32_t feature_mask_en_low = 0;
        uint32_t feature_mask_en_high = 0;
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;

        if (!feature_mask || num < 2)
                return -EINVAL;

        if (bitmap_empty(feature->enabled, feature->feature_num)) {
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0,
                                                      &feature_mask_en_low);
                if (ret)
                        return ret;

                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 1,
                                                      &feature_mask_en_high);
                if (ret)
                        return ret;

                feature_mask[0] = feature_mask_en_low;
                feature_mask[1] = feature_mask_en_high;
        } else {
                bitmap_copy((unsigned long *)feature_mask, feature->enabled,
                            feature->feature_num);
        }

        return ret;
}

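/*
 * Convert an ASIC-dependent throttler status word into the ASIC-independent
 * bit layout using the per-ASIC throttler_map lookup table.
 */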
uint64_t smu_cmn_get_indep_throttler_status(
                                        const unsigned long dep_status,
                                        const uint8_t *throttler_map)
{
        uint64_t indep_status = 0;
        uint8_t dep_bit = 0;

        for_each_set_bit(dep_bit, &dep_status, 32)
                indep_status |= 1ULL << throttler_map[dep_bit];

        return indep_status;
}

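/*
 * Enable or disable the features selected in feature_mask through the
 * EnableSmuFeaturesLow/High or DisableSmuFeaturesLow/High messages, then
 * update the cached enabled bitmap accordingly.
 */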
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
                                        uint64_t feature_mask,
                                        bool enabled)
{
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;

        if (enabled) {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                  SMU_MSG_EnableSmuFeaturesLow,
                                                  lower_32_bits(feature_mask),
                                                  NULL);
                if (ret)
                        return ret;
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                  SMU_MSG_EnableSmuFeaturesHigh,
                                                  upper_32_bits(feature_mask),
                                                  NULL);
                if (ret)
                        return ret;
        } else {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                  SMU_MSG_DisableSmuFeaturesLow,
                                                  lower_32_bits(feature_mask),
                                                  NULL);
                if (ret)
                        return ret;
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                  SMU_MSG_DisableSmuFeaturesHigh,
                                                  upper_32_bits(feature_mask),
                                                  NULL);
                if (ret)
                        return ret;
        }

        mutex_lock(&feature->mutex);
        if (enabled)
                bitmap_or(feature->enabled, feature->enabled,
                          (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
        else
                bitmap_andnot(feature->enabled, feature->enabled,
                              (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);

        return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
                                enum smu_feature_mask mask,
                                bool enable)
{
        struct smu_feature *feature = &smu->smu_feature;
        int feature_id;

        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
                                                    mask);
        if (feature_id < 0)
                return -EINVAL;

        WARN_ON(feature_id > feature->feature_num);

        return smu_cmn_feature_update_enable_state(smu,
                                                   1ULL << feature_id,
                                                   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)    #fea
static const char *__smu_feature_names[] = {
        SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
                                        enum smu_feature_mask feature)
{
        if (feature < 0 || feature >= SMU_FEATURE_COUNT)
                return "unknown smu feature";
        return __smu_feature_names[feature];
}

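/*
 * Print the enabled feature mask and a per-feature table (name, bit and
 * state) into buf, ordered by the ASIC-specific feature bit. Returns the
 * number of bytes written.
 */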
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
                                   char *buf)
{
        uint32_t feature_mask[2] = { 0 };
        int feature_index = 0;
        uint32_t count = 0;
        int8_t sort_feature[SMU_FEATURE_COUNT];
        size_t size = 0;
        int ret = 0, i;

        if (!smu->is_apu) {
                ret = smu_cmn_get_enabled_mask(smu,
                                               feature_mask,
                                               2);
                if (ret)
                        return 0;
        } else {
                ret = smu_cmn_get_enabled_32_bits_mask(smu,
                                                       feature_mask,
                                                       2);
                if (ret)
                        return 0;
        }

        size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
                       feature_mask[1], feature_mask[0]);

        memset(sort_feature, -1, sizeof(sort_feature));

        for (i = 0; i < SMU_FEATURE_COUNT; i++) {
                feature_index = smu_cmn_to_asic_specific_index(smu,
                                                               CMN2ASIC_MAPPING_FEATURE,
                                                               i);
                if (feature_index < 0)
                        continue;

                sort_feature[feature_index] = i;
        }

        size += sprintf(buf + size, "%-2s. %-20s  %-3s : %-s\n",
                        "No", "Feature", "Bit", "State");

        for (i = 0; i < SMU_FEATURE_COUNT; i++) {
                if (sort_feature[i] < 0)
                        continue;

                size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
                                count++,
                                smu_get_feature_name(smu, sort_feature[i]),
                                i,
                                !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
                                "enabled" : "disabled");
        }

        return size;
}

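/*
 * Apply a user-requested feature mask: features set in new_mask but not
 * currently enabled are enabled, and features currently enabled but cleared
 * in new_mask are disabled.
 */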
int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
                                uint64_t new_mask)
{
        int ret = 0;
        uint32_t feature_mask[2] = { 0 };
        uint64_t feature_2_enabled = 0;
        uint64_t feature_2_disabled = 0;
        uint64_t feature_enables = 0;

        ret = smu_cmn_get_enabled_mask(smu,
                                       feature_mask,
                                       2);
        if (ret)
                return ret;

        feature_enables = ((uint64_t)feature_mask[1] << 32 |
                           (uint64_t)feature_mask[0]);

        feature_2_enabled  = ~feature_enables & new_mask;
        feature_2_disabled = feature_enables & ~new_mask;

        if (feature_2_enabled) {
                ret = smu_cmn_feature_update_enable_state(smu,
                                                          feature_2_enabled,
                                                          true);
                if (ret)
                        return ret;
        }
        if (feature_2_disabled) {
                ret = smu_cmn_feature_update_enable_state(smu,
                                                          feature_2_disabled,
                                                          false);
                if (ret)
                        return ret;
        }

        return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 *
 * @smu:               smu_context pointer
 * @no_hw_disablement: whether real dpm disablement should be performed
 *                     true: update the cache (about dpm enablement state) only
 *                     false: real dpm disablement plus cache update
 * @mask:              the dpm feature which should not be disabled
 *                     SMU_FEATURE_COUNT: no exception, all dpm features
 *                     are to be disabled
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
                                                bool no_hw_disablement,
                                                enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        uint64_t features_to_disable = U64_MAX;
        int skipped_feature_id;

        if (mask != SMU_FEATURE_COUNT) {
                skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
                                                                    CMN2ASIC_MAPPING_FEATURE,
                                                                    mask);
                if (skipped_feature_id < 0)
                        return -EINVAL;

                features_to_disable &= ~(1ULL << skipped_feature_id);
        }

        if (no_hw_disablement) {
                mutex_lock(&feature->mutex);
                bitmap_andnot(feature->enabled, feature->enabled,
                              (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
                mutex_unlock(&feature->mutex);

                return 0;
        } else {
                return smu_cmn_feature_update_enable_state(smu,
                                                           features_to_disable,
                                                           false);
        }
}

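/*
 * Return the cached driver-interface and SMU firmware versions, querying
 * the SMU (GetDriverIfVersion / GetSmuVersion) and populating the cache on
 * the first call. At least one of if_version/smu_version must be non-NULL.
 */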
int smu_cmn_get_smc_version(struct smu_context *smu,
                            uint32_t *if_version,
                            uint32_t *smu_version)
{
        int ret = 0;

        if (!if_version && !smu_version)
                return -EINVAL;

        if (smu->smc_fw_if_version && smu->smc_fw_version) {
                if (if_version)
                        *if_version = smu->smc_fw_if_version;

                if (smu_version)
                        *smu_version = smu->smc_fw_version;

                return 0;
        }

        if (if_version) {
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
                if (ret)
                        return ret;

                smu->smc_fw_if_version = *if_version;
        }

        if (smu_version) {
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
                if (ret)
                        return ret;

                smu->smc_fw_version = *smu_version;
        }

        return ret;
}

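/*
 * Transfer a driver table between system memory and SMU-visible memory.
 * With drv2smu true the table is copied into the driver table BO and pushed
 * to the SMU (TransferTableDram2Smu); otherwise it is pulled from the SMU
 * (TransferTableSmu2Dram) and copied back into table_data. HDP flushes and
 * invalidations keep the CPU and GPU views coherent.
 */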
int smu_cmn_update_table(struct smu_context *smu,
                         enum smu_table_id table_index,
                         int argument,
                         void *table_data,
                         bool drv2smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct amdgpu_device *adev = smu->adev;
        struct smu_table *table = &smu_table->driver_table;
        int table_id = smu_cmn_to_asic_specific_index(smu,
                                                      CMN2ASIC_MAPPING_TABLE,
                                                      table_index);
        uint32_t table_size;
        int ret = 0;

        if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
                return -EINVAL;

        table_size = smu_table->tables[table_index].size;

        if (drv2smu) {
                memcpy(table->cpu_addr, table_data, table_size);
                /*
                 * Flush the hdp cache: to make sure the content
                 * seen by the GPU is consistent with the CPU.
                 */
                amdgpu_asic_flush_hdp(adev, NULL);
        }

        ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
                                          SMU_MSG_TransferTableDram2Smu :
                                          SMU_MSG_TransferTableSmu2Dram,
                                          table_id | ((argument & 0xFFFF) << 16),
                                          NULL);
        if (ret)
                return ret;

        if (!drv2smu) {
                amdgpu_asic_invalidate_hdp(adev, NULL);
                memcpy(table_data, table->cpu_addr, table_size);
        }

        return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
        void *watermarks_table = smu->smu_table.watermarks_table;

        if (!watermarks_table)
                return -EINVAL;

        return smu_cmn_update_table(smu,
                                    SMU_TABLE_WATERMARKS,
                                    0,
                                    watermarks_table,
                                    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
        void *pptable = smu->smu_table.driver_pptable;

        return smu_cmn_update_table(smu,
                                    SMU_TABLE_PPTABLE,
                                    0,
                                    pptable,
                                    true);
}

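/*
 * Refresh the cached SMU metrics table if it is stale (older than 1 ms) or
 * if bypass_cache is set, then optionally copy it into metrics_table.
 * Intended to be called with smu->metrics_lock held (see
 * smu_cmn_get_metrics_table).
 */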
int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
                                     void *metrics_table,
                                     bool bypass_cache)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        uint32_t table_size =
                smu_table->tables[SMU_TABLE_SMU_METRICS].size;
        int ret = 0;

        if (bypass_cache ||
            !smu_table->metrics_time ||
            time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
                ret = smu_cmn_update_table(smu,
                                           SMU_TABLE_SMU_METRICS,
                                           0,
                                           smu_table->metrics_table,
                                           false);
                if (ret) {
                        dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
                        return ret;
                }
                smu_table->metrics_time = jiffies;
        }

        if (metrics_table)
                memcpy(metrics_table, smu_table->metrics_table, table_size);

        return 0;
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
                              void *metrics_table,
                              bool bypass_cache)
{
        int ret = 0;

        mutex_lock(&smu->metrics_lock);
        ret = smu_cmn_get_metrics_table_locked(smu,
                                               metrics_table,
                                               bypass_cache);
        mutex_unlock(&smu->metrics_lock);

        return ret;
}

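/*
 * Initialize a gpu_metrics table header for the given format/content
 * revision: fill the structure with 0xFF and set the revision and
 * structure size fields. Unknown revisions are left untouched.
 */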
void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
        struct metrics_table_header *header = (struct metrics_table_header *)table;
        uint16_t structure_size;

#define METRICS_VERSION(a, b)   ((a << 16) | b)

        switch (METRICS_VERSION(frev, crev)) {
        case METRICS_VERSION(1, 0):
                structure_size = sizeof(struct gpu_metrics_v1_0);
                break;
        case METRICS_VERSION(1, 1):
                structure_size = sizeof(struct gpu_metrics_v1_1);
                break;
        case METRICS_VERSION(1, 2):
                structure_size = sizeof(struct gpu_metrics_v1_2);
                break;
        case METRICS_VERSION(1, 3):
                structure_size = sizeof(struct gpu_metrics_v1_3);
                break;
        case METRICS_VERSION(2, 0):
                structure_size = sizeof(struct gpu_metrics_v2_0);
                break;
        case METRICS_VERSION(2, 1):
                structure_size = sizeof(struct gpu_metrics_v2_1);
                break;
        case METRICS_VERSION(2, 2):
                structure_size = sizeof(struct gpu_metrics_v2_2);
                break;
        default:
                return;
        }

#undef METRICS_VERSION

        memset(header, 0xFF, structure_size);

        header->format_revision = frev;
        header->content_revision = crev;
        header->structure_size = structure_size;
}

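/*
 * Notify the SMU about an upcoming MP1 state transition (shutdown, unload
 * or reset) by sending the corresponding PrepareMp1For* message.
 * PP_MP1_STATE_NONE is a no-op.
 */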
int smu_cmn_set_mp1_state(struct smu_context *smu,
                          enum pp_mp1_state mp1_state)
{
        enum smu_message_type msg;
        int ret;

        switch (mp1_state) {
        case PP_MP1_STATE_SHUTDOWN:
                msg = SMU_MSG_PrepareMp1ForShutdown;
                break;
        case PP_MP1_STATE_UNLOAD:
                msg = SMU_MSG_PrepareMp1ForUnload;
                break;
        case PP_MP1_STATE_RESET:
                msg = SMU_MSG_PrepareMp1ForReset;
                break;
        case PP_MP1_STATE_NONE:
        default:
                return 0;
        }

        ret = smu_cmn_send_smc_msg(smu, msg, NULL);
        if (ret)
                dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

        return ret;
}