/* linux/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c */
   1#include <inc/core_status.h>
   2#include <dc_link.h>
   3#include <inc/link_hwss.h>
   4#include <inc/link_dpcd.h>
   5#include "drm/drm_dp_helper.h"
   6#include <dc_dp_types.h>
   7#include "dm_helpers.h"
   8
/* Inclusive end address of a @size byte access starting at @start */
#define END_ADDRESS(start, size) (start + size - 1)
/* Number of bytes spanned by the inclusive address range [start, end] */
#define ADDRESS_RANGE_SIZE(start, end) (end - start + 1)
/* An inclusive range [start, end] of DPCD register addresses */
struct dpcd_address_range {
	uint32_t start;
	uint32_t end;
};
  15
  16static enum dc_status internal_link_read_dpcd(
  17        struct dc_link *link,
  18        uint32_t address,
  19        uint8_t *data,
  20        uint32_t size)
  21{
  22        if (!link->aux_access_disabled &&
  23                        !dm_helpers_dp_read_dpcd(link->ctx,
  24                        link, address, data, size)) {
  25                return DC_ERROR_UNEXPECTED;
  26        }
  27
  28        return DC_OK;
  29}
  30
  31static enum dc_status internal_link_write_dpcd(
  32        struct dc_link *link,
  33        uint32_t address,
  34        const uint8_t *data,
  35        uint32_t size)
  36{
  37        if (!link->aux_access_disabled &&
  38                        !dm_helpers_dp_write_dpcd(link->ctx,
  39                        link, address, data, size)) {
  40                return DC_ERROR_UNEXPECTED;
  41        }
  42
  43        return DC_OK;
  44}
  45
  46/*
  47 * Partition the entire DPCD address space
  48 * XXX: This partitioning must cover the entire DPCD address space,
  49 * and must contain no gaps or overlapping address ranges.
  50 */
  51static const struct dpcd_address_range mandatory_dpcd_partitions[] = {
  52        { 0, DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1) - 1},
  53        { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2) - 1 },
  54        { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3) - 1 },
  55        { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4) - 1 },
  56        { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5) - 1 },
  57        { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6) - 1 },
  58        { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7) - 1 },
  59        { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8) - 1 },
  60        { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) - 1 },
  61        /*
  62         * The FEC registers are contiguous
  63         */
  64        { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) - 1 },
  65        { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2) - 1 },
  66        { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3) - 1 },
  67        { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4) - 1 },
  68        { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5) - 1 },
  69        { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6) - 1 },
  70        { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7) - 1 },
  71        { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR8), DP_LTTPR_MAX_ADD },
  72        /* all remaining DPCD addresses */
  73        { DP_LTTPR_MAX_ADD + 1, DP_DPCD_MAX_ADD } };
  74
  75static inline bool do_addresses_intersect_with_range(
  76                const struct dpcd_address_range *range,
  77                const uint32_t start_address,
  78                const uint32_t end_address)
  79{
  80        return start_address <= range->end && end_address >= range->start;
  81}
  82
/*
 * Return the number of bytes, starting at @address, that can be accessed
 * without crossing out of the mandatory DPCD partition containing
 * @address.
 *
 * Returns @size unchanged when the whole request fits inside that
 * partition; otherwise returns the distance from @address to the
 * partition's last byte (the caller then issues further transactions for
 * the remainder).
 */
static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint32_t size)
{
	const uint32_t end_address = END_ADDRESS(address, size);
	uint32_t partition_iterator = 0;

	/*
	 * find current partition
	 * this loop spins forever if partition map above is not surjective
	 */
	while (!do_addresses_intersect_with_range(&mandatory_dpcd_partitions[partition_iterator],
				address, end_address))
		partition_iterator++;
	if (end_address < mandatory_dpcd_partitions[partition_iterator].end)
		return size;
	return ADDRESS_RANGE_SIZE(address, mandatory_dpcd_partitions[partition_iterator].end);
}
  99
/*
 * Ranges of DPCD addresses that must be read in a single transaction
 * XXX: Do not allow any two address ranges in this array to overlap
 */
static const struct dpcd_address_range mandatory_dpcd_blocks[] = {
	/* LTTPR capability field: revision through extended wait timeout */
	{ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT }};
 106
 107/*
 108 * extend addresses to read all mandatory blocks together
 109 */
 110static void dpcd_extend_address_range(
 111                const uint32_t in_address,
 112                uint8_t * const in_data,
 113                const uint32_t in_size,
 114                uint32_t *out_address,
 115                uint8_t **out_data,
 116                uint32_t *out_size)
 117{
 118        const uint32_t end_address = END_ADDRESS(in_address, in_size);
 119        const struct dpcd_address_range *addr_range;
 120        struct dpcd_address_range new_addr_range;
 121        uint32_t i;
 122
 123        new_addr_range.start = in_address;
 124        new_addr_range.end = end_address;
 125        for (i = 0; i < ARRAY_SIZE(mandatory_dpcd_blocks); i++) {
 126                addr_range = &mandatory_dpcd_blocks[i];
 127                if (addr_range->start <= in_address && addr_range->end >= in_address)
 128                        new_addr_range.start = addr_range->start;
 129
 130                if (addr_range->start <= end_address && addr_range->end >= end_address)
 131                        new_addr_range.end = addr_range->end;
 132        }
 133        *out_address = in_address;
 134        *out_size = in_size;
 135        *out_data = in_data;
 136        if (new_addr_range.start != in_address || new_addr_range.end != end_address) {
 137                *out_address = new_addr_range.start;
 138                *out_size = ADDRESS_RANGE_SIZE(new_addr_range.start, new_addr_range.end);
 139                *out_data = kzalloc(*out_size * sizeof(**out_data), GFP_KERNEL);
 140        }
 141}
 142
 143/*
 144 * Reduce the AUX reply down to the values the caller requested
 145 */
 146static void dpcd_reduce_address_range(
 147                const uint32_t extended_address,
 148                uint8_t * const extended_data,
 149                const uint32_t extended_size,
 150                const uint32_t reduced_address,
 151                uint8_t * const reduced_data,
 152                const uint32_t reduced_size)
 153{
 154        const uint32_t reduced_end_address = END_ADDRESS(reduced_address, reduced_size);
 155        const uint32_t extended_end_address = END_ADDRESS(extended_address, extended_size);
 156        const uint32_t offset = reduced_address - extended_address;
 157
 158        if (extended_end_address == reduced_end_address && extended_address == reduced_address)
 159                return; /* extended and reduced address ranges point to the same data */
 160
 161        memcpy(&extended_data[offset], reduced_data, reduced_size);
 162        kfree(extended_data);
 163}
 164
 165enum dc_status core_link_read_dpcd(
 166        struct dc_link *link,
 167        uint32_t address,
 168        uint8_t *data,
 169        uint32_t size)
 170{
 171        uint32_t extended_address;
 172        uint32_t partitioned_address;
 173        uint8_t *extended_data;
 174        uint32_t extended_size;
 175        /* size of the remaining partitioned address space */
 176        uint32_t size_left_to_read;
 177        enum dc_status status;
 178        /* size of the next partition to be read from */
 179        uint32_t partition_size;
 180        uint32_t data_index = 0;
 181
 182        dpcd_extend_address_range(address, data, size, &extended_address, &extended_data, &extended_size);
 183        partitioned_address = extended_address;
 184        size_left_to_read = extended_size;
 185        while (size_left_to_read) {
 186                partition_size = dpcd_get_next_partition_size(partitioned_address, size_left_to_read);
 187                status = internal_link_read_dpcd(link, partitioned_address, &extended_data[data_index], partition_size);
 188                if (status != DC_OK)
 189                        break;
 190                partitioned_address += partition_size;
 191                data_index += partition_size;
 192                size_left_to_read -= partition_size;
 193        }
 194        dpcd_reduce_address_range(extended_address, extended_data, extended_size, address, data, size);
 195        return status;
 196}
 197
 198enum dc_status core_link_write_dpcd(
 199        struct dc_link *link,
 200        uint32_t address,
 201        const uint8_t *data,
 202        uint32_t size)
 203{
 204        uint32_t partition_size;
 205        uint32_t data_index = 0;
 206        enum dc_status status;
 207
 208        while (size) {
 209                partition_size = dpcd_get_next_partition_size(address, size);
 210                status = internal_link_write_dpcd(link, address, &data[data_index], partition_size);
 211                if (status != DC_OK)
 212                        break;
 213                address += partition_size;
 214                data_index += partition_size;
 215                size -= partition_size;
 216        }
 217        return status;
 218}
 219