#include <linux/slab.h>
#include <linux/mm.h>

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "dc_link.h"
#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_link_dp.h"
#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "i2caux_interface.h"
#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";
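/* Raise *original to the more intrusive of the two update types. */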
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}

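/*
 * Create one link per physical connector reported by the BIOS object
 * table, then append num_virtual_links virtual links backed by a virtual
 * link encoder for headless configurations.
 */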
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

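/**
 * dc_stream_adjust_vmin_vmax - Adjust the DRR V_TOTAL range of a stream.
 * @dc: dc reference
 * @stream: initial dc stream state
 * @adjust: updated vertical_total_min/mid/max parameters
 *
 * Copies the new DRR (Dynamic Refresh Rate) range into the stream state,
 * then programs it on every pipe currently driving the stream.
 */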
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;
	bool ret = false;

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);

			ret = true;
		}
	}
	return ret;
}

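/*
 * Read back the last V_TOTAL the timing generator used while DRR was
 * active. Returns false if no matching pipe is found or the hardware in
 * question does not implement the readback hook.
 */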
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;

	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;

				break;
			}
		}
	}

	return status;
}

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
			struct crc_params *crc_window)
{
	int i;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct pipe_ctx *pipe;
	struct crc_region tmp_win, *crc_win;
	struct otg_phy_mux mapping_tmp, *mux_mapping;

	/* Sanity check */
	if (!crc_window)
		return false;

	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
		crc_win = &tmp_win;
		mux_mapping = &mapping_tmp;
		/* Set CRC window */
		tmp_win.x_start = crc_window->windowa_x_start;
		tmp_win.y_start = crc_window->windowa_y_start;
		tmp_win.x_end = crc_window->windowa_x_end;
		tmp_win.y_end = crc_window->windowa_y_end;

		for (i = 0; i < MAX_PIPES; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
				break;
		}

		/* Stream not found */
		if (i == MAX_PIPES)
			return false;

		/* Set the mapping between the DIG and the OTG driving this stream */
		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

		dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
	} else {
		DC_LOG_DC("dmcu is not initialized");
		return false;
	}

	return true;
}

bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct pipe_ctx *pipe;
	struct otg_phy_mux mapping_tmp, *mux_mapping;

	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
		mux_mapping = &mapping_tmp;

		for (i = 0; i < MAX_PIPES; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
				break;
		}

		/* Stream not found */
		if (i == MAX_PIPES)
			return false;

		/* Set the mapping between the DIG and the OTG driving this stream */
		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	} else {
		DC_LOG_DC("dmcu is not initialized");
		return false;
	}

	return true;
}
#endif

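/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC object.
 * @stream: the stream to configure CRC on.
 * @crc_window: CRC window (x/y start/end) information, or NULL to capture
 *              the full frame.
 * @enable: enable CRC if true, disable otherwise.
 * @continuous: capture CRC on every frame if true, otherwise only once.
 *
 * Return false if the stream is not found or CRC capture is not supported.
 */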
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window, bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

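/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object.
 * @stream: the DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the first of the 3 channels is stored here.
 * @g_y: CRC value for the second of the 3 channels is stored here.
 * @b_cb: CRC value for the third of the 3 channels is stored here.
 *
 * dc_stream_configure_crc() needs to be called beforehand to enable CRCs.
 * Return false if the stream is not found or CRCs are not supported.
 */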
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	return true;
}

static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	dc->config = init_params->flags;

	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#endif

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx__resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
#endif

	if (dc->res_pool->funcs->update_bw_bounding_box)
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */
	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Initialise DIG link encoder resource tracking variables. */
	link_enc_cfg_init(dc, dc->current_state);

	return true;

fail:
	return false;
}

static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
					  struct dc_stream_state *stream, bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			/* Lock the top pipe of each tree driving this stream */
			if (stream == pipe_ctx->stream) {
				if (!pipe_ctx->top_pipe &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}

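/*
 * Disable planes (and writeback) for any stream in the current state that
 * is absent from the new context, so no stale plane keeps scanning out
 * across the state transition.
 */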
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* check if timing_changed, disable stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		/* only looking for first odm pipe */
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					if (pix_clk_100hz != requested_pix_clk_100hz) {
						core_link_disable_stream(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}

static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;
	PERF_TRACE();
	for (i = 0; i < MAX_PIPES; i++) {
		int count = 0;
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;
			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
	PERF_TRACE();
}

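/*
 * Allocate a DC instance and construct it for the given environment.
 * Returns NULL on allocation or construction failure.
 */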
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}

static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_sink(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}

void dc_hardware_init(struct dc *dc)
{
	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}

void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else
			if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

		/* remove any other unblanked pipes as they have already been programmed */
		for (j = j + 1; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, group_index, group_size, pipe_set);
			} else
			if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
			}
			group_index++;
		}
		num_group++;
	}
}

static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

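/*
 * Check whether the timing that VBIOS/GOP left programmed on an eDP link
 * matches crtc_timing closely enough that the boot display can be adopted
 * without retraining the link or reprogramming the timing generator.
 */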
bool dc_validate_seamless_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on eDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP) {
		return false;
	}

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	/* tg_inst not found */
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
		return false;
	}

	if (is_edp_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}

void dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}
}

void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}
}

static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
void dc_z10_restore(struct dc *dc)
{
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
}

void dc_z10_save_init(struct dc *dc)
{
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
}
#endif

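/*
 * Applies given context to HW and copies it into current context.
 * It's up to the user to release the src context afterwards.
 */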
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

#if defined(CONFIG_DRM_AMD_DC_DCN)
	dc_z10_restore(dc);
	dc_allow_idle_optimizations(dc, false);
#endif

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	}

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	disable_dangling_plane(dc, context);

	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	dc_trigger_sync(dc, context);

	/* Program all planes within new context */
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (!context_changed(dc, context))
		return DC_OK;

	DC_LOG_DC("%s: %d streams\n",
				__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dc_acquire_release_mpc_3dlut(
		struct dc *dc, bool acquire,
		struct dc_stream_state *stream,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int pipe_idx;
	bool ret = false;
	bool found_pipe_idx = false;
	const struct resource_pool *pool = dc->res_pool;
	struct resource_context *res_ctx = &dc->current_state->res_ctx;
	int mpcc_id = 0;

	if (pool && res_ctx) {
		if (acquire) {
			/* find the pipe that drives this stream */
			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
					found_pipe_idx = true;
					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
					break;
				}
			}
		} else
			found_pipe_idx = true; /* no need to search for pipe on release */

		if (found_pipe_idx) {
			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
		}
	}
	return ret;
}
#endif
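/* Return true if any enabled plane still has a page flip pending. */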
1737static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
1738{
1739 int i;
1740 struct pipe_ctx *pipe;
1741
1742 for (i = 0; i < MAX_PIPES; i++) {
1743 pipe = &context->res_ctx.pipe_ctx[i];
1744
1745 if (!pipe->plane_state)
1746 continue;
1747
1748
1749 pipe->plane_state->status.is_flip_pending = false;
1750 dc->hwss.update_pending_status(pipe);
1751 if (pipe->plane_state->status.is_flip_pending)
1752 return true;
1753 }
1754 return false;
1755}
1756
1757void dc_post_update_surfaces_to_stream(struct dc *dc)
1758{
1759 int i;
1760 struct dc_state *context = dc->current_state;
1761
1762 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
1763 return;
1764
1765 post_surface_trace(dc);
1766
1767 if (is_flip_pending_in_pipes(dc, context))
1768 return;
1769
1770 for (i = 0; i < dc->res_pool->pipe_count; i++)
1771 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
1772 context->res_ctx.pipe_ctx[i].plane_state == NULL) {
1773 context->res_ctx.pipe_ctx[i].pipe_idx = i;
1774 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
1775 }
1776
1777 dc->hwss.optimize_bandwidth(dc, context);
1778
1779 dc->optimized_required = false;
1780 dc->wm_optimized_required = false;
1781}
1782
1783static void init_state(struct dc *dc, struct dc_state *context)
1784{
1785
1786
1787
1788
1789#ifdef CONFIG_DRM_AMD_DC_DCN
1790 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
1791#endif
1792}
1793
1794struct dc_state *dc_create_state(struct dc *dc)
1795{
1796 struct dc_state *context = kvzalloc(sizeof(struct dc_state),
1797 GFP_KERNEL);
1798
1799 if (!context)
1800 return NULL;
1801
1802 init_state(dc, context);
1803
1804 kref_init(&context->refcount);
1805
1806 return context;
1807}
1808
1809struct dc_state *dc_copy_state(struct dc_state *src_ctx)
1810{
1811 int i, j;
1812 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
1813
1814 if (!new_ctx)
1815 return NULL;
1816 memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
1817
1818 for (i = 0; i < MAX_PIPES; i++) {
1819 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
1820
1821 if (cur_pipe->top_pipe)
1822 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
1823
1824 if (cur_pipe->bottom_pipe)
1825 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
1826
1827 if (cur_pipe->prev_odm_pipe)
1828 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
1829
1830 if (cur_pipe->next_odm_pipe)
1831 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
1832
1833 }
1834
1835 for (i = 0; i < new_ctx->stream_count; i++) {
1836 dc_stream_retain(new_ctx->streams[i]);
1837 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
1838 dc_plane_state_retain(
1839 new_ctx->stream_status[i].plane_states[j]);
1840 }
1841
1842 kref_init(&new_ctx->refcount);
1843
1844 return new_ctx;
1845}
1846
1847void dc_retain_state(struct dc_state *context)
1848{
1849 kref_get(&context->refcount);
1850}
1851
1852static void dc_state_free(struct kref *kref)
1853{
1854 struct dc_state *context = container_of(kref, struct dc_state, refcount);
1855 dc_resource_state_destruct(context);
1856 kvfree(context);
1857}
1858
1859void dc_release_state(struct dc_state *context)
1860{
1861 kref_put(&context->refcount, dc_state_free);
1862}
1863
1864bool dc_set_generic_gpio_for_stereo(bool enable,
1865 struct gpio_service *gpio_service)
1866{
1867 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
1868 struct gpio_pin_info pin_info;
1869 struct gpio *generic;
1870 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
1871 GFP_KERNEL);
1872
1873 if (!config)
1874 return false;
1875 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
1876
1877 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
1878 kfree(config);
1879 return false;
1880 } else {
1881 generic = dal_gpio_service_create_generic_mux(
1882 gpio_service,
1883 pin_info.offset,
1884 pin_info.mask);
1885 }
1886
1887 if (!generic) {
1888 kfree(config);
1889 return false;
1890 }
1891
1892 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
1893
1894 config->enable_output_from_mux = enable;
1895 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
1896
1897 if (gpio_result == GPIO_RESULT_OK)
1898 gpio_result = dal_mux_setup_config(generic, config);
1899
1900 if (gpio_result == GPIO_RESULT_OK) {
1901 dal_gpio_close(generic);
1902 dal_gpio_destroy_generic_mux(&generic);
1903 kfree(config);
1904 return true;
1905 } else {
1906 dal_gpio_close(generic);
1907 dal_gpio_destroy_generic_mux(&generic);
1908 kfree(config);
1909 return false;
1910 }
1911}
1912
1913static bool is_surface_in_context(
1914 const struct dc_state *context,
1915 const struct dc_plane_state *plane_state)
1916{
1917 int j;
1918
1919 for (j = 0; j < MAX_PIPES; j++) {
1920 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1921
1922 if (plane_state == pipe_ctx->plane_state) {
1923 return true;
1924 }
1925 }
1926
1927 return false;
1928}
1929
1930static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
1931{
1932 union surface_update_flags *update_flags = &u->surface->update_flags;
1933 enum surface_update_type update_type = UPDATE_TYPE_FAST;
1934
1935 if (!u->plane_info)
1936 return UPDATE_TYPE_FAST;
1937
1938 if (u->plane_info->color_space != u->surface->color_space) {
1939 update_flags->bits.color_space_change = 1;
1940 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1941 }
1942
1943 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
1944 update_flags->bits.horizontal_mirror_change = 1;
1945 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1946 }
1947
1948 if (u->plane_info->rotation != u->surface->rotation) {
1949 update_flags->bits.rotation_change = 1;
1950 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1951 }
1952
1953 if (u->plane_info->format != u->surface->format) {
1954 update_flags->bits.pixel_format_change = 1;
1955 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1956 }
1957
1958 if (u->plane_info->stereo_format != u->surface->stereo_format) {
1959 update_flags->bits.stereo_format_change = 1;
1960 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1961 }
1962
1963 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
1964 update_flags->bits.per_pixel_alpha_change = 1;
1965 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1966 }
1967
1968 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
1969 update_flags->bits.global_alpha_change = 1;
1970 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1971 }
1972
1973 if (u->plane_info->dcc.enable != u->surface->dcc.enable
1974 || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
1975 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
1976
1977
1978
1979
1980
1981 update_flags->bits.dcc_change = 1;
1982 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1983 }
1984
1985 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
1986 resource_pixel_format_to_bpp(u->surface->format)) {
1987
1988
1989
1990 update_flags->bits.bpp_change = 1;
1991 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1992 }
1993
1994 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
1995 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
1996 update_flags->bits.plane_size_change = 1;
1997 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1998 }
1999
2000
2001 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2002 sizeof(union dc_tiling_info)) != 0) {
2003 update_flags->bits.swizzle_change = 1;
2004 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2005
2006
2007
2008
2009 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2010
2011
2012
2013 update_flags->bits.bandwidth_change = 1;
2014 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2015 }
2016 }
2017
2018
2019 return update_type;
2020}
2021
2022static enum surface_update_type get_scaling_info_update_type(
2023 const struct dc_surface_update *u)
2024{
2025 union surface_update_flags *update_flags = &u->surface->update_flags;
2026
2027 if (!u->scaling_info)
2028 return UPDATE_TYPE_FAST;
2029
2030 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
2031 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
2032 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2033 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2034 || u->scaling_info->scaling_quality.integer_scaling !=
2035 u->surface->scaling_quality.integer_scaling
2036 ) {
2037 update_flags->bits.scaling_change = 1;
2038
2039 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2040 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2041 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2042 || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2043
2044 update_flags->bits.bandwidth_change = 1;
2045 }
2046
2047 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2048 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2049
2050 update_flags->bits.scaling_change = 1;
2051 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2052 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2053
2054 update_flags->bits.clock_change = 1;
2055 }
2056
2057 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2058 || u->scaling_info->src_rect.y != u->surface->src_rect.y
2059 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2060 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2061 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2062 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2063 update_flags->bits.position_change = 1;
2064
2065 if (update_flags->bits.clock_change
2066 || update_flags->bits.bandwidth_change
2067 || update_flags->bits.scaling_change)
2068 return UPDATE_TYPE_FULL;
2069
2070 if (update_flags->bits.position_change)
2071 return UPDATE_TYPE_MED;
2072
2073 return UPDATE_TYPE_FAST;
2074}
2075
2076static enum surface_update_type det_surface_update(const struct dc *dc,
2077 const struct dc_surface_update *u)
2078{
2079 const struct dc_state *context = dc->current_state;
2080 enum surface_update_type type;
2081 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2082 union surface_update_flags *update_flags = &u->surface->update_flags;
2083
2084 if (u->flip_addr)
2085 update_flags->bits.addr_update = 1;
2086
2087 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2088 update_flags->raw = 0xFFFFFFFF;
2089 return UPDATE_TYPE_FULL;
2090 }
2091
2092 update_flags->raw = 0;
2093
2094 type = get_plane_info_update_type(u);
2095 elevate_update_type(&overall_type, type);
2096
2097 type = get_scaling_info_update_type(u);
2098 elevate_update_type(&overall_type, type);
2099
2100 if (u->flip_addr)
2101 update_flags->bits.addr_update = 1;
2102
2103 if (u->in_transfer_func)
2104 update_flags->bits.in_transfer_func_change = 1;
2105
2106 if (u->input_csc_color_matrix)
2107 update_flags->bits.input_csc_change = 1;
2108
2109 if (u->coeff_reduction_factor)
2110 update_flags->bits.coeff_reduction_change = 1;
2111
2112 if (u->gamut_remap_matrix)
2113 update_flags->bits.gamut_remap_change = 1;
2114
2115 if (u->gamma) {
2116 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2117
2118 if (u->plane_info)
2119 format = u->plane_info->format;
2120 else if (u->surface)
2121 format = u->surface->format;
2122
2123 if (dce_use_lut(format))
2124 update_flags->bits.gamma_change = 1;
2125 }
2126
2127 if (u->hdr_mult.value)
2128 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2129 update_flags->bits.hdr_mult = 1;
2130 elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2131 }
2132
2133 if (update_flags->bits.in_transfer_func_change) {
2134 type = UPDATE_TYPE_MED;
2135 elevate_update_type(&overall_type, type);
2136 }
2137
2138 if (update_flags->bits.input_csc_change
2139 || update_flags->bits.coeff_reduction_change
2140 || update_flags->bits.gamma_change
2141 || update_flags->bits.gamut_remap_change) {
2142 type = UPDATE_TYPE_FULL;
2143 elevate_update_type(&overall_type, type);
2144 }
2145
2146 return overall_type;
2147}
2148
2149static enum surface_update_type check_update_surfaces_for_stream(
2150 struct dc *dc,
2151 struct dc_surface_update *updates,
2152 int surface_count,
2153 struct dc_stream_update *stream_update,
2154 const struct dc_stream_status *stream_status)
2155{
2156 int i;
2157 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2158
2159#if defined(CONFIG_DRM_AMD_DC_DCN)
2160 if (dc->idle_optimizations_allowed)
2161 overall_type = UPDATE_TYPE_FULL;
2162
2163#endif
2164 if (stream_status == NULL || stream_status->plane_count != surface_count)
2165 overall_type = UPDATE_TYPE_FULL;
2166
2167 if (stream_update && stream_update->pending_test_pattern) {
2168 overall_type = UPDATE_TYPE_FULL;
2169 }
2170
2171
	if (stream_update) {
		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
				(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
				stream_update->integer_scaling_update)
			su_flags->bits.scaling = 1;

		if (stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off)
			su_flags->bits.dpms_off = 1;

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;

		if (stream_update->dsc_config)
			su_flags->bits.dsc_changed = 1;

		if (su_flags->raw != 0)
			overall_type = UPDATE_TYPE_FULL;

		if (stream_update->output_csc_transform || stream_update->output_color_space)
			su_flags->bits.out_csc = 1;
	}

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
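/*
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
 */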
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	if (stream_update)
		stream_update->stream->update_flags.raw = 0;
	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL) {
		if (stream_update) {
			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;

			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
		}
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
	}

	if (type == UPDATE_TYPE_FAST) {
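		// If there's an available clock comparator, we use that.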
		if (dc->clk_mgr->funcs->are_clock_states_equal) {
			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
				dc->optimized_required = true;

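		// Else we fallback to mem compare.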
		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
			dc->optimized_required = true;
		}

		dc->optimized_required |= dc->wm_optimized_required;
	}

	return type;
}

static struct dc_stream_status *stream_get_status(
		struct dc_state *ctx,
		struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i])
			return &ctx->stream_status[i];
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;

static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
			surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;

		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
			srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
			srf_update->scaling_info->dst_rect;
		surface->src_rect =
			srf_update->scaling_info->src_rect;
		surface->clip_rect =
			srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
			srf_update->plane_info->color_space;
		surface->format =
			srf_update->plane_info->format;
		surface->plane_size =
			srf_update->plane_info->plane_size;
		surface->rotation =
			srf_update->plane_info->rotation;
		surface->horizontal_mirror =
			srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
			srf_update->plane_info->stereo_format;
		surface->tiling_info =
			srf_update->plane_info->tiling_info;
		surface->visible =
			srf_update->plane_info->visible;
		surface->per_pixel_alpha =
			srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
			srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
			srf_update->plane_info->global_alpha_value;
		surface->dcc =
			srf_update->plane_info->dcc;
		surface->layer_index =
			srf_update->plane_info->layer_index;
	}

	if (srf_update->gamma &&
			(surface->gamma_correction !=
					srf_update->gamma)) {
		memcpy(&surface->gamma_correction->entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction->is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction->num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction->type =
			srf_update->gamma->type;
	}

	if (srf_update->in_transfer_func &&
			(surface->in_transfer_func !=
					srf_update->in_transfer_func)) {
		surface->in_transfer_func->sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func->tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func->type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func->tf_pts,
			&srf_update->in_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (srf_update->func_shaper &&
			(surface->in_shaper_func !=
					srf_update->func_shaper))
		memcpy(surface->in_shaper_func, srf_update->func_shaper,
			sizeof(*surface->in_shaper_func));

	if (srf_update->lut3d_func &&
			(surface->lut3d_func !=
					srf_update->lut3d_func))
		memcpy(surface->lut3d_func, srf_update->lut3d_func,
			sizeof(*surface->lut3d_func));

	if (srf_update->hdr_mult.value)
		surface->hdr_mult =
			srf_update->hdr_mult;

	if (srf_update->blend_tf &&
			(surface->blend_tf !=
					srf_update->blend_tf))
		memcpy(surface->blend_tf, srf_update->blend_tf,
			sizeof(*surface->blend_tf));

	if (srf_update->input_csc_color_matrix)
		surface->input_csc_color_matrix =
			*srf_update->input_csc_color_matrix;

	if (srf_update->coeff_reduction_factor)
		surface->coeff_reduction_factor =
			*srf_update->coeff_reduction_factor;

	if (srf_update->gamut_remap_matrix)
		surface->gamut_remap_matrix =
			*srf_update->gamut_remap_matrix;
}

static void copy_stream_update_to_stream(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream,
		struct dc_stream_update *update)
{
	struct dc_context *dc_ctx = dc->ctx;

	if (update == NULL || stream == NULL)
		return;

	if (update->src.height && update->src.width)
		stream->src = update->src;

	if (update->dst.height && update->dst.width)
		stream->dst = update->dst;

	if (update->out_transfer_func &&
			stream->out_transfer_func != update->out_transfer_func) {
		stream->out_transfer_func->sdr_ref_white_level =
			update->out_transfer_func->sdr_ref_white_level;
		stream->out_transfer_func->tf = update->out_transfer_func->tf;
		stream->out_transfer_func->type =
			update->out_transfer_func->type;
		memcpy(&stream->out_transfer_func->tf_pts,
			&update->out_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt0)
		stream->periodic_interrupt0 = *update->periodic_interrupt0;

	if (update->periodic_interrupt1)
		stream->periodic_interrupt1 = *update->periodic_interrupt1;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

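	/* Note: this being updated after mode set is currently not a use case
	 * however if it arises OCSC would need to be reprogrammed at the
	 * minimum
	 */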
	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;

	if (update->pending_test_pattern)
		stream->test_pattern = *update->pending_test_pattern;

	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}
	if (update->dsc_config) {
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				update->dsc_config->num_slices_v != 0);

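		/* Use temporary context for validating new DSC config */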
		struct dc_state *dsc_validate_context = dc_create_state(dc);

		if (dsc_validate_context) {
			dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);

			stream->timing.dsc_cfg = *update->dsc_config;
			stream->timing.flags.DSC = enable_dsc;
			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
				stream->timing.dsc_cfg = old_dsc_cfg;
				stream->timing.flags.DSC = old_dsc_enabled;
				update->dsc_config = NULL;
			}

			dc_release_state(dsc_validate_context);
		} else {
			DC_ERROR("Failed to allocate new validate context for DSC change\n");
			update->dsc_config = NULL;
		}
	}
}

static void commit_planes_do_stream_update(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int j;

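	// Stream updates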
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {

			if (stream_update->periodic_interrupt0 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);

			if (stream_update->periodic_interrupt1 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);

			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
					stream_update->vrr_infopacket ||
					stream_update->vsc_infopacket ||
					stream_update->vsp_infopacket) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}

			if (stream_update->hdr_static_metadata &&
					stream->use_dynamic_meta &&
					dc->hwss.set_dmdata_attributes &&
					pipe_ctx->stream->dmdata_address.quad_part != 0)
				dc->hwss.set_dmdata_attributes(pipe_ctx);

			if (stream_update->gamut_remap)
				dc_stream_set_gamut_remap(dc, stream);

			if (stream_update->output_csc_transform)
				dc_stream_program_csc_matrix(dc, stream);

			if (stream_update->dither_option) {
				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;

				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
						&pipe_ctx->stream->bit_depth_params);
				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
						&stream->bit_depth_params,
						&stream->clamping);
				while (odm_pipe) {
					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
							&stream->bit_depth_params,
							&stream->clamping);
					odm_pipe = odm_pipe->next_odm_pipe;
				}
			}

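			/* Updates below require a full (non-fast) update */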
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			if (stream_update->dsc_config)
				dp_update_dsc_config(pipe_ctx);

			if (stream_update->pending_test_pattern) {
				dc_link_dp_set_test_pattern(stream->link,
					stream->test_pattern.type,
					stream->test_pattern.color_space,
					stream->test_pattern.p_link_settings,
					stream->test_pattern.p_custom_pattern,
					stream->test_pattern.cust_pattern_size);
			}

			if (stream_update->dpms_off) {
				if (*stream_update->dpms_off) {
					core_link_disable_stream(pipe_ctx);
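					/* for dpms, keep acquired resources */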
					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

					dc->optimized_required = true;

				} else {
					if (get_seamless_boot_stream_count(context) == 0)
						dc->hwss.prepare_bandwidth(dc, dc->current_state);

					core_link_enable_stream(dc->current_state, pipe_ctx);
				}
			}

			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
				bool should_program_abm = true;

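				// if otg funcs defined check if blanked before programming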
				if (pipe_ctx->stream_res.tg->funcs->is_blanked)
					if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						should_program_abm = false;

				if (should_program_abm) {
					if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
						dc->hwss.set_abm_immediate_disable(pipe_ctx);
					} else {
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
					}
				}
			}
		}
	}
}

static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	dc_z10_restore(dc);
#endif

	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
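		/* Optimize seamless boot flag keeps clocks and watermarks high until
		 * first flip. After first flip, optimization is required to lower
		 * bandwidth. Important to note that it is expected UEFI will
		 * only light up a single display on POST, therefore we only expect
		 * one stream with seamless boot flag set.
		 */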
		if (stream->apply_seamless_boot_optimization) {
			stream->apply_seamless_boot_optimization = false;

			if (get_seamless_boot_stream_count(context) == 0)
				dc->optimized_required = true;
		}
	}

	if (update_type == UPDATE_TYPE_FULL) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(dc, false);

#endif
		if (get_seamless_boot_stream_count(context) == 0)
			dc->hwss.prepare_bandwidth(dc, context);

		context_clock_trace(dc, context);
	}

	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
				!pipe_ctx->prev_odm_pipe &&
				pipe_ctx->stream &&
				pipe_ctx->stream == stream) {
			top_pipe_to_program = pipe_ctx;
		}
	}

#ifdef CONFIG_DRM_AMD_DC_DCN
	if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
		struct pipe_ctx *mpcc_pipe;
		struct pipe_ctx *odm_pipe;

		for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
			for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
				odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
	}
#endif

	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
			if (should_use_dmub_lock(stream->link)) {
				union dmub_hw_lock_flags hw_locks = { 0 };
				struct dmub_hw_lock_inst_flags inst_flags = { 0 };

				hw_locks.bits.lock_dig = 1;
				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;

				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
						true,
						&hw_locks,
						&inst_flags);
			} else
				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
						top_pipe_to_program->stream_res.tg);
		}

	if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, true);
	else
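		/* Lock the top pipe while updating plane addrs, since freesync requires
		 * plane addr update event triggers to be synchronized.
		 * top_pipe_to_program is expected to never be NULL.
		 */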
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

	if (stream_update)
		commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);

	if (surface_count == 0) {
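		/*
		 * In case of turning off screen, no surface update.
		 * In this case we would update pipe offset without surface
		 */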
		if (dc->hwss.apply_ctx_for_surface)
			dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
		if (dc->hwss.program_front_end_for_ctx)
			dc->hwss.program_front_end_for_ctx(dc, context);

		if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
			dc->hwss.interdependent_update_lock(dc, context, false);
		else
			dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
		return;
	}

	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;
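			/* set logical flag for lock/unlock use */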
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (!pipe_ctx->plane_state)
					continue;
				if (pipe_ctx->plane_state != plane_state)
					continue;
				plane_state->triplebuffer_flips = false;
				if (update_type == UPDATE_TYPE_FAST &&
						dc->hwss.program_triplebuffer != NULL &&
						!plane_state->flip_immediate && dc->debug.enable_tri_buf) {
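					/* triple buffer for VUpdate only */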
					plane_state->triplebuffer_flips = true;
				}
			}
			if (update_type == UPDATE_TYPE_FULL) {
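				/* force vsync flip when reconfiguring pipes to prevent underflow */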
				plane_state->flip_immediate = false;
			}
		}
	}

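	// Update Type FULL, Surface updates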
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
				!pipe_ctx->prev_odm_pipe &&
				pipe_ctx->stream &&
				pipe_ctx->stream == stream) {
			struct dc_stream_status *stream_status = NULL;

			if (!pipe_ctx->plane_state)
				continue;

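			/* Full front end update only; fast updates are handled below */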
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);

			if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
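				/* turn off triple buffer for full update */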
				dc->hwss.program_triplebuffer(
					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
			}
			stream_status =
				stream_get_status(context, pipe_ctx->stream);

			if (dc->hwss.apply_ctx_for_surface)
				dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);
		}
	}
	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
		dc->hwss.program_front_end_for_ctx(dc, context);
#ifdef CONFIG_DRM_AMD_DC_DCN
		if (dc->debug.validate_dml_output) {
			for (i = 0; i < dc->res_pool->pipe_count; i++) {
				struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];

				if (cur_pipe.stream == NULL)
					continue;

				cur_pipe.plane_res.hubp->funcs->validate_dml_output(
						cur_pipe.plane_res.hubp, dc->ctx,
						&context->res_ctx.pipe_ctx[i].rq_regs,
						&context->res_ctx.pipe_ctx[i].dlg_regs,
						&context->res_ctx.pipe_ctx[i].ttu_regs);
			}
		}
#endif
	}

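	// Update Type FAST, Surface updates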
	if (update_type == UPDATE_TYPE_FAST) {
		if (dc->hwss.set_flip_control_gsl)
			for (i = 0; i < surface_count; i++) {
				struct dc_plane_state *plane_state = srf_updates[i].surface;

				for (j = 0; j < dc->res_pool->pipe_count; j++) {
					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

					if (pipe_ctx->stream != stream)
						continue;

					if (pipe_ctx->plane_state != plane_state)
						continue;
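					// GSL has to be used for flip immediate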
					dc->hwss.set_flip_control_gsl(pipe_ctx,
							plane_state->flip_immediate);
				}
			}

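		/* Perform requested Updates */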
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->stream != stream)
					continue;

				if (pipe_ctx->plane_state != plane_state)
					continue;
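				/* program triple buffer after lock based on flip type */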
				if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
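					/* only enable triple buffering for fast updates */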
					dc->hwss.program_triplebuffer(
						dc, pipe_ctx, plane_state->triplebuffer_flips);
				}
				if (srf_updates[i].flip_addr)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}
	}

	if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, false);
	else
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);

	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
				top_pipe_to_program->stream_res.tg,
				CRTC_STATE_VACTIVE);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
				top_pipe_to_program->stream_res.tg,
				CRTC_STATE_VBLANK);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
				top_pipe_to_program->stream_res.tg,
				CRTC_STATE_VACTIVE);

			if (stream && should_use_dmub_lock(stream->link)) {
				union dmub_hw_lock_flags hw_locks = { 0 };
				struct dmub_hw_lock_inst_flags inst_flags = { 0 };

				hw_locks.bits.lock_dig = 1;
				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;

				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
						false,
						&hw_locks,
						&inst_flags);
			} else
				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
						top_pipe_to_program->stream_res.tg);
		}

	if (update_type != UPDATE_TYPE_FAST)
		dc->hwss.post_unlock_program_front_end(dc, context);

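	// Fire manual trigger only when bottom plane is flipped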
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->plane_state)
			continue;

		if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
				!pipe_ctx->stream || pipe_ctx->stream != stream ||
				!pipe_ctx->plane_state->update_flags.bits.addr_update ||
				pipe_ctx->plane_state->skip_manual_trigger)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
	}
}

void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {

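		/* initialize scratch memory for building context */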
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	if (update_type >= UPDATE_TYPE_FULL) {
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
			DC_ERROR("Mode validation failed for stream update!\n");
			dc_release_state(context);
			return;
		}
	}

	TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);

	commit_planes_for_stream(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			update_type,
			context);

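	/* update current_state */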
	if (dc->current_state != context) {
		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}

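	/* use current_state to update watermarks etc. */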
	if (update_type >= UPDATE_TYPE_FULL) {
		dc_post_update_surfaces_to_stream(dc);

		if (dc_ctx->dce_version >= DCE_VERSION_MAX)
			TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
		else
			TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
	}
}

uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}

struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link)
{
	uint8_t i;
	struct dc_context *ctx = link->ctx;

	for (i = 0; i < ctx->dc->current_state->stream_count; i++) {
		if (ctx->dc->current_state->streams[i]->link == link)
			return ctx->dc->current_state->streams[i];
	}

	return NULL;
}

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

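/*
 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
 */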
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}

void dc_power_down_on_boot(struct dc *dc)
{
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
			dc->hwss.power_down_on_boot)
		dc->hwss.power_down_on_boot(dc);
}

void dc_set_power_state(
		struct dc *dc,
		enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml;

	if (!dc->current_state)
		return;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_z10_restore(dc);
#endif
		if (dc->ctx->dmub_srv)
			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);

		dc->hwss.init_hw(dc);

		if (dc->hwss.init_sys_ctx != NULL &&
				dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}

		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);
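		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */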
		dml = kzalloc(sizeof(struct display_mode_lib),
				GFP_KERNEL);

		ASSERT(dml);
		if (!dml)
			return;

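		/* Preserve refcount */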
		refcount = dc->current_state->refcount;
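		/* Preserve display mode lib */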
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		kfree(dml);

		break;
	}
}

void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}

bool dc_is_dmcu_initialized(struct dc *dc)
{
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);
	return false;
}

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dce_i2c_submit_command(
		dc->res_pool,
		ddc->ddc_pin,
		cmd);
}

bool dc_submit_i2c_oem(
		struct dc *dc,
		struct i2c_command *cmd)
{
	struct ddc_service *ddc = dc->res_pool->oem_device;

	return dce_i2c_submit_command(
		dc->res_pool,
		ddc->ddc_pin,
		cmd);
}

static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

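/*
 * dc_link_add_remote_sink() - Create a sink and attach it to the link
 *
 * EDID length is in bytes
 */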
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > DC_MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

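	/*
	 * Treat device as no EDID device if EDID
	 * parsing fails
	 */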
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status %d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}

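/*
 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
 *
 * Note that this just removes the struct dc_sink - it doesn't
 * program hardware or alter other members of dc_link
 */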
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

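			/* shrink array to remove empty place */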
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}

void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}

enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
	if (dc->hwss.set_clock)
		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
	return DC_ERROR_UNEXPECTED;
}

void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
{
	if (dc->hwss.get_clock)
		dc->hwss.get_clock(dc, clock_type, clock_cfg);
}
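/* enable/disable eDP PSR without specifying a stream for eDP */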
bool dc_set_psr_allow_active(struct dc *dc, bool enable)
{
	int i;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		struct dc_link *link;
		struct dc_stream_state *stream = dc->current_state->streams[i];

		link = stream->link;
		if (!link)
			continue;

		if (link->psr_settings.psr_feature_enabled) {
			if (enable && !link->psr_settings.psr_allow_active) {
				if (!dc_link_set_psr_allow_active(link, true, false, false))
					return false;
			} else if (!enable && link->psr_settings.psr_allow_active) {
				if (!dc_link_set_psr_allow_active(link, false, true, false))
					return false;
			}
		}
	}

	return true;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)

void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
	if (dc->debug.disable_idle_power_optimizations)
		return;

	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
		if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
			return;

	if (allow == dc->idle_optimizations_allowed)
		return;

	if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
		dc->idle_optimizations_allowed = allow;
}

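/*
 * blank all streams, and set min and max memory clock to
 * lowest and highest DPM level, respectively
 */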
void dc_unlock_memory_clock_frequency(struct dc *dc)
{
	unsigned int i;

	for (i = 0; i < MAX_PIPES; i++)
		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
			core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);

	dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
	dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

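/*
 * set min memory clock to the min required for current mode,
 * max to maxDPM, and unblank streams
 */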
void dc_lock_memory_clock_frequency(struct dc *dc)
{
	unsigned int i;

	dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
	dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
	dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);

	for (i = 0; i < MAX_PIPES; i++)
		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
			core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
}

bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
		struct dc_cursor_attributes *cursor_attr)
{
	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
		return true;
	return false;
}

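/* cleanup on driver unload */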
void dc_hardware_release(struct dc *dc)
{
	if (dc->hwss.hardware_release)
		dc->hwss.hardware_release(dc);
}
#endif

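/**
 * dc_enable_dmub_notifications - Returns whether dmub notification can be enabled
 * @dc: dc structure
 *
 * Returns: True to enable dmub notifications, False otherwise
 */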
bool dc_enable_dmub_notifications(struct dc *dc)
{
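	/* dmub aux needs dmub notifications to be enabled */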
	return dc->debug.enable_dmub_aux_for_legacy_ddc;
}

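/**
 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
 *                                      Sets port index appropriately for legacy DDC
 * @dc: dc structure
 * @link_index: link index
 * @payload: aux payload
 *
 * Returns: True if successful, False if failure
 */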
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
		uint32_t link_index,
		struct aux_payload *payload)
{
	uint8_t action;
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

	ASSERT(payload->length <= 16);

	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
	cmd.dp_aux_access.header.payload_bytes = 0;
	cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
	cmd.dp_aux_access.aux_control.timeout = 0;
	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;

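	/* set aux action */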
	if (payload->i2c_over_aux) {
		if (payload->write) {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_WRITE;
		} else {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_READ;
		}
	} else {
		if (payload->write)
			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
		else
			action = DP_AUX_REQ_ACTION_DPCD_READ;
	}

	cmd.dp_aux_access.aux_control.dpaux.action = action;

	if (payload->length && payload->write) {
		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
			payload->data,
			payload->length);
	}

	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dmub_srv);
	dc_dmub_srv_wait_idle(dmub_srv);

	return true;
}

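/**
 * dc_disable_accelerated_mode - disable accelerated mode
 * @dc: dc structure
 */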
void dc_disable_accelerated_mode(struct dc *dc)
{
	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}