// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */
#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/virtio_config.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>

#include "mm_internal.h"

/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);

/*
 * This routine does not change the underlying encryption setting of the
 * pages that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * The data is read through a write-protected mapping of its current state,
 * staged in sme_early_buffer and written back through a mapping with the
 * desired encryption state.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at most)
	 * one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);

		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		memcpy(sme_early_buffer, src, len);
		memcpy(dst, sme_early_buffer, len);

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}

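/*
 * Wrappers around __sme_early_enc_dec() used by early boot code to encrypt
 * or decrypt a physical range in place.
 */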
void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}

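/*
 * Map or unmap a range of boot data one PMD at a time via
 * __early_make_pgtable(), with the encryption mask cleared so the data is
 * accessed unencrypted.
 */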
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	flush_tlb_local();
}

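/*
 * Remove the unencrypted mappings of the boot_params structure and the
 * kernel command line that were established by sme_map_bootdata().
 */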
void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}

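/*
 * Map the boot_params structure and the kernel command line unencrypted so
 * that the data placed there by the boot loader can be read while SME is
 * active.
 */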
void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

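/*
 * Once sme_me_mask is established, propagate the encryption mask into the
 * early PMD flags, the supported PTE mask and the protection map, and force
 * SWIOTLB use for SEV guests.
 */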
void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with the memory encryption mask */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);

	if (sev_active())
		swiotlb_force = SWIOTLB_FORCE;
}

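/* Size the SWIOTLB bounce buffer appropriately for an SEV guest. */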
void __init sev_setup_arch(void)
{
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;

	if (!sev_active())
		return;

	/*
	 * For SEV, all DMA has to occur via shared/unencrypted pages.
	 * SEV uses SWIOTLB to make this happen without changing device
	 * drivers. However, SWIOTLB force needs to be enabled as well as
	 * the SWIOTLB size adjusted to accommodate the larger use of bounce
	 * buffers.
	 *
	 * To accommodate more buffers, size the SWIOTLB to 6% of total
	 * memory, clamped between the default size and 1GB.
	 */
	size = total_mem * 6 / 100;
	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);
}

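/*
 * Set or clear the encryption attribute of a single 4K, 2M or 1G mapping,
 * transforming the underlying data in place so that it remains readable
 * after the C-bit change.
 */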
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pgprot_t old_prot, new_prot;
	unsigned long pfn, pa, size;
	pte_t new_pte;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		old_prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		old_prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		return;
	}

	new_prot = old_prot;
	if (enc)
		pgprot_val(new_prot) |= _PAGE_ENC;
	else
		pgprot_val(new_prot) &= ~_PAGE_ENC;

	/* If the encryption attribute is already as requested, nothing to do */
	if (pgprot_val(old_prot) == pgprot_val(new_prot))
		return;

	pa = pfn << PAGE_SHIFT;
	size = page_level_size(level);

	/*
	 * The C-bit of the page is about to change; flush the caches so that
	 * no stale data is later accessed with the wrong encryption
	 * attribute.
	 */
	clflush_cache_range(__va(pa), size);

	/* Encrypt/decrypt the contents of the page in place */
	if (enc)
		sme_early_encrypt(pa, size);
	else
		sme_early_decrypt(pa, size);

	/* Change the page encryption mask */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);
}

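/*
 * Walk the kernel page tables for [vaddr, vaddr + size) and switch the
 * encryption attribute of every mapping in the range, splitting large pages
 * when the range does not cover them entirely. Returns 0 on success, 1 if a
 * mapping could not be looked up.
 */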
static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * A split is needed when the address is not aligned to the
		 * large page or the range does not cover the whole page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page that can't be
		 * changed in one go, so split it: a 1GB page is split into
		 * 2MB pages, a 2MB page into 4KB pages.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		/*
		 * kernel_physical_mapping_change() does not flush the TLBs,
		 * so a TLB flush is required after we exit from the loop.
		 */
		kernel_physical_mapping_change(__pa(vaddr & pmask),
					       __pa((vaddr_end & pmask) + psize),
					       split_page_size_mask);
	}

	ret = 0;

out:
	__flush_tlb_all();
	return ret;
}

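/* Boot-time helpers to change the encryption attribute of a virtual range. */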
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}

/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * sme_active() and sev_active() functions are used for this.  When a
 * distinction isn't needed, the mem_encrypt_active() function can be used.
 */
bool sev_active(void)
{
	return sev_status & MSR_AMD64_SEV_ENABLED;
}

bool sme_active(void)
{
	return sme_me_mask && !sev_active();
}
EXPORT_SYMBOL_GPL(sev_active);

/* Needs to be called from non-instrumentable code */
bool noinstr sev_es_active(void)
{
	return sev_status & MSR_AMD64_SEV_ES_ENABLED;
}

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
	/*
	 * For SEV, all DMA must be to unencrypted addresses.
	 */
	if (sev_active())
		return true;

	/*
	 * For SME, all DMA must be to unencrypted addresses if the
	 * device does not support DMA to addresses that include the
	 * encryption mask.
	 */
	if (sme_active()) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);

		if (dma_dev_mask <= dma_enc_mask)
			return true;
	}

	return false;
}

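/*
 * Free the unused portion of the .bss..decrypted section, re-encrypting it
 * first when memory encryption is active.
 */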
void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * The unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it.
	 */
	if (mem_encrypt_active()) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	free_init_pages("unused decrypted", vaddr, vaddr_end);
}

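/* Report which AMD memory encryption features are active. */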
static void print_mem_encrypt_feature_info(void)
{
	pr_info("AMD Memory Encryption Features active:");

	/* Secure Memory Encryption */
	if (sme_active()) {
		/*
		 * SME is mutually exclusive with any of the SEV
		 * features below.
		 */
		pr_cont(" SME\n");
		return;
	}

	/* Secure Encrypted Virtualization */
	if (sev_active())
		pr_cont(" SEV");

	/* Encrypted Register State */
	if (sev_es_active())
		pr_cont(" SEV-ES");

	pr_cont("\n");
}

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	if (!sme_me_mask)
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	/*
	 * With SEV, we need to unroll the rep string I/O instructions,
	 * but SEV-ES supports them through the #VC handler.
	 */
	if (sev_active() && !sev_es_active())
		static_branch_enable(&sev_enable_key);

	print_mem_encrypt_feature_info();
}

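/* Virtio memory access must be restricted when running as an SEV guest. */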
int arch_has_restricted_virtio_memory_access(void)
{
	return sev_active();
}
EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);