/* PowerPC64 Book3S hash-MMU definitions: SLB, hash page table and VSIDs. */
#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_

#include <asm/page.h>
#include <asm/bug.h>
#include <asm/asm-const.h>

/*
 * Needed for H_PGTABLE_RANGE and related definitions used by the
 * slice code below. Note that this isn't the complete pgtable.h,
 * only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/task_size_64.h>
#include <asm/cpu_has_feature.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		2
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_256M	SLB_VSID_SHIFT
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

/* Bits in the first doubleword of a hash PTE */
#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format: the segment size (B) field
 * moved out of the first doubleword into the second one.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_KEY_BIT4		ASM_CONST(0x2000000000000000)
#define HPTE_R_KEY_BIT3		ASM_CONST(0x1000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)
#define HPTE_R_KEY_BIT2		ASM_CONST(0x0000000000000800)
#define HPTE_R_KEY_BIT1		ASM_CONST(0x0000000000000400)
#define HPTE_R_KEY_BIT0		ASM_CONST(0x0000000000000200)
#define HPTE_R_KEY		(HPTE_R_KEY_LO | HPTE_R_KEY_HI)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */
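
/*
 * Illustration (added by the editor, derived from the masks above):
 * page protection is a 3-bit value split across the second HPTE
 * doubleword. The high bit (HPTE_R_PP0) sits at bit 63 while the low
 * two bits (HPTE_R_PP) sit at bits 1:0, which is why HPTE_R_PPP masks
 * both ends of the doubleword and why PP_RXXX is written as
 * (HPTE_R_PP0 | 2).
 */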

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void	(*hpte_invalidate)(unsigned long slot,
				   unsigned long vpn,
				   int bpsize, int apsize,
				   int ssize, int local);
	long	(*hpte_updatepp)(unsigned long slot,
				 unsigned long newpp,
				 unsigned long vpn,
				 int bpsize, int apsize,
				 int ssize, unsigned long flags);
	void	(*hpte_updateboltedpp)(unsigned long newpp,
				       unsigned long ea,
				       int psize, int ssize);
	long	(*hpte_insert)(unsigned long hpte_group,
			       unsigned long vpn,
			       unsigned long prpn,
			       unsigned long rflags,
			       unsigned long vflags,
			       int psize, int apsize,
			       int ssize);
	long	(*hpte_remove)(unsigned long hpte_group);
	int	(*hpte_removebolted)(unsigned long ea,
				     int psize, int ssize);
	void	(*flush_hash_range)(unsigned long number, int local);
	void	(*hugepage_invalidate)(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local);
	int	(*resize_hpt)(unsigned long shift);
	/*
	 * Clear the entire hash table; used when tearing the
	 * hash MMU down, e.g. on the kexec path.
	 */
	void	(*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

static inline unsigned int ap_to_shift(unsigned long ap)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
		if (mmu_psize_defs[psize].ap == ap)
			return mmu_psize_defs[psize].shift;
	}

	return -1;
}

static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encoded page number shift.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA
 * by 12 bits, which lets us address up to a 76-bit VA. For the hash we
 * can ignore the page-size bits of the VA, and for HPTE encoding we
 * ignore up to 23 bits of it, so dropping the lower 12 bits works for
 * all cases including 4k pages.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
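
/*
 * Example (added by the editor): the LP field occupies bits 19:12 of
 * the second HPTE doubleword. LP_MASK(i) selects its low (8 - i) bits,
 * e.g. LP_MASK(0) == 0xff000 (all eight bits) and LP_MASK(4) == 0xf000
 * (the low four), matching how penc encodings use a variable number of
 * LP bits depending on the base/actual page-size combination.
 */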

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * This array is indexed by the LP field of the HPTE second dword.
 * Since this field may contain some RPN bits, some entries are
 * replicated so that we get the same value irrespective of RPN.
 * The top 4 bits are the page size index (MMU_PAGE_*) for the
 * actual page size, the bottom 4 bits are the base page size.
 */
extern u8 hpte_page_sizes[1 << LP_BITS];

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	/* An HPTE without the "large" bit set maps a plain 4kB page */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}
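
/*
 * Decoding sketch (added by the editor, derived from the code above):
 * if hpte_page_sizes[lp] holds (actual << 4) | base, then
 * hpte_page_size() returns 1ul << mmu_psize_defs[actual].shift and
 * hpte_base_page_size() returns 1ul << mmu_psize_defs[base].shift; a
 * zero entry means the LP value matches no supported size combination.
 */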

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * ISA v3.0 defines a new HPTE format, which differs from the old
 * format in a number of ways. In particular the segment size (B)
 * field moved from the top bits of the first doubleword into the
 * second doubleword. These helpers convert between the two layouts.
 */
static inline unsigned long hpte_old_to_new_v(unsigned long v)
{
	/* trim AVPN, drop B */
	return v & HPTE_V_COMMON_BITS;
}

static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st doubleword to 2nd doubleword */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
	       (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* insert B field */
	return (v & HPTE_V_COMMON_BITS) |
	       ((r & HPTE_R_3_0_SSIZE_MASK) <<
		(HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static inline unsigned long hpte_new_to_old_r(unsigned long r)
{
	/* clear out B field */
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}

static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
{
	unsigned long hpte_v;

	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	return hpte_v;
}
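
/*
 * Worked example (added by the editor): a 1T segment has B = 0b01. In
 * the pre-3.0 layout that value sits in v[63:62] (HPTE_V_SSIZE_SHIFT
 * == 62); hpte_old_to_new_r() shifts it down and re-inserts it at
 * r[59:58] (HPTE_R_3_0_SSIZE_SHIFT == 58), while hpte_new_to_old_v()
 * shifts it back up by the 4-bit difference between the two positions.
 */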

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume pa is already "clean", i.e. properly
 * aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}

/*
 * Build a VPN: the VSID in the high bits, the page index within the
 * segment in the low bits.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
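
/*
 * Layout example (added by the editor): for a 256M segment,
 * s_shift == 28, so the low 16 bits of the VPN are the 4kB page index
 * within the segment ((ea >> 12) & 0xffff) and the VSID occupies the
 * bits above them. For a 1T segment the page-index field widens to
 * 28 bits (s_shift == 40).
 */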

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	unsigned long mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
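
/*
 * Worked example (added by the editor): with a 256M segment and 4kB
 * pages (shift == 12), mask == 0xffff and the expression reduces to
 * hash = vsid ^ page_index, where page_index is the 16-bit page number
 * within the segment. With 64kB pages (shift == 16) the low four bits
 * of the index are dropped first. The result is truncated to 39 bits
 * (0x7fffffffff) and further masked with htab_hash_mask when indexing
 * the hash table.
 */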

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2
#define HPTE_USE_KERNEL_KEY	0x4

long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long pa,
			   unsigned long rflags, unsigned long vflags, int psize, int ssize);
extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc);
int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr, unsigned long msr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hash__setup_new_exec(void);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

struct slb_entry {
	u64	esid;
	u64	vsid;
};

extern void slb_initialize(void);
void slb_flush_and_restore_bolted(void);
void slb_flush_all_realmode(void);
void __slb_restore_bolted_realmode(void);
void slb_restore_bolted_realmode(void);
void slb_save_contents(struct slb_entry *slb_ptr);
void slb_dump_contents(struct slb_entry *slb_ptr);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
void preload_new_slb_context(unsigned long start, unsigned long sp);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a proto-VSID from the mmu context id and the
 * effective segment id of the address: CONTEXT_BITS + ESID_BITS bits
 * (40 bits for 256MB segments, 28 bits for 1T segments).
 *
 * For user processes the max context id is limited to MAX_USER_CONTEXT;
 * kernel space gets its context ids from get_kernel_context() below.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is co-prime to
 * VSID_MODULUS, making this a 1:1 scrambling function. Because the
 * modulus is 2^n-1 we can compute it efficiently without a divide or
 * extra multiply (see vsid_scramble() below).
 *
 * VSID 0 denotes an invalid VSID. That rules out context id 0 (a
 * context id of 0 with an EA of 0 gives a proto-VSID of 0, which
 * scrambles to VSID 0) and the last segment of the last context,
 * whose proto-VSID equals the modulus and also scrambles to 0.
 */
#define VA_BITS			68
#define CONTEXT_BITS		19
#define ESID_BITS		(VA_BITS - (SID_SHIFT + CONTEXT_BITS))
#define ESID_BITS_1T		(VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))

#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)

/*
 * Some configs support more physical memory than one kernel context
 * can cover, in which case the linear mapping needs more than one
 * context id. The vmalloc, IO and vmemmap regions get one id each.
 */
#if (H_MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
#define MAX_KERNEL_CTX_CNT	(1UL << (H_MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
#else
#define MAX_KERNEL_CTX_CNT	1
#endif

#define MAX_VMALLOC_CTX_CNT	1
#define MAX_IO_CTX_CNT		1
#define MAX_VMEMMAP_CTX_CNT	1

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. VSID 0 is reserved as invalid,
 * and the last segment of the last context is unusable too (see the
 * VSID allocation comment above).
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)

/*
 * The first context id available to userspace; ids below this are
 * reserved for the invalid context (0) and the kernel contexts.
 */
#define MIN_USER_CONTEXT	(MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
				 MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)

/*
 * Max user context id when the CPU supports only 65 bits of VA
 * (i.e. MMU_FTR_68_BIT_VA is not set).
 */
#define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)

/*
 * The proto-VSID scramble multiplier. It must be co-prime to the
 * modulus 2^VSID_BITS - 1 so that the scramble is a bijection, and
 * small enough that protovsid * VSID_MULTIPLIER does not overflow
 * 64 bits (the modulus trick in vsid_scramble() relies on that).
 * The same 24-bit prime is used for both segment sizes.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
#define VSID_BITS_65_256M	(65 - SID_SHIFT)
/*
 * Modular multiplicative inverse of VSID_MULTIPLIER_256M modulo
 * 2^VSID_BITS_256M - 1, used when inverting the scramble.
 */
#define VSID_MULINV_256M	ASM_CONST(665548017062)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
#define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)
#define VSID_MULINV_1T		ASM_CONST(209034062)

/* 1TB VSID reserved for VRMA */
#define VRMA_VSID	0x1ffffffUL
#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))
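
/*
 * Arithmetic check (added by the editor): with VA_BITS == 68 and
 * SID_SHIFT == 28, VSID_BITS_256M == 40, so a 256M proto-VSID is the
 * 19 context bits concatenated with ESID_BITS == 21 esid bits, exactly
 * 40 bits. USER_VSID_RANGE == 1 << 49, i.e. each context id covers
 * 512TB of user address space.
 */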

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)
#define LOW_SLICE_ARRAY_SZ	(BITS_PER_LONG / BITS_PER_BYTE)
#define TASK_SLICE_ARRAY_SZ(x)	((x)->hash_context->slb_addr_limit >> 41)
#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * none of the protection bits are set, the top level is represented
 * by NULL pointers; lower levels are allocated only when needed. The
 * low_prot array covers the low end of the address space without
 * going through the top level.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
#endif /* CONFIG_PPC_SUBPAGE_PROT */

/*
 * One bit per slice. We have lower slices which cover 256MB segments
 * up to the 4G range, giving us 16 low slices. Above that we track
 * slices in 1TB units.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};

struct hash_mm_context {
	u16 user_psize; /* page size index */

	/* SLB page size encodings */
	unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;
#ifdef CONFIG_PPC_64K_PAGES
	struct slice_mask mask_64k;
#endif
	struct slice_mask mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_16m;
	struct slice_mask mask_16g;
#endif

#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table *spt;
#endif
};

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

/* simplified form avoiding mod operation */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})

#else /* 1 */
static inline unsigned long vsid_scramble(unsigned long protovsid,
					  unsigned long vsid_multiplier,
					  int vsid_bits)
{
	unsigned long vsid;
	unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
	/*
	 * We use the same multiplier for both kernel and user contexts.
	 */
	vsid = protovsid * vsid_multiplier;
	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
}
#endif /* 1 */
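
/*
 * Why the fold works (added by the editor): for a modulus of the form
 * 2^n - 1, x mod (2^n - 1) can be computed by splitting x into n-bit
 * "digits" and summing them, since 2^n == 1 (mod 2^n - 1). E.g. with
 * n == 4 (modulus 15): 38 mod 15 == 8, and indeed
 * (38 >> 4) + (38 & 15) == 2 + 6 == 8. The final
 * (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus step folds the one
 * remaining carry, mapping a sum equal to the modulus back to 0.
 */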

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	unsigned long va_bits = VA_BITS;
	unsigned long vsid_bits;
	unsigned long protovsid;

	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & EA_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		vsid_bits = va_bits - SID_SHIFT;
		protovsid = (context << ESID_BITS) |
			((ea >> SID_SHIFT) & ESID_BITS_MASK);
		return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
	}
	/* 1T segments use a different proto-VSID layout */
	vsid_bits = va_bits - SID_SHIFT_1T;
	protovsid = (context << ESID_BITS_1T) |
		((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
}
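
/*
 * Walk-through (added by the editor): on a 68-bit-VA machine, a
 * 256M-segment proto-VSID is (context << 21) | esid, i.e. 19 context
 * bits plus ESID_BITS == 21 esid bits == 40 bits, matching
 * vsid_bits == VA_BITS - SID_SHIFT. When MMU_FTR_68_BIT_VA is absent,
 * va_bits drops to 65 and the same layout is scrambled modulo
 * 2^37 - 1 instead (65 - 28 == 37 bits), with context ids capped at
 * MAX_USER_CONTEXT_65BIT_VA.
 */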

/*
 * For kernel space we use context ids 1 and up: the linear map takes
 * the first MAX_KERNEL_CTX_CNT ids (one per MAX_EA_BITS_PER_CONTEXT
 * sized chunk), followed by one id each for the vmalloc, IO and
 * vmemmap regions. The context id returned here is combined with the
 * address's effective segment id by get_vsid() above.
 */
static inline unsigned long get_kernel_context(unsigned long ea)
{
	unsigned long region_id = get_region_id(ea);
	unsigned long ctx;
	/*
	 * Depending on kernel config, the kernel linear map region can
	 * have one context or more.
	 */
	if (region_id == LINEAR_MAP_REGION_ID) {
		/*
		 * We already verified ea to be not beyond the addr limit.
		 */
		ctx = 1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
	} else
		ctx = region_id + MAX_KERNEL_CTX_CNT - 1;
	return ctx;
}
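
/*
 * Example (added by the editor): with a single linear-map context
 * (MAX_KERNEL_CTX_CNT == 1), an address in the first
 * 2^MAX_EA_BITS_PER_CONTEXT bytes of the linear map gets context 1,
 * and each subsequent region id maps to the next context id via
 * region_id + MAX_KERNEL_CTX_CNT - 1.
 */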

/*
 * This is only valid for addresses >= PAGE_OFFSET
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	if (!is_kernel_addr(ea))
		return 0;

	context = get_kernel_context(ea);
	return get_vsid(context, ea, ssize);
}

unsigned htab_shift_for_mem_size(unsigned long mem_size);

enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map (0xc000000000000000) */
	KSTACK_INDEX	= 1, /* Kernel stack map */
};

#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
					   unsigned long flags)
{
	return (vsid << slb_vsid_shift(ssize)) | flags |
		((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}
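
/*
 * Usage sketch (added by the editor, not a definition from this file):
 * a bolted kernel SLB entry for an address 'ea' is built as
 *
 *	esid = mk_esid_data(ea, mmu_kernel_ssize, LINEAR_INDEX);
 *	vsid = mk_vsid_data(ea, mmu_kernel_ssize, SLB_VSID_KERNEL | llp);
 *
 * where llp is the SLB_VSID_L/LP encoding for the mapping's page size
 * (see get_sllp_encoding() above); the pair is then loaded with slbmte.
 */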

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */