// SPDX-License-Identifier: GPL-2.0
/*
 * tracing_map - lock-free map for tracing
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 *
 * tracing_map implementation inspired by lock-free map algorithms
 * originated by Dr. Cliff Click:
 *
 * http://www.azulsystems.com/blog/cliff/2007-03-26-non-blocking-hashtable
 * http://www.azulsystems.com/events/javaone_2007/2007_LockFreeHash.pdf
 */

#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "tracing_map.h"
#include "trace.h"

/*
 * NOTE: For a detailed description of the data structures used by
 * these functions (such as tracing_map_elt) please see the overview
 * of tracing_map data structures at the beginning of tracing_map.h.
 */

/**
 * tracing_map_update_sum - Add a value to a tracing_map_elt's sum field
 * @elt: The tracing_map_elt
 * @i: The index of the given sum associated with the tracing_map_elt
 * @n: The value to add to the sum
 *
 * Add n to sum i associated with the specified tracing_map_elt
 * instance.  The index i is the index returned by the call to
 * tracing_map_add_sum_field() when the tracing map was set up.
 */
void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n)
{
	atomic64_add(n, &elt->fields[i].sum);
}

/**
 * tracing_map_read_sum - Return the value of a tracing_map_elt's sum field
 * @elt: The tracing_map_elt
 * @i: The index of the given sum associated with the tracing_map_elt
 *
 * Retrieve the value of the sum i associated with the specified
 * tracing_map_elt instance.  The index i is the index returned by the
 * call to tracing_map_add_sum_field() when the tracing map was set up.
 *
 * Return: The sum associated with field i for elt.
 */
u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i)
{
	return (u64)atomic64_read(&elt->fields[i].sum);
}

/**
 * tracing_map_set_var - Assign a tracing_map_elt's variable field
 * @elt: The tracing_map_elt
 * @i: The index of the given variable associated with the tracing_map_elt
 * @n: The value to assign
 *
 * Assign n to variable i associated with the specified tracing_map_elt
 * instance.  The index i is the index returned by the call to
 * tracing_map_add_var() when the tracing map was set up.
 */
void tracing_map_set_var(struct tracing_map_elt *elt, unsigned int i, u64 n)
{
	atomic64_set(&elt->vars[i], n);
	elt->var_set[i] = true;
}

/**
 * tracing_map_var_set - Return whether or not a variable has been set
 * @elt: The tracing_map_elt
 * @i: The index of the given variable associated with the tracing_map_elt
 *
 * Return true if the variable has been set, false otherwise.  The
 * index i is the index returned by the call to tracing_map_add_var()
 * when the tracing map was set up.
 */
bool tracing_map_var_set(struct tracing_map_elt *elt, unsigned int i)
{
	return elt->var_set[i];
}

/**
 * tracing_map_read_var - Return the value of a tracing_map_elt's variable field
 * @elt: The tracing_map_elt
 * @i: The index of the given variable associated with the tracing_map_elt
 *
 * Retrieve the value of the variable i associated with the specified
 * tracing_map_elt instance.  The index i is the index returned by the
 * call to tracing_map_add_var() when the tracing map was set up.
 *
 * Return: The variable value associated with field i for elt.
 */
u64 tracing_map_read_var(struct tracing_map_elt *elt, unsigned int i)
{
	return (u64)atomic64_read(&elt->vars[i]);
}

/**
 * tracing_map_read_var_once - Return and reset a tracing_map_elt's variable field
 * @elt: The tracing_map_elt
 * @i: The index of the given variable associated with the tracing_map_elt
 *
 * Retrieve the value of the variable i associated with the specified
 * tracing_map_elt instance, and reset the variable to the 'not set'
 * state.  The index i is the index returned by the call to
 * tracing_map_add_var() when the tracing map was set up.  The reset
 * essentially makes the variable a read-once variable if it's only
 * accessed using this function.
 *
 * Return: The variable value associated with field i for elt.
 */
u64 tracing_map_read_var_once(struct tracing_map_elt *elt, unsigned int i)
{
	elt->var_set[i] = false;
	return (u64)atomic64_read(&elt->vars[i]);
}

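/*
 * Illustrative pairing of the variable helpers above (a sketch, not
 * part of this file's API surface; 'elt' and 'idx' are assumed to
 * come from tracing_map_insert() and tracing_map_add_var()):
 *
 *	tracing_map_set_var(elt, idx, val);	// producer publishes val
 *
 *	if (tracing_map_var_set(elt, idx))	// consumer checks, then
 *		val = tracing_map_read_var_once(elt, idx);
 *
 * Because tracing_map_read_var_once() clears var_set[], a consumer
 * using only that accessor observes each tracing_map_set_var() at
 * most once.
 */
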
int tracing_map_cmp_string(void *val_a, void *val_b)
{
	char *a = val_a;
	char *b = val_b;

	return strcmp(a, b);
}

int tracing_map_cmp_none(void *val_a, void *val_b)
{
	return 0;
}

static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
{
	u64 a = atomic64_read((atomic64_t *)val_a);
	u64 b = atomic64_read((atomic64_t *)val_b);

	return (a > b) ? 1 : ((a < b) ? -1 : 0);
}

#define DEFINE_TRACING_MAP_CMP_FN(type)					\
static int tracing_map_cmp_##type(void *val_a, void *val_b)		\
{									\
	type a = (type)(*(u64 *)val_a);					\
	type b = (type)(*(u64 *)val_b);					\
									\
	return (a > b) ? 1 : ((a < b) ? -1 : 0);			\
}

DEFINE_TRACING_MAP_CMP_FN(s64);
DEFINE_TRACING_MAP_CMP_FN(u64);
DEFINE_TRACING_MAP_CMP_FN(s32);
DEFINE_TRACING_MAP_CMP_FN(u32);
DEFINE_TRACING_MAP_CMP_FN(s16);
DEFINE_TRACING_MAP_CMP_FN(u16);
DEFINE_TRACING_MAP_CMP_FN(s8);
DEFINE_TRACING_MAP_CMP_FN(u8);

tracing_map_cmp_fn_t tracing_map_cmp_num(int field_size,
					 int field_is_signed)
{
	tracing_map_cmp_fn_t fn = tracing_map_cmp_none;

	switch (field_size) {
	case 8:
		if (field_is_signed)
			fn = tracing_map_cmp_s64;
		else
			fn = tracing_map_cmp_u64;
		break;
	case 4:
		if (field_is_signed)
			fn = tracing_map_cmp_s32;
		else
			fn = tracing_map_cmp_u32;
		break;
	case 2:
		if (field_is_signed)
			fn = tracing_map_cmp_s16;
		else
			fn = tracing_map_cmp_u16;
		break;
	case 1:
		if (field_is_signed)
			fn = tracing_map_cmp_s8;
		else
			fn = tracing_map_cmp_u8;
		break;
	}

	return fn;
}
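
/*
 * Example of selecting and applying a comparator (a sketch; the
 * values shown are arbitrary).  Note that the generated comparators
 * read a full u64 and then truncate to the field type, so the
 * arguments must point at u64-sized storage:
 *
 *	u64 a = (u64)(s32)-1;	// -1 as a sign-extended 4-byte value
 *	u64 b = 1;
 *	tracing_map_cmp_fn_t fn = tracing_map_cmp_num(4, 1);
 *
 *	fn(&a, &b);	// returns -1: as s32, -1 < 1
 */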

static int tracing_map_add_field(struct tracing_map *map,
				 tracing_map_cmp_fn_t cmp_fn)
{
	int ret = -EINVAL;

	if (map->n_fields < TRACING_MAP_FIELDS_MAX) {
		ret = map->n_fields;
		map->fields[map->n_fields++].cmp_fn = cmp_fn;
	}

	return ret;
}

/**
 * tracing_map_add_sum_field - Add a field describing a tracing_map sum
 * @map: The tracing_map
 *
 * Add a sum field to the map and return the index identifying it in
 * the map and associated tracing_map_elts.  This is the index used
 * for instance to update a sum for a particular tracing_map_elt using
 * tracing_map_update_sum() or read it via tracing_map_read_sum().
 *
 * Return: The index identifying the field in the map and associated
 * tracing_map_elts, or -EINVAL on error.
 */
int tracing_map_add_sum_field(struct tracing_map *map)
{
	return tracing_map_add_field(map, tracing_map_cmp_atomic64);
}

/**
 * tracing_map_add_var - Add a field describing a tracing_map var
 * @map: The tracing_map
 *
 * Add a var to the map and return the index identifying it in the map
 * and associated tracing_map_elts.  This is the index used for
 * instance to update a var for a particular tracing_map_elt using
 * tracing_map_set_var() or read it via tracing_map_read_var().
 *
 * Return: The index identifying the var in the map and associated
 * tracing_map_elts, or -EINVAL on error.
 */
int tracing_map_add_var(struct tracing_map *map)
{
	int ret = -EINVAL;

	if (map->n_vars < TRACING_MAP_VARS_MAX)
		ret = map->n_vars++;

	return ret;
}

/**
 * tracing_map_add_key_field - Add a field describing a tracing_map key
 * @map: The tracing_map
 * @offset: The offset within the key
 * @cmp_fn: The comparison function that will be used to sort on the key
 *
 * Let the map know there is a key and that if it's used as a sort key
 * what function should be used to sort it.
 *
 * A key can be a subset of a compound key; for that purpose, the
 * offset param is used to describe where within the compound key
 * the key referenced by this key field resides.
 *
 * Return: The index identifying the field in the map and associated
 * tracing_map_elts, or -EINVAL on error.
 */
int tracing_map_add_key_field(struct tracing_map *map,
			      unsigned int offset,
			      tracing_map_cmp_fn_t cmp_fn)
{
	int idx = tracing_map_add_field(map, cmp_fn);

	if (idx < 0)
		return idx;

	map->fields[idx].offset = offset;

	map->key_idx[map->n_keys++] = idx;

	return idx;
}

static void tracing_map_array_clear(struct tracing_map_array *a)
{
	unsigned int i;

	if (!a->pages)
		return;

	for (i = 0; i < a->n_pages; i++)
		memset(a->pages[i], 0, PAGE_SIZE);
}

static void tracing_map_array_free(struct tracing_map_array *a)
{
	unsigned int i;

	if (!a)
		return;

	if (!a->pages)
		goto free;

	for (i = 0; i < a->n_pages; i++) {
		if (!a->pages[i])
			break;
		free_page((unsigned long)a->pages[i]);
	}

	kfree(a->pages);

 free:
	kfree(a);
}

static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
							 unsigned int entry_size)
{
	struct tracing_map_array *a;
	unsigned int i;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return NULL;

	a->entry_size_shift = fls(roundup_pow_of_two(entry_size) - 1);
	a->entries_per_page = PAGE_SIZE / (1 << a->entry_size_shift);
	a->n_pages = n_elts / a->entries_per_page;
	if (!a->n_pages)
		a->n_pages = 1;
	a->entry_shift = fls(a->entries_per_page) - 1;
	a->entry_mask = (1 << a->entry_shift) - 1;

	a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL);
	if (!a->pages)
		goto free;

	for (i = 0; i < a->n_pages; i++) {
		a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!a->pages[i])
			goto free;
	}
 out:
	return a;
 free:
	tracing_map_array_free(a);
	a = NULL;

	goto out;
}
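
/*
 * Worked example of the bookkeeping above (assuming 4K pages and a
 * 16-byte struct tracing_map_entry, which is what a u32 key_hash plus
 * a pointer pads out to on 64-bit):
 *
 *	entry_size_shift = fls(roundup_pow_of_two(16) - 1) = 4
 *	entries_per_page = 4096 >> 4 = 256
 *	entry_shift      = fls(256) - 1 = 8
 *	entry_mask       = 255
 *
 * The TRACING_MAP_ENTRY()/TRACING_MAP_ELT() accessors then locate
 * entry idx in pages[idx >> entry_shift] at byte offset
 * ((idx & entry_mask) << entry_size_shift); e.g. idx 300 lands on
 * page 1 at byte offset (300 & 255) << 4 = 704.
 */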

static void tracing_map_elt_clear(struct tracing_map_elt *elt)
{
	unsigned int i;

	for (i = 0; i < elt->map->n_fields; i++)
		if (elt->fields[i].cmp_fn == tracing_map_cmp_atomic64)
			atomic64_set(&elt->fields[i].sum, 0);

	for (i = 0; i < elt->map->n_vars; i++) {
		atomic64_set(&elt->vars[i], 0);
		elt->var_set[i] = false;
	}

	if (elt->map->ops && elt->map->ops->elt_clear)
		elt->map->ops->elt_clear(elt);
}

static void tracing_map_elt_init_fields(struct tracing_map_elt *elt)
{
	unsigned int i;

	tracing_map_elt_clear(elt);

	for (i = 0; i < elt->map->n_fields; i++) {
		elt->fields[i].cmp_fn = elt->map->fields[i].cmp_fn;

		if (elt->fields[i].cmp_fn != tracing_map_cmp_atomic64)
			elt->fields[i].offset = elt->map->fields[i].offset;
	}
}

static void tracing_map_elt_free(struct tracing_map_elt *elt)
{
	if (!elt)
		return;

	if (elt->map->ops && elt->map->ops->elt_free)
		elt->map->ops->elt_free(elt);
	kfree(elt->fields);
	kfree(elt->vars);
	kfree(elt->var_set);
	kfree(elt->key);
	kfree(elt);
}

static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map)
{
	struct tracing_map_elt *elt;
	int err = 0;

	elt = kzalloc(sizeof(*elt), GFP_KERNEL);
	if (!elt)
		return ERR_PTR(-ENOMEM);

	elt->map = map;

	elt->key = kzalloc(map->key_size, GFP_KERNEL);
	if (!elt->key) {
		err = -ENOMEM;
		goto free;
	}

	elt->fields = kcalloc(map->n_fields, sizeof(*elt->fields), GFP_KERNEL);
	if (!elt->fields) {
		err = -ENOMEM;
		goto free;
	}

	elt->vars = kcalloc(map->n_vars, sizeof(*elt->vars), GFP_KERNEL);
	if (!elt->vars) {
		err = -ENOMEM;
		goto free;
	}

	elt->var_set = kcalloc(map->n_vars, sizeof(*elt->var_set), GFP_KERNEL);
	if (!elt->var_set) {
		err = -ENOMEM;
		goto free;
	}

	tracing_map_elt_init_fields(elt);

	if (map->ops && map->ops->elt_alloc) {
		err = map->ops->elt_alloc(elt);
		if (err)
			goto free;
	}
	return elt;
 free:
	tracing_map_elt_free(elt);

	return ERR_PTR(err);
}

static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
{
	struct tracing_map_elt *elt = NULL;
	int idx;

	/*
	 * Saturate next_elt at max_elts rather than incrementing it
	 * unconditionally; a bare atomic_inc_return() could eventually
	 * wrap around and hand out in-use elements again.
	 */
	idx = atomic_fetch_add_unless(&map->next_elt, 1, map->max_elts);
	if (idx < map->max_elts) {
		elt = *(TRACING_MAP_ELT(map->elts, idx));
		if (map->ops && map->ops->elt_init)
			map->ops->elt_init(elt);
	}

	return elt;
}


static void tracing_map_free_elts(struct tracing_map *map)
{
	unsigned int i;

	if (!map->elts)
		return;

	for (i = 0; i < map->max_elts; i++) {
		tracing_map_elt_free(*(TRACING_MAP_ELT(map->elts, i)));
		*(TRACING_MAP_ELT(map->elts, i)) = NULL;
	}

	tracing_map_array_free(map->elts);
	map->elts = NULL;
}

static int tracing_map_alloc_elts(struct tracing_map *map)
{
	unsigned int i;

	map->elts = tracing_map_array_alloc(map->max_elts,
					    sizeof(struct tracing_map_elt *));
	if (!map->elts)
		return -ENOMEM;

	for (i = 0; i < map->max_elts; i++) {
		*(TRACING_MAP_ELT(map->elts, i)) = tracing_map_elt_alloc(map);
		if (IS_ERR(*(TRACING_MAP_ELT(map->elts, i)))) {
			*(TRACING_MAP_ELT(map->elts, i)) = NULL;
			tracing_map_free_elts(map);

			return -ENOMEM;
		}
	}

	return 0;
}

static inline bool keys_match(void *key, void *test_key, unsigned int key_size)
{
	bool match = true;

	if (memcmp(key, test_key, key_size))
		match = false;

	return match;
}

static inline struct tracing_map_elt *
__tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
{
	u32 idx, key_hash, test_key;
	int dup_try = 0;
	struct tracing_map_entry *entry;
	struct tracing_map_elt *val;

	key_hash = jhash(key, map->key_size, 0);
	if (key_hash == 0)
		key_hash = 1;
	idx = key_hash >> (32 - (map->map_bits + 1));

	while (1) {
		idx &= (map->map_size - 1);
		entry = TRACING_MAP_ENTRY(map->map, idx);
		test_key = entry->key;

		if (test_key && test_key == key_hash) {
			val = READ_ONCE(entry->val);
			if (val &&
			    keys_match(key, val->key, map->key_size)) {
				if (!lookup_only)
					atomic64_inc(&map->hits);
				return val;
			} else if (unlikely(!val)) {
				/*
				 * The key is present, but val (the
				 * pointer to the elt struct) is still
				 * NULL, which means some other thread
				 * is in the process of inserting an
				 * element whose key_hash matches this
				 * one - possibly with the same key.
				 * Retry the slot a bounded number of
				 * times before counting the insertion
				 * as a drop.
				 */
				dup_try++;
				if (dup_try > map->map_size) {
					atomic64_inc(&map->drops);
					break;
				}
				continue;
			}
		}

		if (!test_key) {
			if (lookup_only)
				break;

			if (!cmpxchg(&entry->key, 0, key_hash)) {
				struct tracing_map_elt *elt;

				elt = get_free_elt(map);
				if (!elt) {
					atomic64_inc(&map->drops);
					entry->key = 0;
					break;
				}

				memcpy(elt->key, key, map->key_size);
				entry->val = elt;
				atomic64_inc(&map->hits);

				return entry->val;
			} else {
				/*
				 * cmpxchg() failed: some other thread
				 * claimed the slot first.  Loop around
				 * once more to check whether that
				 * thread's key matches this one.
				 */
				dup_try++;
				continue;
			}
		}

		idx++;
	}

	return NULL;
}

/**
 * tracing_map_insert - Insert key and/or retrieve val from a tracing_map
 * @map: The tracing_map to insert into
 * @key: The key to insert
 *
 * Inserts a key into a tracing_map and creates and returns a new
 * tracing_map_elt for it, or if the key has already been inserted by
 * a previous call, returns the tracing_map_elt already associated
 * with it.  When the map was created, the number of elements to be
 * allocated for the map was specified (internally maintained as
 * 'max_elts' in struct tracing_map), and that number of
 * tracing_map_elts was created by tracing_map_init().  This is the
 * pre-allocated pool of tracing_map_elts that tracing_map_insert()
 * will allocate from when adding new keys.  Once that pool is
 * exhausted, tracing_map_insert() is useless and will return NULL to
 * signal that state.  There are two user-visible tracing_map
 * variables, 'hits' and 'drops', which are updated by this function:
 * every time an element is either successfully inserted or retrieved,
 * the 'hits' value is incremented; every time an element insertion
 * fails, the 'drops' value is incremented.
 *
 * This is a lock-free tracing map insertion function implementing a
 * modified form of Cliff Click's basic insertion algorithm.  It
 * requires the table size be a power of two; to keep the table sparse
 * and collisions rare, the internal table is made double the
 * requested size (map_size = max_elts * 2).  The full hash of the key
 * is kept in each table entry, so slots whose hash doesn't match can
 * be skipped without a full key comparison.
 *
 * Return: the tracing_map_elt pointer val associated with the key.
 * If this was a newly inserted key, the val will be a newly allocated
 * and associated tracing_map_elt pointer val.  If the key wasn't
 * found and the pool of tracing_map_elts has been exhausted, NULL is
 * returned and no further insertions will succeed.
 */
struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key)
{
	return __tracing_map_insert(map, key, false);
}

/**
 * tracing_map_lookup - Retrieve val from a tracing_map
 * @map: The tracing_map to perform the lookup on
 * @key: The key to look up
 *
 * Looks up key in tracing_map and if found returns the matching
 * tracing_map_elt.  This is a lock-free lookup; see
 * tracing_map_insert() for details on tracing_map and how it works.
 * Unlike tracing_map_insert(), a lookup never claims a free element
 * for a missing key and doesn't count a successful retrieval as a
 * 'hit'.
 *
 * Return: the tracing_map_elt pointer val associated with the key.
 * If the key wasn't found, NULL is returned.
 */
struct tracing_map_elt *tracing_map_lookup(struct tracing_map *map, void *key)
{
	return __tracing_map_insert(map, key, true);
}

/**
 * tracing_map_destroy - Destroy a tracing_map
 * @map: The tracing_map to destroy
 *
 * Frees a tracing_map along with its associated array of
 * tracing_map_elts.
 *
 * Callers should make sure there are no readers or writers actively
 * reading or inserting into the map before calling this.
 */
void tracing_map_destroy(struct tracing_map *map)
{
	if (!map)
		return;

	tracing_map_free_elts(map);

	tracing_map_array_free(map->map);
	kfree(map);
}

/**
 * tracing_map_clear - Clear a tracing_map
 * @map: The tracing_map to clear
 *
 * Resets the tracing map to a cleared or initial state.  The
 * tracing_map_elts are all cleared, and the array of struct
 * tracing_map_entry is reset to an initialized state.
 *
 * Callers should make sure there are no writers actively inserting
 * into the map before calling this.
 */
void tracing_map_clear(struct tracing_map *map)
{
	unsigned int i;

	/* 0, not -1: get_free_elt() saturates next_elt at max_elts */
	atomic_set(&map->next_elt, 0);
	atomic64_set(&map->hits, 0);
	atomic64_set(&map->drops, 0);

	tracing_map_array_clear(map->map);

	for (i = 0; i < map->max_elts; i++)
		tracing_map_elt_clear(*(TRACING_MAP_ELT(map->elts, i)));
}

static void set_sort_key(struct tracing_map *map,
			 struct tracing_map_sort_key *sort_key)
{
	map->sort_key = *sort_key;
}

/**
 * tracing_map_create - Create a lock-free map and element pool
 * @map_bits: The size of the map (2 ** map_bits)
 * @key_size: The size of the key for the map in bytes
 * @ops: Optional client-defined tracing_map_ops instance
 * @private_data: Client data associated with the map
 *
 * Creates and sets up a map to contain 2 ** map_bits number of
 * elements (internally maintained as 'max_elts' in struct
 * tracing_map).  The user-specified size reflects the maximum number
 * of elements that can be contained in the table requested by the
 * user - internally we double that in order to keep the table sparse
 * and keep collisions manageable.
 *
 * A tracing_map is a special-purpose map designed to aggregate or
 * 'sum' one or more values associated with a specific object of type
 * tracing_map_elt, which is attached by the map to a given key.
 *
 * tracing_map_create() sets up the map itself, and provides
 * operations for inserting tracing_map_elts, but doesn't allocate the
 * tracing_map_elts themselves, or provide a means for describing the
 * keys or sums associated with the tracing_map_elts.  All
 * tracing_map_elts for a given map have the same set of sums and
 * keys, which are defined by the client using
 * tracing_map_add_key_field() and tracing_map_add_sum_field().  Once
 * all the fields are defined, the pool of elements allocated for the
 * map can be created by calling tracing_map_init(); this keeps all
 * allocation out of the map insertion path.  When tracing_map_init()
 * returns, elements can be inserted into the map using
 * tracing_map_insert().
 *
 * See tracing_map.h for a description of tracing_map_ops.
 *
 * Return: the tracing_map pointer if successful, ERR_PTR if not.
 */
struct tracing_map *tracing_map_create(unsigned int map_bits,
				       unsigned int key_size,
				       const struct tracing_map_ops *ops,
				       void *private_data)
{
	struct tracing_map *map;
	unsigned int i;

	if (map_bits < TRACING_MAP_BITS_MIN ||
	    map_bits > TRACING_MAP_BITS_MAX)
		return ERR_PTR(-EINVAL);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	map->map_bits = map_bits;
	map->max_elts = (1 << map_bits);
	atomic_set(&map->next_elt, 0);

	map->map_size = (1 << (map_bits + 1));
	map->ops = ops;

	map->private_data = private_data;

	map->map = tracing_map_array_alloc(map->map_size,
					   sizeof(struct tracing_map_entry));
	if (!map->map)
		goto free;

	map->key_size = key_size;
	for (i = 0; i < TRACING_MAP_KEYS_MAX; i++)
		map->key_idx[i] = -1;
 out:
	return map;
 free:
	tracing_map_destroy(map);
	map = ERR_PTR(-ENOMEM);

	goto out;
}

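/*
 * Sketch of the expected setup sequence (illustrative only: 'struct
 * my_key' and the 7-bit map size are hypothetical, and error handling
 * is elided):
 *
 *	struct my_key { u64 pid; } key = { .pid = 42 };
 *	struct tracing_map *map;
 *	struct tracing_map_elt *elt;
 *	int key_idx, sum_idx;
 *
 *	map = tracing_map_create(7, sizeof(key), NULL, NULL);
 *	key_idx = tracing_map_add_key_field(map, offsetof(struct my_key, pid),
 *					    tracing_map_cmp_num(8, 0));
 *	sum_idx = tracing_map_add_sum_field(map);
 *	tracing_map_init(map);	// allocates the element pool up front
 *
 *	elt = tracing_map_insert(map, &key);	// in the hot path
 *	if (elt)
 *		tracing_map_update_sum(elt, sum_idx, 1);
 */
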
/**
 * tracing_map_init - Allocate and clear a map's tracing_map_elts
 * @map: The tracing_map to initialize
 *
 * Allocates and clears a pool of tracing_map_elts equal to the
 * user-specified size of 2 ** map_bits (internally maintained as
 * 'max_elts' in struct tracing_map).  Before using, the map fields
 * should be added to the map with tracing_map_add_sum_field() and
 * tracing_map_add_key_field().  tracing_map_init() should then be
 * called to allocate the array of tracing_map_elts, in order to avoid
 * allocating anything in the map insertion path.
 *
 * See tracing_map.h for a description of tracing_map_ops.
 *
 * Return: 0 on success, -EINVAL if fewer than two fields (at least
 * one key and one sum) have been added, or -ENOMEM if the element
 * pool couldn't be allocated.
 */
int tracing_map_init(struct tracing_map *map)
{
	int err;

	if (map->n_fields < 2)
		return -EINVAL; /* need at least 1 key and 1 val */

	err = tracing_map_alloc_elts(map);
	if (err)
		return err;

	tracing_map_clear(map);

	return err;
}

static int cmp_entries_dup(const struct tracing_map_sort_entry **a,
			   const struct tracing_map_sort_entry **b)
{
	int ret = 0;

	if (memcmp((*a)->key, (*b)->key, (*a)->elt->map->key_size))
		ret = 1;

	return ret;
}

static int cmp_entries_sum(const struct tracing_map_sort_entry **a,
			   const struct tracing_map_sort_entry **b)
{
	const struct tracing_map_elt *elt_a, *elt_b;
	struct tracing_map_sort_key *sort_key;
	struct tracing_map_field *field;
	tracing_map_cmp_fn_t cmp_fn;
	void *val_a, *val_b;
	int ret = 0;

	elt_a = (*a)->elt;
	elt_b = (*b)->elt;

	sort_key = &elt_a->map->sort_key;

	field = &elt_a->fields[sort_key->field_idx];
	cmp_fn = field->cmp_fn;

	val_a = &elt_a->fields[sort_key->field_idx].sum;
	val_b = &elt_b->fields[sort_key->field_idx].sum;

	ret = cmp_fn(val_a, val_b);
	if (sort_key->descending)
		ret = -ret;

	return ret;
}

static int cmp_entries_key(const struct tracing_map_sort_entry **a,
			   const struct tracing_map_sort_entry **b)
{
	const struct tracing_map_elt *elt_a, *elt_b;
	struct tracing_map_sort_key *sort_key;
	struct tracing_map_field *field;
	tracing_map_cmp_fn_t cmp_fn;
	void *val_a, *val_b;
	int ret = 0;

	elt_a = (*a)->elt;
	elt_b = (*b)->elt;

	sort_key = &elt_a->map->sort_key;

	field = &elt_a->fields[sort_key->field_idx];

	cmp_fn = field->cmp_fn;

	val_a = elt_a->key + field->offset;
	val_b = elt_b->key + field->offset;

	ret = cmp_fn(val_a, val_b);
	if (sort_key->descending)
		ret = -ret;

	return ret;
}

static void destroy_sort_entry(struct tracing_map_sort_entry *entry)
{
	if (!entry)
		return;

	if (entry->elt_copied)
		tracing_map_elt_free(entry->elt);

	kfree(entry);
}

/**
 * tracing_map_destroy_sort_entries - Destroy an array of sort entries
 * @entries: The entries to destroy
 * @n_entries: The number of entries in the array
 *
 * Destroy the elements returned by a tracing_map_sort_entries() call.
 */
void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
				      unsigned int n_entries)
{
	unsigned int i;

	for (i = 0; i < n_entries; i++)
		destroy_sort_entry(entries[i]);

	vfree(entries);
}

static struct tracing_map_sort_entry *
create_sort_entry(void *key, struct tracing_map_elt *elt)
{
	struct tracing_map_sort_entry *sort_entry;

	sort_entry = kzalloc(sizeof(*sort_entry), GFP_KERNEL);
	if (!sort_entry)
		return NULL;

	sort_entry->key = key;
	sort_entry->elt = elt;

	return sort_entry;
}

static void detect_dups(struct tracing_map_sort_entry **sort_entries,
			int n_entries, unsigned int key_size)
{
	unsigned int dups = 0, total_dups = 0;
	int i;
	void *key;

	if (n_entries < 2)
		return;

	sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *),
	     (int (*)(const void *, const void *))cmp_entries_dup, NULL);

	key = sort_entries[0]->key;
	for (i = 1; i < n_entries; i++) {
		if (!memcmp(sort_entries[i]->key, key, key_size)) {
			dups++; total_dups++;
			continue;
		}
		key = sort_entries[i]->key;
		dups = 0;
	}

	WARN_ONCE(total_dups > 0,
		  "Duplicates detected: %d\n", total_dups);
}

static bool is_key(struct tracing_map *map, unsigned int field_idx)
{
	unsigned int i;

	for (i = 0; i < map->n_keys; i++)
		if (map->key_idx[i] == field_idx)
			return true;
	return false;
}

static void sort_secondary(struct tracing_map *map,
			   const struct tracing_map_sort_entry **entries,
			   unsigned int n_entries,
			   struct tracing_map_sort_key *primary_key,
			   struct tracing_map_sort_key *secondary_key)
{
	int (*primary_fn)(const struct tracing_map_sort_entry **,
			  const struct tracing_map_sort_entry **);
	int (*secondary_fn)(const struct tracing_map_sort_entry **,
			    const struct tracing_map_sort_entry **);
	unsigned int i, start = 0, n_sub = 1;

	if (is_key(map, primary_key->field_idx))
		primary_fn = cmp_entries_key;
	else
		primary_fn = cmp_entries_sum;

	if (is_key(map, secondary_key->field_idx))
		secondary_fn = cmp_entries_key;
	else
		secondary_fn = cmp_entries_sum;

	for (i = 0; i < n_entries - 1; i++) {
		const struct tracing_map_sort_entry **a = &entries[i];
		const struct tracing_map_sort_entry **b = &entries[i + 1];

		if (primary_fn(a, b) == 0) {
			n_sub++;
			if (i < n_entries - 2)
				continue;
		}

		if (n_sub < 2) {
			start = i + 1;
			n_sub = 1;
			continue;
		}

		set_sort_key(map, secondary_key);
		sort(&entries[start], n_sub,
		     sizeof(struct tracing_map_sort_entry *),
		     (int (*)(const void *, const void *))secondary_fn, NULL);
		set_sort_key(map, primary_key);

		start = i + 1;
		n_sub = 1;
	}
}
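
/*
 * Illustration: entries arrive here already sorted on the primary
 * key, e.g. primary values [A, A, A, B, C, C].  The loop above walks
 * adjacent pairs to find each run of equal primary values ([A, A, A]
 * and [C, C]) and re-sorts just that sub-range with the secondary
 * comparator; singleton runs like [B] are left untouched.
 */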

/**
 * tracing_map_sort_entries - Sort the current set of tracing_map_elts in a map
 * @map: The tracing_map
 * @sort_keys: The sort keys to use
 * @n_sort_keys: The number of sort keys (1 or 2)
 * @sort_entries: outval: pointer to allocated and sorted array of entries
 *
 * tracing_map_sort_entries() sorts the current set of entries in the
 * map and returns the list of tracing_map_sort_entries containing
 * them to the client in the sort_entries param.  The client can
 * access the struct tracing_map_elt element of interest directly as
 * the 'elt' field of a returned struct tracing_map_sort_entry object.
 *
 * The sort_key has only two fields: field_idx and descending.
 * 'field_idx' refers to the index of the field added via
 * tracing_map_add_sum_field() or tracing_map_add_key_field() when the
 * tracing_map was initialized.  'descending' is a flag that if set
 * reverses the sort order, which by default is ascending.
 *
 * The client should not hold on to the returned array but should use
 * it and call tracing_map_destroy_sort_entries() when done.
 *
 * Return: the number of sort_entries in the struct
 * tracing_map_sort_entry array, negative on error.
 */
int tracing_map_sort_entries(struct tracing_map *map,
			     struct tracing_map_sort_key *sort_keys,
			     unsigned int n_sort_keys,
			     struct tracing_map_sort_entry ***sort_entries)
{
	int (*cmp_entries_fn)(const struct tracing_map_sort_entry **,
			      const struct tracing_map_sort_entry **);
	struct tracing_map_sort_entry *sort_entry, **entries;
	int i, n_entries, ret;

	entries = vmalloc(array_size(sizeof(sort_entry), map->max_elts));
	if (!entries)
		return -ENOMEM;

	for (i = 0, n_entries = 0; i < map->map_size; i++) {
		struct tracing_map_entry *entry;

		entry = TRACING_MAP_ENTRY(map->map, i);

		if (!entry->key || !entry->val)
			continue;

		entries[n_entries] = create_sort_entry(entry->val->key,
						       entry->val);
		if (!entries[n_entries++]) {
			ret = -ENOMEM;
			goto free;
		}
	}

	if (n_entries == 0) {
		ret = 0;
		goto free;
	}

	if (n_entries == 1) {
		*sort_entries = entries;
		return 1;
	}

	detect_dups(entries, n_entries, map->key_size);

	if (is_key(map, sort_keys[0].field_idx))
		cmp_entries_fn = cmp_entries_key;
	else
		cmp_entries_fn = cmp_entries_sum;

	set_sort_key(map, &sort_keys[0]);

	sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *),
	     (int (*)(const void *, const void *))cmp_entries_fn, NULL);

	if (n_sort_keys > 1)
		sort_secondary(map,
			       (const struct tracing_map_sort_entry **)entries,
			       n_entries,
			       &sort_keys[0],
			       &sort_keys[1]);

	*sort_entries = entries;

	return n_entries;
 free:
	tracing_map_destroy_sort_entries(entries, n_entries);

	return ret;
}

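/*
 * Sketch of a sorting consumer (illustrative; field_idx 0 and
 * use_elt() are hypothetical, and error handling is elided):
 *
 *	struct tracing_map_sort_entry **entries;
 *	struct tracing_map_sort_key sort_key = {
 *		.field_idx = 0,		// from tracing_map_add_*_field()
 *		.descending = true,
 *	};
 *	int i, n;
 *
 *	n = tracing_map_sort_entries(map, &sort_key, 1, &entries);
 *	for (i = 0; i < n; i++)
 *		use_elt(entries[i]->elt);
 *	if (n > 0)
 *		tracing_map_destroy_sort_entries(entries, n);
 */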