linux/block/t10-pi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 *            Information.
 */

#include <linux/t10-pi.h>
#include <linux/blk-integrity.h>
#include <linux/crc-t10dif.h>
#include <linux/crc64.h>
#include <linux/module.h>
#include <net/checksum.h>
#include <asm/unaligned.h>

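/*
 * Guard tag checksum callback shared by the 16-bit helpers below.  The CRC
 * flavour is the CRC16 defined by the T10 DIF specification; the IP checksum
 * flavour is the cheaper format some HBAs accept from the host (DIX) and
 * convert to the T10 CRC on the wire.
 */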
typedef __be16 (csum_fn) (void *, unsigned int);

static __be16 t10_pi_crc_fn(void *data, unsigned int len)
{
        return cpu_to_be16(crc_t10dif(data, len));
}

static __be16 t10_pi_ip_fn(void *data, unsigned int len)
{
        return (__force __be16)ip_compute_csum(data, len);
}

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
 * tag.
 */
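/*
 * The tuple layout matches struct t10_pi_tuple in <linux/t10-pi.h>: all
 * three fields are stored big-endian, 8 bytes of protection information
 * per protected data interval.
 */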
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
                csum_fn *fn, enum t10_dif_type type)
{
        unsigned int i;

        for (i = 0 ; i < iter->data_size ; i += iter->interval) {
                struct t10_pi_tuple *pi = iter->prot_buf;

                pi->guard_tag = fn(iter->data_buf, iter->interval);
                pi->app_tag = 0;

                if (type == T10_PI_TYPE1_PROTECTION)
                        pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
                else
                        pi->ref_tag = 0;

                iter->data_buf += iter->interval;
                iter->prot_buf += iter->tuple_size;
                iter->seed++;
        }

        return BLK_STS_OK;
}

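/*
 * An app tag of T10_PI_APP_ESCAPE (all ones) disables checking for that
 * block.  For Type 3 the ref tag carries no meaning, so both the app tag
 * and the ref tag must hold the escape value before the block is skipped.
 */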
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
                csum_fn *fn, enum t10_dif_type type)
{
        unsigned int i;

        BUG_ON(type == T10_PI_TYPE0_PROTECTION);

        for (i = 0 ; i < iter->data_size ; i += iter->interval) {
                struct t10_pi_tuple *pi = iter->prot_buf;
                __be16 csum;

                if (type == T10_PI_TYPE1_PROTECTION ||
                    type == T10_PI_TYPE2_PROTECTION) {
                        if (pi->app_tag == T10_PI_APP_ESCAPE)
                                goto next;

                        if (be32_to_cpu(pi->ref_tag) !=
                            lower_32_bits(iter->seed)) {
                                pr_err("%s: ref tag error at location %llu (rcvd %u)\n",
                                       iter->disk_name,
                                       (unsigned long long)iter->seed,
                                       be32_to_cpu(pi->ref_tag));
                                return BLK_STS_PROTECTION;
                        }
                } else if (type == T10_PI_TYPE3_PROTECTION) {
                        if (pi->app_tag == T10_PI_APP_ESCAPE &&
                            pi->ref_tag == T10_PI_REF_ESCAPE)
                                goto next;
                }

                csum = fn(iter->data_buf, iter->interval);

                if (pi->guard_tag != csum) {
                        pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
                               iter->disk_name, (unsigned long long)iter->seed,
                               be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
                        return BLK_STS_PROTECTION;
                }

next:
                iter->data_buf += iter->interval;
                iter->prot_buf += iter->tuple_size;
                iter->seed++;
        }

        return BLK_STS_OK;
}

static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
        return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
        return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
        return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
        return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}

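/*
 * Example with illustrative numbers: assuming 512-byte protection intervals,
 * a bio submitted at virtual sector 0 of a partition that starts at physical
 * LBA 2048 carries ref tags 0, 1, 2, ... seeded from the virtual LBAs.
 * t10_pi_type1_prepare() rewrites them to 2048, 2049, 2050, ... before the
 * request reaches the device, and t10_pi_type1_complete() maps them back on
 * completion.
 */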
/**
 * t10_pi_type1_prepare - prepare PI prior to submitting request to device
 * @rq:              request with PI that should be prepared
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer and is used to seed the ref_tag.
 * Due to partitioning, MD/DM cloning, etc. the actual physical start
 * sector is likely to be different. Remap the protection information to
 * match the physical LBA.
 */
static void t10_pi_type1_prepare(struct request *rq)
{
        const int tuple_sz = rq->q->integrity.tuple_size;
        u32 ref_tag = t10_pi_ref_tag(rq);
        struct bio *bio;

        __rq_for_each_bio(bio, rq) {
                struct bio_integrity_payload *bip = bio_integrity(bio);
                u32 virt = bip_get_seed(bip) & 0xffffffff;
                struct bio_vec iv;
                struct bvec_iter iter;

                /* Already remapped? */
                if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
                        break;

                bip_for_each_vec(iv, bip, iter) {
                        unsigned int j;
                        void *p;

                        p = bvec_kmap_local(&iv);
                        for (j = 0; j < iv.bv_len; j += tuple_sz) {
                                struct t10_pi_tuple *pi = p;

                                if (be32_to_cpu(pi->ref_tag) == virt)
                                        pi->ref_tag = cpu_to_be32(ref_tag);
                                virt++;
                                ref_tag++;
                                p += tuple_sz;
                        }
                        kunmap_local(p);
                }

                bip->bip_flags |= BIP_MAPPED_INTEGRITY;
        }
}

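/*
 * Note that a request may complete in pieces: nr_bytes below bounds how many
 * protection intervals are remapped, so only the tuples covering the bytes
 * that actually completed are touched.
 */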
/**
 * t10_pi_type1_complete - remap PI prior to returning request to the blk layer
 * @rq:              request with PI that should be remapped
 * @nr_bytes:        number of completed bytes whose PI should be remapped
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer and is used to seed the ref_tag.
 * Due to partitioning, MD/DM cloning, etc. the actual physical start
 * sector is likely to be different. Since the physical start sector was
 * submitted to the device, remap the protection information back to the
 * virtual values expected by the block layer.
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
        unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
        const int tuple_sz = rq->q->integrity.tuple_size;
        u32 ref_tag = t10_pi_ref_tag(rq);
        struct bio *bio;

        __rq_for_each_bio(bio, rq) {
                struct bio_integrity_payload *bip = bio_integrity(bio);
                u32 virt = bip_get_seed(bip) & 0xffffffff;
                struct bio_vec iv;
                struct bvec_iter iter;

                bip_for_each_vec(iv, bip, iter) {
                        unsigned int j;
                        void *p;

                        p = bvec_kmap_local(&iv);
                        for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
                                struct t10_pi_tuple *pi = p;

                                if (be32_to_cpu(pi->ref_tag) == ref_tag)
                                        pi->ref_tag = cpu_to_be32(virt);
                                virt++;
                                ref_tag++;
                                intervals--;
                                p += tuple_sz;
                        }
                        kunmap_local(p);
                }
        }
}

static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
{
        return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
        return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
        return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
        return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}

/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}

const struct blk_integrity_profile t10_pi_type1_crc = {
        .name                   = "T10-DIF-TYPE1-CRC",
        .generate_fn            = t10_pi_type1_generate_crc,
        .verify_fn              = t10_pi_type1_verify_crc,
        .prepare_fn             = t10_pi_type1_prepare,
        .complete_fn            = t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);

const struct blk_integrity_profile t10_pi_type1_ip = {
        .name                   = "T10-DIF-TYPE1-IP",
        .generate_fn            = t10_pi_type1_generate_ip,
        .verify_fn              = t10_pi_type1_verify_ip,
        .prepare_fn             = t10_pi_type1_prepare,
        .complete_fn            = t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);

const struct blk_integrity_profile t10_pi_type3_crc = {
        .name                   = "T10-DIF-TYPE3-CRC",
        .generate_fn            = t10_pi_type3_generate_crc,
        .verify_fn              = t10_pi_type3_verify_crc,
        .prepare_fn             = t10_pi_type3_prepare,
        .complete_fn            = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);

const struct blk_integrity_profile t10_pi_type3_ip = {
        .name                   = "T10-DIF-TYPE3-IP",
        .generate_fn            = t10_pi_type3_generate_ip,
        .verify_fn              = t10_pi_type3_verify_ip,
        .prepare_fn             = t10_pi_type3_prepare,
        .complete_fn            = t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);

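/*
 * Extended (64-bit) protection information, as used by NVMe namespaces
 * formatted with a 64-bit guard: a 64-bit guard tag computed with
 * crc64_rocksoft(), a 16-bit app tag and a 48-bit ref tag, packed into
 * struct crc64_pi_tuple.
 */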
static __be64 ext_pi_crc64(void *data, unsigned int len)
{
        return cpu_to_be64(crc64_rocksoft(data, len));
}

static blk_status_t ext_pi_crc64_generate(struct blk_integrity_iter *iter,
                                        enum t10_dif_type type)
{
        unsigned int i;

        for (i = 0 ; i < iter->data_size ; i += iter->interval) {
                struct crc64_pi_tuple *pi = iter->prot_buf;

                pi->guard_tag = ext_pi_crc64(iter->data_buf, iter->interval);
                pi->app_tag = 0;

                if (type == T10_PI_TYPE1_PROTECTION)
                        put_unaligned_be48(iter->seed, pi->ref_tag);
                else
                        put_unaligned_be48(0ULL, pi->ref_tag);

                iter->data_buf += iter->interval;
                iter->prot_buf += iter->tuple_size;
                iter->seed++;
        }

        return BLK_STS_OK;
}

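/*
 * The 48-bit ref tag has no native integer type, so the all-ones escape
 * value is detected by comparing the raw bytes.
 */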
static bool ext_pi_ref_escape(u8 *ref_tag)
{
        static u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

        return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0;
}

static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
                                      enum t10_dif_type type)
{
        unsigned int i;

        for (i = 0; i < iter->data_size; i += iter->interval) {
                struct crc64_pi_tuple *pi = iter->prot_buf;
                u64 ref, seed;
                __be64 csum;

                if (type == T10_PI_TYPE1_PROTECTION) {
                        if (pi->app_tag == T10_PI_APP_ESCAPE)
                                goto next;

                        ref = get_unaligned_be48(pi->ref_tag);
                        seed = lower_48_bits(iter->seed);
                        if (ref != seed) {
                                pr_err("%s: ref tag error at location %llu (rcvd %llu)\n",
                                        iter->disk_name, seed, ref);
                                return BLK_STS_PROTECTION;
                        }
                } else if (type == T10_PI_TYPE3_PROTECTION) {
                        if (pi->app_tag == T10_PI_APP_ESCAPE &&
                            ext_pi_ref_escape(pi->ref_tag))
                                goto next;
                }

                csum = ext_pi_crc64(iter->data_buf, iter->interval);
                if (pi->guard_tag != csum) {
                        pr_err("%s: guard tag error at sector %llu (rcvd %016llx, want %016llx)\n",
                                iter->disk_name, (unsigned long long)iter->seed,
                                be64_to_cpu(pi->guard_tag), be64_to_cpu(csum));
                        return BLK_STS_PROTECTION;
                }

next:
                iter->data_buf += iter->interval;
                iter->prot_buf += iter->tuple_size;
                iter->seed++;
        }

        return BLK_STS_OK;
}

static blk_status_t ext_pi_type1_verify_crc64(struct blk_integrity_iter *iter)
{
        return ext_pi_crc64_verify(iter, T10_PI_TYPE1_PROTECTION);
}

static blk_status_t ext_pi_type1_generate_crc64(struct blk_integrity_iter *iter)
{
        return ext_pi_crc64_generate(iter, T10_PI_TYPE1_PROTECTION);
}

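/*
 * Same remapping as t10_pi_type1_prepare(), but for the 48-bit ref tag of
 * the 64-bit format, accessed through the unaligned 48-bit helpers.
 */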
static void ext_pi_type1_prepare(struct request *rq)
{
        const int tuple_sz = rq->q->integrity.tuple_size;
        u64 ref_tag = ext_pi_ref_tag(rq);
        struct bio *bio;

        __rq_for_each_bio(bio, rq) {
                struct bio_integrity_payload *bip = bio_integrity(bio);
                u64 virt = lower_48_bits(bip_get_seed(bip));
                struct bio_vec iv;
                struct bvec_iter iter;

                /* Already remapped? */
                if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
                        break;

                bip_for_each_vec(iv, bip, iter) {
                        unsigned int j;
                        void *p;

                        p = bvec_kmap_local(&iv);
                        for (j = 0; j < iv.bv_len; j += tuple_sz) {
                                struct crc64_pi_tuple *pi = p;
                                u64 ref = get_unaligned_be48(pi->ref_tag);

                                if (ref == virt)
                                        put_unaligned_be48(ref_tag, pi->ref_tag);
                                virt++;
                                ref_tag++;
                                p += tuple_sz;
                        }
                        kunmap_local(p);
                }

                bip->bip_flags |= BIP_MAPPED_INTEGRITY;
        }
}

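/*
 * Counterpart of t10_pi_type1_complete() for the 64-bit format: remap the
 * ref tags of the completed intervals back to the virtual values the block
 * layer expects.
 */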
static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
        unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
        const int tuple_sz = rq->q->integrity.tuple_size;
        u64 ref_tag = ext_pi_ref_tag(rq);
        struct bio *bio;

        __rq_for_each_bio(bio, rq) {
                struct bio_integrity_payload *bip = bio_integrity(bio);
                u64 virt = lower_48_bits(bip_get_seed(bip));
                struct bio_vec iv;
                struct bvec_iter iter;

                bip_for_each_vec(iv, bip, iter) {
                        unsigned int j;
                        void *p;

                        p = bvec_kmap_local(&iv);
                        for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
                                struct crc64_pi_tuple *pi = p;
                                u64 ref = get_unaligned_be48(pi->ref_tag);

                                if (ref == ref_tag)
                                        put_unaligned_be48(virt, pi->ref_tag);
                                virt++;
                                ref_tag++;
                                intervals--;
                                p += tuple_sz;
                        }
                        kunmap_local(p);
                }
        }
}

static blk_status_t ext_pi_type3_verify_crc64(struct blk_integrity_iter *iter)
{
        return ext_pi_crc64_verify(iter, T10_PI_TYPE3_PROTECTION);
}

static blk_status_t ext_pi_type3_generate_crc64(struct blk_integrity_iter *iter)
{
        return ext_pi_crc64_generate(iter, T10_PI_TYPE3_PROTECTION);
}

const struct blk_integrity_profile ext_pi_type1_crc64 = {
        .name                   = "EXT-DIF-TYPE1-CRC64",
        .generate_fn            = ext_pi_type1_generate_crc64,
        .verify_fn              = ext_pi_type1_verify_crc64,
        .prepare_fn             = ext_pi_type1_prepare,
        .complete_fn            = ext_pi_type1_complete,
};
EXPORT_SYMBOL_GPL(ext_pi_type1_crc64);

const struct blk_integrity_profile ext_pi_type3_crc64 = {
        .name                   = "EXT-DIF-TYPE3-CRC64",
        .generate_fn            = ext_pi_type3_generate_crc64,
        .verify_fn              = ext_pi_type3_verify_crc64,
        .prepare_fn             = t10_pi_type3_prepare,
        .complete_fn            = t10_pi_type3_complete,
};
EXPORT_SYMBOL_GPL(ext_pi_type3_crc64);
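
/*
 * Illustrative sketch, not part of the original file: roughly how a block
 * driver might hand one of the profiles above to the block layer.  Field
 * usage is modelled loosely on the SCSI disk driver's DIF setup; the
 * function name and the fixed 512-byte interval are assumptions made for
 * the example only.
 */
static void __maybe_unused example_register_type1_crc(struct gendisk *disk)
{
        struct blk_integrity bi = {
                .profile        = &t10_pi_type1_crc,
                .tuple_size     = sizeof(struct t10_pi_tuple), /* 8 PI bytes per interval */
                .interval_exp   = 9,            /* one tuple per 512-byte interval */
                .tag_size       = sizeof(u16),  /* expose the app tag to users */
                .flags          = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
        };

        blk_integrity_register(disk, &bi);
}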

MODULE_LICENSE("GPL");