linux/drivers/video/msm/mdp.c
/* drivers/video/msm_fb/mdp.c
 *
 * MSM MDP Interface (used by framebuffer core)
 *
 * Copyright (C) 2007 QUALCOMM Incorporated
 * Copyright (C) 2007 Google Incorporated
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/msm_mdp.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/clk.h>
#include <linux/file.h>
#include <linux/major.h>
#include <linux/slab.h>

#include <linux/platform_data/video-msm_fb.h>
#include <linux/platform_device.h>
#include <linux/export.h>

#include "mdp_hw.h"

struct class *mdp_class;

#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000)

static uint16_t mdp_default_ccs[] = {
        0x254, 0x000, 0x331, 0x254, 0xF38, 0xE61, 0x254, 0x409, 0x000,
        0x010, 0x080, 0x080
};

static DECLARE_WAIT_QUEUE_HEAD(mdp_dma2_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue);
static struct msmfb_callback *dma_callback;
static struct clk *clk;
static unsigned int mdp_irq_mask;
static DEFINE_SPINLOCK(mdp_lock);
DEFINE_MUTEX(mdp_mutex);

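/*
 * Enable the MDP interrupt sources given in mask.  The set of currently
 * enabled sources is tracked in mdp_irq_mask under mdp_lock; the MDP clock
 * and the IRQ line are only switched on when the first source is enabled.
 * Returns -1 if any requested source is already enabled.
 */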
static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
{
        unsigned long irq_flags;
        int ret = 0;

        BUG_ON(!mask);

        spin_lock_irqsave(&mdp_lock, irq_flags);
        /* if the mask bits are already set return an error, this interrupt
         * is already enabled */
        if (mdp_irq_mask & mask) {
                printk(KERN_ERR "mdp irq already on %x %x\n",
                       mdp_irq_mask, mask);
                ret = -1;
        }
        /* if the mdp irq is not already enabled, enable it */
        if (!mdp_irq_mask) {
                if (clk)
                        clk_enable(clk);
                enable_irq(mdp->irq);
        }

        /* update the irq mask to reflect the fact that the interrupt is
         * enabled */
        mdp_irq_mask |= mask;
        spin_unlock_irqrestore(&mdp_lock, irq_flags);
        return ret;
}

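/*
 * Disable the MDP interrupt sources given in mask.  The caller must hold
 * mdp_lock; the IRQ line and the MDP clock are turned off again once no
 * sources remain enabled.
 */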
static int locked_disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
{
        /* this interrupt is already disabled! */
        if (!(mdp_irq_mask & mask)) {
                printk(KERN_ERR "mdp irq already off %x %x\n",
                       mdp_irq_mask, mask);
                return -1;
        }
        /* update the irq mask to reflect the fact that the interrupt is
         * disabled */
        mdp_irq_mask &= ~(mask);
        /* if no one is waiting on the interrupt, disable it */
        if (!mdp_irq_mask) {
                disable_irq_nosync(mdp->irq);
                if (clk)
                        clk_disable(clk);
        }
        return 0;
}

static int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
{
        unsigned long irq_flags;
        int ret;

        spin_lock_irqsave(&mdp_lock, irq_flags);
        ret = locked_disable_mdp_irq(mdp, mask);
        spin_unlock_irqrestore(&mdp_lock, irq_flags);
        return ret;
}

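/*
 * MDP interrupt handler: acknowledge every pending status bit, wake up any
 * waiters for DMA2 or PPP (ROI) completion, and disable the sources that
 * fired so the clock can be dropped once nothing is in flight.
 */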
static irqreturn_t mdp_isr(int irq, void *data)
{
        uint32_t status;
        unsigned long irq_flags;
        struct mdp_info *mdp = data;

        spin_lock_irqsave(&mdp_lock, irq_flags);

        status = mdp_readl(mdp, MDP_INTR_STATUS);
        mdp_writel(mdp, status, MDP_INTR_CLEAR);

        status &= mdp_irq_mask;
        if (status & DL0_DMA2_TERM_DONE) {
                if (dma_callback) {
                        dma_callback->func(dma_callback);
                        dma_callback = NULL;
                }
                wake_up(&mdp_dma2_waitqueue);
        }

        if (status & DL0_ROI_DONE)
                wake_up(&mdp_ppp_waitqueue);

        if (status)
                locked_disable_mdp_irq(mdp, status);

        spin_unlock_irqrestore(&mdp_lock, irq_flags);
        return IRQ_HANDLED;
}

static uint32_t mdp_check_mask(uint32_t mask)
{
        uint32_t ret;
        unsigned long irq_flags;

        spin_lock_irqsave(&mdp_lock, irq_flags);
        ret = mdp_irq_mask & mask;
        spin_unlock_irqrestore(&mdp_lock, irq_flags);
        return ret;
}

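/*
 * Sleep until the interrupt sources in mask have completed (the ISR clears
 * them from mdp_irq_mask) or one second (HZ jiffies) has passed.  On timeout
 * the sources are force-disabled and -ETIMEDOUT is returned.
 */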
static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq)
{
        int ret = 0;
        unsigned long irq_flags;

        wait_event_timeout(*wq, !mdp_check_mask(mask), HZ);

        spin_lock_irqsave(&mdp_lock, irq_flags);
        if (mdp_irq_mask & mask) {
                locked_disable_mdp_irq(mdp, mask);
                printk(KERN_WARNING "timeout waiting for mdp to complete %x\n",
                       mask);
                ret = -ETIMEDOUT;
        }
        spin_unlock_irqrestore(&mdp_lock, irq_flags);

        return ret;
}

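/*
 * Wait for an in-flight DMA2 update of the panel to finish.  More than
 * MDP_MAX_TIMEOUTS (20) consecutive timeouts are treated as fatal and
 * trigger a BUG().
 */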
void mdp_dma_wait(struct mdp_device *mdp_dev)
{
#define MDP_MAX_TIMEOUTS 20
        static int timeout_count;
        struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);

        if (mdp_wait(mdp, DL0_DMA2_TERM_DONE, &mdp_dma2_waitqueue) == -ETIMEDOUT)
                timeout_count++;
        else
                timeout_count = 0;

        if (timeout_count > MDP_MAX_TIMEOUTS) {
                printk(KERN_ERR "mdp: dma failed %d times, something's wrong!\n",
                       MDP_MAX_TIMEOUTS);
                BUG();
        }
}

static int mdp_ppp_wait(struct mdp_info *mdp)
{
        return mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue);
}

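/*
 * Program DMA2 to push a width x height RGB565 region at (x, y), read from
 * addr with line pitch stride, out over the primary MDDI LCD, dithered down
 * to 18bpp (6-6-6), and then kick off the transfer.  callback, if non-NULL,
 * is invoked from the ISR when the transfer completes.
 */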
void mdp_dma_to_mddi(struct mdp_info *mdp, uint32_t addr, uint32_t stride,
                     uint32_t width, uint32_t height, uint32_t x, uint32_t y,
                     struct msmfb_callback *callback)
{
        uint32_t dma2_cfg;
        uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */

        if (enable_mdp_irq(mdp, DL0_DMA2_TERM_DONE)) {
                printk(KERN_ERR "mdp_dma_to_mddi: busy\n");
                return;
        }

        dma_callback = callback;

        dma2_cfg = DMA_PACK_TIGHT |
                DMA_PACK_ALIGN_LSB |
                DMA_PACK_PATTERN_RGB |
                DMA_OUT_SEL_AHB |
                DMA_IBUF_NONCONTIGUOUS;

        dma2_cfg |= DMA_IBUF_FORMAT_RGB565;

        dma2_cfg |= DMA_OUT_SEL_MDDI;

        dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY;

        dma2_cfg |= DMA_DITHER_EN;

        /* setup size, address, and stride */
        mdp_writel(mdp, (height << 16) | (width),
                   MDP_CMD_DEBUG_ACCESS_BASE + 0x0184);
        mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188);
        mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C);

        /* 666 18BPP */
        dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;

        /* set y & x offset and MDDI transaction parameters */
        mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194);
        mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0);
        mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM,
                   MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4);

        mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180);

        /* start DMA2 */
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044);
}

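/*
 * Framebuffer-facing DMA entry point.  Only the primary MDDI (PMDH) path is
 * wired up; requests for any other interface are silently ignored.
 */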
void mdp_dma(struct mdp_device *mdp_dev, uint32_t addr, uint32_t stride,
             uint32_t width, uint32_t height, uint32_t x, uint32_t y,
             struct msmfb_callback *callback, int interface)
{
        struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);

        if (interface == MSM_MDDI_PMDH_INTERFACE) {
                mdp_dma_to_mddi(mdp, addr, stride, width, height, x, y,
                                callback);
        }
}

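/*
 * Resolve an mdp_img's memory_id (a file descriptor) to a physical region.
 * Only framebuffer memory (an fd on an FB_MAJOR device) is accepted, in
 * which case the framebuffer's smem range is returned; anything else fails
 * with -1.
 */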
int get_img(struct mdp_img *img, struct fb_info *info,
            unsigned long *start, unsigned long *len,
            struct file **filep)
{
        int ret = 0;
        struct fd f = fdget(img->memory_id);
        if (f.file == NULL)
                return -1;

        if (MAJOR(file_inode(f.file)->i_rdev) == FB_MAJOR) {
                *start = info->fix.smem_start;
                *len = info->fix.smem_len;
        } else
                ret = -1;
        fdput(f);

        return ret;
}

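/*
 * Counterpart to get_img().  get_img() drops its file reference before
 * returning, so there is currently nothing to release here.
 */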
void put_img(struct file *src_file, struct file *dst_file)
{
}

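/*
 * Execute the PPP blit described by req from the source image to the
 * destination image.  As a workaround for a hardware bug in background tile
 * fetch, blits that blend (alpha or transparency) with a 90-degree rotation
 * into a destination at most 16 pixels wide and at least 16 high are split
 * into 16-pixel-high strips and issued one at a time.
 */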
int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
             struct mdp_blit_req *req)
{
        int ret;
        unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0;
        struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
        struct file *src_file = NULL, *dst_file = NULL;

        /* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */
        if (unlikely(req->src_rect.h == 0 ||
                     req->src_rect.w == 0)) {
                printk(KERN_ERR "mdp_ppp: src img of zero size!\n");
                return -EINVAL;
        }
        if (unlikely(req->dst_rect.h == 0 ||
                     req->dst_rect.w == 0))
                return -EINVAL;

        /* do this first so that if this fails, the caller can always
         * safely call put_img */
        if (unlikely(get_img(&req->src, fb, &src_start, &src_len, &src_file))) {
                printk(KERN_ERR "mdp_ppp: could not retrieve src image from "
                                "memory\n");
                return -EINVAL;
        }

        if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) {
                printk(KERN_ERR "mdp_ppp: could not retrieve dst image from "
                                "memory\n");
                return -EINVAL;
        }
        mutex_lock(&mdp_mutex);

        /* transp_masking unimplemented */
        req->transp_mask = MDP_TRANSP_NOP;
        if (unlikely((req->transp_mask != MDP_TRANSP_NOP ||
                      req->alpha != MDP_ALPHA_NOP ||
                      HAS_ALPHA(req->src.format)) &&
                     (req->flags & MDP_ROT_90 &&
                      req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) {
                int i;
                unsigned int tiles = req->dst_rect.h / 16;
                unsigned int remainder = req->dst_rect.h % 16;
                req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h;
                req->dst_rect.h = 16;
                for (i = 0; i < tiles; i++) {
                        enable_mdp_irq(mdp, DL0_ROI_DONE);
                        ret = mdp_ppp_blit(mdp, req, src_file, src_start,
                                           src_len, dst_file, dst_start,
                                           dst_len);
                        if (ret)
                                goto err_bad_blit;
                        ret = mdp_ppp_wait(mdp);
                        if (ret)
                                goto err_wait_failed;
                        req->dst_rect.y += 16;
                        req->src_rect.x += req->src_rect.w;
                }
                if (!remainder)
                        goto end;
                req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h;
                req->dst_rect.h = remainder;
        }
        enable_mdp_irq(mdp, DL0_ROI_DONE);
        ret = mdp_ppp_blit(mdp, req, src_file, src_start, src_len, dst_file,
                           dst_start,
                           dst_len);
        if (ret)
                goto err_bad_blit;
        ret = mdp_ppp_wait(mdp);
        if (ret)
                goto err_wait_failed;
end:
        put_img(src_file, dst_file);
        mutex_unlock(&mdp_mutex);
        return 0;
err_bad_blit:
        disable_mdp_irq(mdp, DL0_ROI_DONE);
err_wait_failed:
        put_img(src_file, dst_file);
        mutex_unlock(&mdp_mutex);
        return ret;
}

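/* Route MDP output to the display group selected by the low four bits of
 * disp_id. */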
void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id)
{
        struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);

        disp_id &= 0xf;
        mdp_writel(mdp, disp_id, MDP_FULL_BYPASS_WORD43);
}

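/*
 * Register a panel/client driver with the msm_mdp class; the class core then
 * calls the interface's add_dev hook for each MDP device.
 *
 * A minimal, purely illustrative sketch of a caller (the my_panel_* names
 * are hypothetical and not part of this driver):
 *
 *      static int my_panel_add_dev(struct device *dev,
 *                                  struct class_interface *cint)
 *      {
 *              // bind the panel to the newly added mdp%d device
 *              return 0;
 *      }
 *
 *      static struct class_interface my_panel_interface = {
 *              .add_dev = my_panel_add_dev,
 *      };
 *
 *      ret = register_mdp_client(&my_panel_interface);
 */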
int register_mdp_client(struct class_interface *cint)
{
        if (!mdp_class) {
                pr_err("mdp: no mdp_class when registering mdp client\n");
                return -ENODEV;
        }
        cint->class = mdp_class;
        return class_interface_register(cint);
}

#include "mdp_csc_table.h"
#include "mdp_scale_tables.h"

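/*
 * Probe: map the MDP register block, claim the MDP clock and IRQ, program
 * the CSC, upscale and default CCS tables, and register the mdp%d device
 * under the msm_mdp class.
 */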
int mdp_probe(struct platform_device *pdev)
{
        struct resource *resource;
        int ret;
        int n;
        struct mdp_info *mdp;

        resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!resource) {
                pr_err("mdp: can not get mdp mem resource!\n");
                return -ENOMEM;
        }

        mdp = kzalloc(sizeof(struct mdp_info), GFP_KERNEL);
        if (!mdp)
                return -ENOMEM;

        mdp->irq = platform_get_irq(pdev, 0);
        if (mdp->irq < 0) {
                pr_err("mdp: can not get mdp irq\n");
                ret = mdp->irq;
                goto error_get_irq;
        }

        mdp->base = ioremap(resource->start, resource_size(resource));
        if (!mdp->base) {
                printk(KERN_ERR "msmfb: cannot map mdp registers!\n");
                ret = -ENOMEM;
                goto error_ioremap;
        }

        mdp->mdp_dev.dma = mdp_dma;
        mdp->mdp_dev.dma_wait = mdp_dma_wait;
        mdp->mdp_dev.blit = mdp_blit;
        mdp->mdp_dev.set_grp_disp = mdp_set_grp_disp;

        clk = clk_get(&pdev->dev, "mdp_clk");
        if (IS_ERR(clk)) {
                printk(KERN_ERR "mdp: failed to get mdp clk\n");
                ret = PTR_ERR(clk);
                goto error_get_clk;
        }

        ret = request_irq(mdp->irq, mdp_isr, 0, "msm_mdp", mdp);
        if (ret)
                goto error_request_irq;
        disable_irq(mdp->irq);
        mdp_irq_mask = 0;

        /* debug interface write access */
        mdp_writel(mdp, 1, 0x60);

        mdp_writel(mdp, MDP_ANY_INTR_MASK, MDP_INTR_ENABLE);
        mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE);

        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc);

        for (n = 0; n < ARRAY_SIZE(csc_table); n++)
                mdp_writel(mdp, csc_table[n].val, csc_table[n].reg);

        /* clear up unused fg/main registers */
        /* comp.plane 2&3 ystride */
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120);

        /* unpacked pattern */
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c);

        /* comp.plane 2 & 3 */
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118);

        /* clear unused bg registers */
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0);
        mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4);

        for (n = 0; n < ARRAY_SIZE(mdp_upscale_table); n++)
                mdp_writel(mdp, mdp_upscale_table[n].val,
                       mdp_upscale_table[n].reg);

        for (n = 0; n < 9; n++)
                mdp_writel(mdp, mdp_default_ccs[n], 0x40440 + 4 * n);
        mdp_writel(mdp, mdp_default_ccs[9], 0x40500 + 4 * 0);
        mdp_writel(mdp, mdp_default_ccs[10], 0x40500 + 4 * 0);
        mdp_writel(mdp, mdp_default_ccs[11], 0x40500 + 4 * 0);

        /* register mdp device */
        mdp->mdp_dev.dev.parent = &pdev->dev;
        mdp->mdp_dev.dev.class = mdp_class;
        dev_set_name(&mdp->mdp_dev.dev, "mdp%d", pdev->id);

        /* if you can remove the platform device you'd have to implement
         * this:
        mdp_dev.release = mdp_class; */

        ret = device_register(&mdp->mdp_dev.dev);
        if (ret)
                goto error_device_register;
        return 0;

error_device_register:
        free_irq(mdp->irq, mdp);
error_request_irq:
        clk_put(clk);
error_get_clk:
        iounmap(mdp->base);
error_get_irq:
error_ioremap:
        kfree(mdp);
        return ret;
}

static struct platform_driver msm_mdp_driver = {
        .probe = mdp_probe,
        .driver = {.name = "msm_mdp"},
};

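/*
 * Create the msm_mdp class before the driver (and hence any clients) can
 * register, then add the platform driver.
 */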
static int __init mdp_init(void)
{
        mdp_class = class_create(THIS_MODULE, "msm_mdp");
        if (IS_ERR(mdp_class)) {
                printk(KERN_ERR "Error creating mdp class\n");
                return PTR_ERR(mdp_class);
        }
        return platform_driver_register(&msm_mdp_driver);
}

subsys_initcall(mdp_init);