Diffstat (limited to 'meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch')
-rw-r--r-- | meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch | 37524
1 file changed, 0 insertions, 37524 deletions
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch deleted file mode 100644 index 4ffda75e15..0000000000 --- a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch +++ /dev/null @@ -1,37524 +0,0 @@ -diff -uNr a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig ---- a/drivers/gpu/drm/Kconfig 2009-03-23 15:12:14.000000000 -0800 -+++ b/drivers/gpu/drm/Kconfig 2009-04-07 13:28:38.000000000 -0700 -@@ -122,3 +122,14 @@ - help - Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister - chipset. If M is selected the module will be called savage. -+ -+config DRM_PSB -+ tristate "Intel Poulsbo/Moorestown" -+ depends on DRM && PCI -+ select FB_CFB_COPYAREA -+ select FB_CFB_FILLRECT -+ select FB_CFB_IMAGEBLIT -+ help -+ Choose this option if you have a Poulsbo or Moorestown platform. -+ If M is selected the module will be called psb. -+ -diff -uNr a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile ---- a/drivers/gpu/drm/Makefile 2009-03-23 15:12:14.000000000 -0800 -+++ b/drivers/gpu/drm/Makefile 2009-04-07 13:28:38.000000000 -0700 -@@ -25,4 +25,5 @@ - obj-$(CONFIG_DRM_SIS) += sis/ - obj-$(CONFIG_DRM_SAVAGE)+= savage/ - obj-$(CONFIG_DRM_VIA) +=via/ -+obj-$(CONFIG_DRM_PSB) +=psb/ - -diff -uNr a/drivers/gpu/drm/psb/lnc_topaz.c b/drivers/gpu/drm/psb/lnc_topaz.c ---- a/drivers/gpu/drm/psb/lnc_topaz.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/lnc_topaz.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,695 @@ -+/** -+ * file lnc_topaz.c -+ * TOPAZ I/O operations and IRQ handling -+ * -+ */ -+ -+/************************************************************************** -+ * -+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA -+ * Copyright (c) Imagination Technologies Limited, UK -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ * -+ **************************************************************************/ -+ -+/* include headers */ -+/* #define DRM_DEBUG_CODE 2 */ -+ -+#include <drm/drmP.h> -+#include <drm/drm_os_linux.h> -+ -+#include "psb_drv.h" -+#include "psb_drm.h" -+#include "lnc_topaz.h" -+ -+#include <linux/io.h> -+#include <linux/delay.h> -+ -+static int drm_psb_ospmxxx = 0x0; -+ -+/* static function define */ -+static int lnc_topaz_deliver_command(struct drm_device *dev, -+ struct ttm_buffer_object *cmd_buffer, -+ unsigned long cmd_offset, -+ unsigned long cmd_size, -+ void **topaz_cmd, uint32_t sequence, -+ int copy_cmd); -+static int lnc_topaz_send(struct drm_device *dev, void *cmd, -+ unsigned long cmd_size, uint32_t sync_seq); -+static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd); -+static int lnc_topaz_dequeue_send(struct drm_device *dev); -+static int lnc_topaz_save_command(struct drm_device *dev, void *cmd, -+ unsigned long cmd_size, uint32_t sequence); -+ -+void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *)dev->dev_private; -+ uint32_t clr_flag = lnc_topaz_queryirq(dev); -+ -+ lnc_topaz_clearirq(dev, clr_flag); -+ -+ /* ignore non-SYNC interrupts */ -+ if ((CCB_CTRL_SEQ(dev_priv) & 0x8000) == 0) -+ return; -+ -+ dev_priv->topaz_current_sequence = -+ *(uint32_t *)dev_priv->topaz_sync_addr; -+ -+ PSB_DEBUG_IRQ("TOPAZ:Got SYNC IRQ,sync seq:0x%08x (MTX) vs 0x%08x\n", -+ dev_priv->topaz_current_sequence, -+ dev_priv->sequence[LNC_ENGINE_ENCODE]); -+ -+ psb_fence_handler(dev, LNC_ENGINE_ENCODE); -+ -+ dev_priv->topaz_busy = 1; -+ lnc_topaz_dequeue_send(dev); -+} -+ -+static int lnc_submit_encode_cmdbuf(struct drm_device *dev, -+ struct ttm_buffer_object *cmd_buffer, -+ unsigned long cmd_offset, unsigned long cmd_size, -+ struct ttm_fence_object *fence) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ unsigned long irq_flags; -+ int ret = 0; -+ void *cmd; -+ uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE]; -+ -+ PSB_DEBUG_GENERAL("TOPAZ: command submit\n"); -+ -+ /* # lock topaz's mutex [msvdx_mutex] */ -+ mutex_lock(&dev_priv->topaz_mutex); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: topaz busy = %d\n", dev_priv->topaz_busy); -+ -+ if (dev_priv->topaz_fw_loaded == 0) { -+ /* #.# load fw to driver */ -+ PSB_DEBUG_INIT("TOPAZ: load /lib/firmware/topaz_fw.bin\n"); -+ ret = topaz_init_fw(dev); -+ if (ret != 0) { -+ mutex_unlock(&dev_priv->topaz_mutex); -+ -+ /* FIXME: find a proper return value */ -+ DRM_ERROR("TOPAX:load /lib/firmware/topaz_fw.bin fail," -+ "ensure udevd is configured correctly!\n"); -+ -+ return -EFAULT; -+ } -+ dev_priv->topaz_fw_loaded = 1; -+ } else { -+ /* OSPM power state change */ -+ /* FIXME: why here? why not in the NEW_CODEC case? 
*/ -+ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX) { -+ psb_power_up_topaz(dev); -+ lnc_topaz_restore_mtx_state(dev); -+ } -+ } -+ -+ /* # schedule watchdog */ -+ /* psb_schedule_watchdog(dev_priv); */ -+ -+ /* # spin lock irq save [msvdx_lock] */ -+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags); -+ -+ /* # if topaz need to reset, reset it */ -+ if (dev_priv->topaz_needs_reset) { -+ /* #.# reset it */ -+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags); -+ PSB_DEBUG_GENERAL("TOPAZ: needs reset.\n"); -+ -+ if (lnc_topaz_reset(dev_priv)) { -+ mutex_unlock(&dev_priv->topaz_mutex); -+ ret = -EBUSY; -+ DRM_ERROR("TOPAZ: reset failed.\n"); -+ return ret; -+ } -+ -+ PSB_DEBUG_GENERAL("TOPAZ: reset ok.\n"); -+ -+ /* #.# reset any related flags */ -+ dev_priv->topaz_needs_reset = 0; -+ dev_priv->topaz_busy = 0; -+ PSB_DEBUG_GENERAL("XXX: does we need idle flag??\n"); -+ dev_priv->topaz_start_idle = 0; -+ -+ /* #.# init topaz */ -+ lnc_topaz_init(dev); -+ -+ /* avoid another fw init */ -+ dev_priv->topaz_fw_loaded = 1; -+ -+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags); -+ } -+ -+ if (!dev_priv->topaz_busy) { -+ /* # direct map topaz command if topaz is free */ -+ PSB_DEBUG_GENERAL("TOPAZ:direct send command,sequence %08x \n", -+ sequence); -+ -+ dev_priv->topaz_busy = 1; -+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags); -+ -+ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset, -+ cmd_size, NULL, sequence, 0); -+ -+ if (ret) { -+ DRM_ERROR("TOPAZ: failed to extract cmd...\n"); -+ mutex_unlock(&dev_priv->topaz_mutex); -+ return ret; -+ } -+ } else { -+ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence %08x \n", -+ sequence); -+ cmd = NULL; -+ -+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags); -+ -+ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset, -+ cmd_size, &cmd, sequence, 1); -+ if (cmd == NULL || ret) { -+ DRM_ERROR("TOPAZ: map command for save fialed\n"); -+ mutex_unlock(&dev_priv->topaz_mutex); -+ return ret; -+ } -+ -+ ret = lnc_topaz_save_command(dev, cmd, cmd_size, sequence); -+ if (ret) -+ DRM_ERROR("TOPAZ: save command failed\n"); -+ } -+ -+ /* OPSM D0IX power state change */ -+ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX) -+ lnc_topaz_save_mtx_state(dev); -+ -+ mutex_unlock(&dev_priv->topaz_mutex); -+ -+ return ret; -+} -+ -+static int lnc_topaz_save_command(struct drm_device *dev, void *cmd, -+ unsigned long cmd_size, uint32_t sequence) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ struct lnc_topaz_cmd_queue *topaz_cmd; -+ unsigned long irq_flags; -+ -+ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence: %08x..\n", -+ sequence); -+ -+ topaz_cmd = drm_calloc(1, sizeof(struct lnc_topaz_cmd_queue), -+ DRM_MEM_DRIVER); -+ if (topaz_cmd == NULL) { -+ mutex_unlock(&dev_priv->topaz_mutex); -+ DRM_ERROR("TOPAZ: out of memory....\n"); -+ return -ENOMEM; -+ } -+ -+ topaz_cmd->cmd = cmd; -+ topaz_cmd->cmd_size = cmd_size; -+ topaz_cmd->sequence = sequence; -+ -+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags); -+ list_add_tail(&topaz_cmd->head, &dev_priv->topaz_queue); -+ if (!dev_priv->topaz_busy) { -+ /* dev_priv->topaz_busy = 1; */ -+ PSB_DEBUG_GENERAL("TOPAZ: need immediate dequeue...\n"); -+ lnc_topaz_dequeue_send(dev); -+ PSB_DEBUG_GENERAL("TOPAZ: after dequeue command\n"); -+ } -+ -+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags); -+ -+ return 0; -+} -+ -+ -+int lnc_cmdbuf_video(struct drm_file *priv, -+ struct list_head *validate_list, -+ uint32_t fence_type, -+ struct drm_psb_cmdbuf_arg *arg, -+ 
struct ttm_buffer_object *cmd_buffer, -+ struct psb_ttm_fence_rep *fence_arg) -+{ -+ struct drm_device *dev = priv->minor->dev; -+ struct ttm_fence_object *fence = NULL; -+ int ret; -+ -+ ret = lnc_submit_encode_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset, -+ arg->cmdbuf_size, fence); -+ if (ret) -+ return ret; -+ -+#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */ -+ psb_fence_or_sync(priv, LNC_ENGINE_ENCODE, fence_type, arg->fence_flags, -+ validate_list, fence_arg, &fence); -+ -+ if (fence) -+ ttm_fence_object_unref(&fence); -+#endif -+ -+ mutex_lock(&cmd_buffer->mutex); -+ if (cmd_buffer->sync_obj != NULL) -+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj); -+ mutex_unlock(&cmd_buffer->mutex); -+ -+ return 0; -+} -+ -+static int lnc_topaz_sync(struct drm_device *dev, uint32_t sync_seq) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ uint32_t sync_cmd[3]; -+ int count = 10000; -+#if 0 -+ struct ttm_fence_device *fdev = &dev_priv->fdev; -+ struct ttm_fence_class_manager *fc = -+ &fdev->fence_class[LNC_ENGINE_ENCODE]; -+ unsigned long irq_flags; -+#endif -+ uint32_t *sync_p = (uint32_t *)dev_priv->topaz_sync_addr; -+ -+ /* insert a SYNC command here */ -+ dev_priv->topaz_sync_cmd_seq = (1 << 15) | dev_priv->topaz_cmd_seq++; -+ sync_cmd[0] = MTX_CMDID_SYNC | (3 << 8) | -+ (dev_priv->topaz_sync_cmd_seq << 16); -+ sync_cmd[1] = dev_priv->topaz_sync_offset; -+ sync_cmd[2] = sync_seq; -+ -+ PSB_DEBUG_GENERAL("TOPAZ:MTX_CMDID_SYNC: size(3),cmd seq (0x%04x)," -+ "sync_seq (0x%08x)\n", -+ dev_priv->topaz_sync_cmd_seq, sync_seq); -+ -+ lnc_mtx_send(dev_priv, sync_cmd); -+ -+#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */ -+ /* # poll topaz register for certain times */ -+ while (count && *sync_p != sync_seq) { -+ DRM_UDELAY(100); -+ --count; -+ } -+ if ((count == 0) && (*sync_p != sync_seq)) { -+ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),actual 0x%08x\n", -+ sync_seq, *sync_p); -+ return -EBUSY; -+ } -+ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p); -+ -+ dev_priv->topaz_busy = 0; -+ -+ /* XXX: check psb_fence_handler is suitable for topaz */ -+ dev_priv->topaz_current_sequence = *sync_p; -+#if 0 -+ write_lock_irqsave(&fc->lock, irq_flags); -+ ttm_fence_handler(fdev, LNC_ENGINE_ENCODE, -+ dev_priv->topaz_current_sequence, -+ _PSB_FENCE_TYPE_EXE, 0); -+ write_unlock_irqrestore(&fc->lock, irq_flags); -+#endif -+#endif -+ return 0; -+} -+ -+int -+lnc_topaz_deliver_command(struct drm_device *dev, -+ struct ttm_buffer_object *cmd_buffer, -+ unsigned long cmd_offset, unsigned long cmd_size, -+ void **topaz_cmd, uint32_t sequence, -+ int copy_cmd) -+{ -+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK; -+ struct ttm_bo_kmap_obj cmd_kmap; -+ bool is_iomem; -+ int ret; -+ unsigned char *cmd_start, *tmp; -+ -+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2, -+ &cmd_kmap); -+ if (ret) { -+ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", ret); -+ return ret; -+ } -+ cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap, -+ &is_iomem) + cmd_page_offset; -+ -+ if (copy_cmd) { -+ PSB_DEBUG_GENERAL("TOPAZ: queue commands\n"); -+ tmp = drm_calloc(1, cmd_size, DRM_MEM_DRIVER); -+ if (tmp == NULL) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ memcpy(tmp, cmd_start, cmd_size); -+ *topaz_cmd = tmp; -+ } else { -+ PSB_DEBUG_GENERAL("TOPAZ: directly send the command\n"); -+ ret = lnc_topaz_send(dev, cmd_start, cmd_size, sequence); -+ if (ret) { -+ DRM_ERROR("TOPAZ: commit commands failed.\n"); -+ ret = -EINVAL; -+ } -+ } -+ -+out: -+ 
PSB_DEBUG_GENERAL("TOPAZ:cmd_size(%ld), sequence(%d) copy_cmd(%d)\n", -+ cmd_size, sequence, copy_cmd); -+ -+ ttm_bo_kunmap(&cmd_kmap); -+ -+ return ret; -+} -+ -+int -+lnc_topaz_send(struct drm_device *dev, void *cmd, -+ unsigned long cmd_size, uint32_t sync_seq) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ int ret = 0; -+ unsigned char *command = (unsigned char *) cmd; -+ struct topaz_cmd_header *cur_cmd_header; -+ uint32_t cur_cmd_size, cur_cmd_id; -+ uint32_t codec; -+ -+ PSB_DEBUG_GENERAL("TOPAZ: send the command in the buffer one by one\n"); -+ -+ while (cmd_size > 0) { -+ cur_cmd_header = (struct topaz_cmd_header *) command; -+ cur_cmd_size = cur_cmd_header->size * 4; -+ cur_cmd_id = cur_cmd_header->id; -+ -+ switch (cur_cmd_id) { -+ case MTX_CMDID_SW_NEW_CODEC: -+ codec = *((uint32_t *) cmd + 1); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: setup new codec %s (%d)\n", -+ codec_to_string(codec), codec); -+ if (topaz_setup_fw(dev, codec)) { -+ DRM_ERROR("TOPAZ: upload FW to HW failed\n"); -+ return -EBUSY; -+ } -+ -+ dev_priv->topaz_cur_codec = codec; -+ break; -+ -+ case MTX_CMDID_SW_ENTER_LOWPOWER: -+ PSB_DEBUG_GENERAL("TOPAZ: enter lowpower.... \n"); -+ PSB_DEBUG_GENERAL("XXX: implement it\n"); -+ break; -+ -+ case MTX_CMDID_SW_LEAVE_LOWPOWER: -+ PSB_DEBUG_GENERAL("TOPAZ: leave lowpower... \n"); -+ PSB_DEBUG_GENERAL("XXX: implement it\n"); -+ break; -+ -+ /* ordinary commmand */ -+ case MTX_CMDID_START_PIC: -+ /* XXX: specially handle START_PIC hw command */ -+ CCB_CTRL_SET_QP(dev_priv, -+ *(command + cur_cmd_size - 4)); -+ /* strip the QP parameter (it's software arg) */ -+ cur_cmd_header->size--; -+ default: -+ cur_cmd_header->seq = 0x7fff & -+ dev_priv->topaz_cmd_seq++; -+ -+ PSB_DEBUG_GENERAL("TOPAZ: %s: size(%d)," -+ " seq (0x%04x)\n", -+ cmd_to_string(cur_cmd_id), -+ cur_cmd_size, cur_cmd_header->seq); -+ ret = lnc_mtx_send(dev_priv, command); -+ if (ret) { -+ DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret); -+ goto out; -+ } -+ break; -+ } -+ -+ command += cur_cmd_size; -+ cmd_size -= cur_cmd_size; -+ } -+ lnc_topaz_sync(dev, sync_seq); -+out: -+ return ret; -+} -+ -+static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd) -+{ -+ struct topaz_cmd_header *cur_cmd_header = -+ (struct topaz_cmd_header *) cmd; -+ uint32_t cmd_size = cur_cmd_header->size; -+ uint32_t read_index, write_index; -+ const uint32_t *cmd_pointer = (uint32_t *) cmd; -+ -+ int ret = 0; -+ -+ /* <msvdx does> # enable all clock */ -+ -+ write_index = dev_priv->topaz_cmd_windex; -+ if (write_index + cmd_size + 1 > dev_priv->topaz_ccb_size) { -+ int free_space = dev_priv->topaz_ccb_size - write_index; -+ -+ PSB_DEBUG_GENERAL("TOPAZ: -------will wrap CCB write point.\n"); -+ if (free_space > 0) { -+ struct topaz_cmd_header pad_cmd; -+ -+ pad_cmd.id = MTX_CMDID_NULL; -+ pad_cmd.size = free_space; -+ pad_cmd.seq = 0x7fff & dev_priv->topaz_cmd_seq++; -+ -+ PSB_DEBUG_GENERAL("TOPAZ: MTX_CMDID_NULL:" -+ " size(%d),seq (0x%04x)\n", -+ pad_cmd.size, pad_cmd.seq); -+ -+ TOPAZ_BEGIN_CCB(dev_priv); -+ TOPAZ_OUT_CCB(dev_priv, pad_cmd.val); -+ TOPAZ_END_CCB(dev_priv, 1); -+ } -+ POLL_WB_RINDEX(dev_priv, 0); -+ if (ret == 0) -+ dev_priv->topaz_cmd_windex = 0; -+ else { -+ DRM_ERROR("TOPAZ: poll rindex timeout\n"); -+ return ret; /* HW may hang, need reset */ -+ } -+ PSB_DEBUG_GENERAL("TOPAZ: -------wrap CCB was done.\n"); -+ } -+ -+ read_index = CCB_CTRL_RINDEX(dev_priv);/* temperily use CCB CTRL */ -+ write_index = dev_priv->topaz_cmd_windex; -+ -+ PSB_DEBUG_GENERAL("TOPAZ: write index(%d), read 
index(%d,WB=%d)\n", -+ write_index, read_index, WB_CCB_CTRL_RINDEX(dev_priv)); -+ TOPAZ_BEGIN_CCB(dev_priv); -+ while (cmd_size > 0) { -+ TOPAZ_OUT_CCB(dev_priv, *cmd_pointer++); -+ --cmd_size; -+ } -+ TOPAZ_END_CCB(dev_priv, 1); -+ -+ POLL_WB_RINDEX(dev_priv, dev_priv->topaz_cmd_windex); -+ -+#if 0 -+ DRM_UDELAY(1000); -+ lnc_topaz_clearirq(dev, -+ lnc_topaz_queryirq(dev)); -+ LNC_TRACEL("TOPAZ: after clear, query again\n"); -+ lnc_topaz_queryirq(dev_priv); -+#endif -+ -+ return ret; -+} -+ -+int lnc_topaz_dequeue_send(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ struct lnc_topaz_cmd_queue *topaz_cmd = NULL; -+ int ret; -+ -+ PSB_DEBUG_GENERAL("TOPAZ: dequeue command and send it to topaz\n"); -+ -+ if (list_empty(&dev_priv->topaz_queue)) { -+ dev_priv->topaz_busy = 0; -+ return 0; -+ } -+ -+ topaz_cmd = list_first_entry(&dev_priv->topaz_queue, -+ struct lnc_topaz_cmd_queue, head); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: queue has id %08x\n", topaz_cmd->sequence); -+ ret = lnc_topaz_send(dev, topaz_cmd->cmd, topaz_cmd->cmd_size, -+ topaz_cmd->sequence); -+ if (ret) { -+ DRM_ERROR("TOPAZ: lnc_topaz_send failed.\n"); -+ ret = -EINVAL; -+ } -+ -+ list_del(&topaz_cmd->head); -+ kfree(topaz_cmd->cmd); -+ drm_free(topaz_cmd, sizeof(struct lnc_topaz_cmd_queue), -+ DRM_MEM_DRIVER); -+ -+ return ret; -+} -+ -+void -+lnc_topaz_lockup(struct drm_psb_private *dev_priv, -+ int *topaz_lockup, int *topaz_idle) -+{ -+ unsigned long irq_flags; -+ uint32_t tmp; -+ -+ /* if have printk in this function, you will have plenties here */ -+ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags); -+ *topaz_lockup = 0; -+ *topaz_idle = 1; -+ -+ if (!dev_priv->has_topaz) { -+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags); -+ return; -+ } -+ -+ tmp = dev_priv->topaz_current_sequence -+ - dev_priv->sequence[LNC_ENGINE_ENCODE]; -+ if (tmp > 0x0FFFFFFF) { -+ if (dev_priv->topaz_current_sequence == -+ dev_priv->topaz_last_sequence) { -+ *topaz_lockup = 1; -+ } else { -+ dev_priv->topaz_last_sequence = -+ dev_priv->topaz_current_sequence; -+ *topaz_idle = 0; -+ } -+ -+ if (dev_priv->topaz_start_idle) -+ dev_priv->topaz_start_idle = 0; -+ } else { -+ if (dev_priv->topaz_needs_reset == 0) { -+ if (dev_priv->topaz_start_idle && -+ (dev_priv->topaz_finished_sequence -+ == dev_priv->topaz_current_sequence)) { -+ if (time_after_eq(jiffies, -+ dev_priv->topaz_idle_start_jiffies + -+ TOPAZ_MAX_IDELTIME)) { -+ -+ /* XXX: disable clock <msvdx does> */ -+ dev_priv->topaz_needs_reset = 1; -+ } else -+ *topaz_idle = 0; -+ } else { -+ dev_priv->topaz_start_idle = 1; -+ dev_priv->topaz_idle_start_jiffies = jiffies; -+ dev_priv->topaz_finished_sequence = -+ dev_priv->topaz_current_sequence; -+ *topaz_idle = 0; -+ } -+ } -+ } -+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags); -+} -+ -+ -+void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count) -+{ -+ PSB_DEBUG_GENERAL("TOPAZ: kick mtx count(%d).\n", kick_count); -+ MTX_WRITE32(MTX_CR_MTX_KICK, kick_count); -+} -+ -+/* power up msvdx, OSPM function */ -+int psb_power_up_topaz(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *)dev->dev_private; -+ -+ if (dev_priv->topaz_power_state == LNC_TOPAZ_POWERON) -+ return 0; -+ -+ psb_up_island_power(dev, PSB_VIDEO_ENC_ISLAND); -+ -+ PSB_DEBUG_GENERAL("FIXME: how to write clock state for topaz?" 
-+ " so many clock\n"); -+ /* PSB_WMSVDX32(dev_priv->topaz_clk_state, MSVDX_MAN_CLK_ENABLE); */ -+ -+ PSB_DEBUG_GENERAL("FIXME restore registers or init msvdx\n"); -+ -+ PSB_DEBUG_GENERAL("FIXME: flush all mmu\n"); -+ -+ dev_priv->topaz_power_state = LNC_TOPAZ_POWERON; -+ -+ return 0; -+} -+ -+int psb_power_down_topaz(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *)dev->dev_private; -+ -+ if (dev_priv->topaz_power_state == LNC_TOPAZ_POWEROFF) -+ return 0; -+ -+ if (dev_priv->topaz_busy) { -+ PSB_DEBUG_GENERAL("FIXME: MSVDX is busy, should wait it\n"); -+ return -EBUSY; -+ } -+ PSB_DEBUG_GENERAL("FIXME: how to read clock state for topaz?" -+ " so many clock\n"); -+ /* dev_priv->topaz_clk_state = PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE); */ -+ PSB_DEBUG_GENERAL("FIXME: save MSVDX register\n"); -+ PSB_DEBUG_GENERAL("FIXME: save MSVDX context\n"); -+ -+ psb_down_island_power(dev, PSB_VIDEO_ENC_ISLAND); -+ -+ dev_priv->topaz_power_state = LNC_TOPAZ_POWEROFF; -+ -+ return 0; -+} -+ -+int lnc_prepare_topaz_suspend(struct drm_device *dev) -+{ -+ /* FIXME: need reset when resume? -+ * Is mtx restore enough for encoder continue run? */ -+ /* dev_priv->topaz_needs_reset = 1; */ -+ -+ /* make sure all IRQs are seviced */ -+ -+ /* make sure all the fence is signaled */ -+ -+ /* save mtx context into somewhere */ -+ /* lnc_topaz_save_mtx_state(dev); */ -+ -+ return 0; -+} -+ -+int lnc_prepare_topaz_resume(struct drm_device *dev) -+{ -+ /* FIXME: need reset when resume? -+ * Is mtx restore enough for encoder continue run? */ -+ /* dev_priv->topaz_needs_reset = 1; */ -+ -+ /* make sure IRQ is open */ -+ -+ /* restore mtx context */ -+ /* lnc_topaz_restore_mtx_state(dev); */ -+ -+ return 0; -+} -diff -uNr a/drivers/gpu/drm/psb/lnc_topaz.h b/drivers/gpu/drm/psb/lnc_topaz.h ---- a/drivers/gpu/drm/psb/lnc_topaz.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/lnc_topaz.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,803 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA -+ * Copyright (c) Imagination Technologies Limited, UK -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ * -+ **************************************************************************/ -+ -+#ifndef _LNC_TOPAZ_H_ -+#define _LNC_TOPAZ_H_ -+ -+#include "psb_drv.h" -+ -+#define LNC_TOPAZ_NO_IRQ 1 -+#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4) -+#define ENABLE_TOPAZ_OSPM_D0IX (0x10) -+ -+/* extern int drm_psb_ospm; */ -+ -+int psb_power_up_topaz(struct drm_device *dev); -+int psb_power_down_topaz(struct drm_device *dev); -+int lnc_prepare_topaz_suspend(struct drm_device *dev); -+int lnc_prepare_topaz_resume(struct drm_device *dev); -+ -+/* -+ * MACROS to insert values into fields within a word. The basename of the -+ * field must have MASK_BASENAME and SHIFT_BASENAME constants. -+ */ -+#define MM_WRITE32(base, offset, value) \ -+do { \ -+ *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg) \ -+ + base + offset)) = value; \ -+} while (0) -+ -+#define MM_READ32(base, offset, pointer) \ -+do { \ -+ *(pointer) = *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)\ -+ + base + offset)); \ -+} while (0) -+ -+#define F_MASK(basename) (MASK_##basename) -+#define F_SHIFT(basename) (SHIFT_##basename) -+ -+#define F_ENCODE(val, basename) \ -+ (((val) << (F_SHIFT(basename))) & (F_MASK(basename))) -+ -+/* MVEA macro */ -+#define MVEA_START 0x03000 -+ -+#define MVEA_WRITE32(offset, value) MM_WRITE32(MVEA_START, offset, value) -+#define MVEA_READ32(offset, pointer) MM_READ32(MVEA_START, offset, pointer); -+ -+#define F_MASK_MVEA(basename) (MASK_MVEA_##basename) /* MVEA */ -+#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename) /* MVEA */ -+#define F_ENCODE_MVEA(val, basename) \ -+ (((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename))) -+ -+/* VLC macro */ -+#define TOPAZ_VLC_START 0x05000 -+ -+/* TOPAZ macro */ -+#define TOPAZ_START 0x02000 -+ -+#define TOPAZ_WRITE32(offset, value) MM_WRITE32(TOPAZ_START, offset, value) -+#define TOPAZ_READ32(offset, pointer) MM_READ32(TOPAZ_START, offset, pointer) -+ -+#define F_MASK_TOPAZ(basename) (MASK_TOPAZ_##basename) -+#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename) -+#define F_ENCODE_TOPAZ(val,basename) \ -+ (((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename))) -+ -+/* MTX macro */ -+#define MTX_START 0x0 -+ -+#define MTX_WRITE32(offset, value) MM_WRITE32(MTX_START, offset, value) -+#define MTX_READ32(offset, pointer) MM_READ32(MTX_START, offset, pointer) -+ -+/* DMAC macro */ -+#define DMAC_START 0x0f000 -+ -+#define DMAC_WRITE32(offset, value) MM_WRITE32(DMAC_START, offset, value) -+#define DMAC_READ32(offset, pointer) MM_READ32(DMAC_START, offset, pointer) -+ -+#define F_MASK_DMAC(basename) (MASK_DMAC_##basename) -+#define F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename) -+#define F_ENCODE_DMAC(val,basename) \ -+ (((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename))) -+ -+ -+/* Register CR_IMG_TOPAZ_INTENAB */ -+#define TOPAZ_CR_IMG_TOPAZ_INTENAB 0x0008 -+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008 -+ -+#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008 -+ -+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000008 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008 -+ -+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008 -+ -+#define 
MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008 -+ -+#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C -+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C -+ -+#define TOPAZ_CR_IMG_TOPAZ_INTSTAT 0x0004 -+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004 -+ -+#define MTX_CCBCTRL_ROFF 0 -+#define MTX_CCBCTRL_COMPLETE 4 -+#define MTX_CCBCTRL_CCBSIZE 8 -+#define MTX_CCBCTRL_QP 12 -+#define MTX_CCBCTRL_INITQP 24 -+ -+#define TOPAZ_CR_MMU_STATUS 0x001C -+#define MASK_TOPAZ_CR_MMU_PF_N_RW 0x00000001 -+#define SHIFT_TOPAZ_CR_MMU_PF_N_RW 0 -+#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C -+ -+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C -+ -+#define TOPAZ_CR_MMU_MEM_REQ 0x0020 -+#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF -+#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0 -+#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020 -+ -+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C -+ -+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C -+ -+#define MTX_CR_MTX_KICK 0x0080 -+#define MASK_MTX_MTX_KICK 0x0000FFFF -+#define SHIFT_MTX_MTX_KICK 0 -+#define REGNUM_MTX_MTX_KICK 0x0080 -+ -+#define MTX_DATA_MEM_BASE 0x82880000 -+ -+#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108 -+#define MASK_MTX_MTX_MCMR 0x00000001 -+#define SHIFT_MTX_MTX_MCMR 0 -+#define REGNUM_MTX_MTX_MCMR 0x0108 -+ -+#define MASK_MTX_MTX_MCMID 0x0FF00000 -+#define SHIFT_MTX_MTX_MCMID 20 -+#define REGNUM_MTX_MTX_MCMID 0x0108 -+ -+#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC -+#define SHIFT_MTX_MTX_MCM_ADDR 2 -+#define REGNUM_MTX_MTX_MCM_ADDR 0x0108 -+ -+#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C -+#define MASK_MTX_MTX_MTX_MCM_STAT 0x00000001 -+#define SHIFT_MTX_MTX_MTX_MCM_STAT 0 -+#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C -+ -+#define MASK_MTX_MTX_MCMAI 0x00000002 -+#define SHIFT_MTX_MTX_MCMAI 1 -+#define REGNUM_MTX_MTX_MCMAI 0x0108 -+ -+#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104 -+ -+#define MVEA_CR_IMG_MVEA_SRST 0x0000 -+#define MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001 -+#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0 -+#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000 -+ -+#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002 -+#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1 -+#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000 -+ -+#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004 -+#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2 -+#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000 -+ -+#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008 -+#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3 -+#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000 -+ -+#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010 -+#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4 -+#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000 -+ -+#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020 -+#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5 -+#define 
REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000 -+ -+#define TOPAZ_CR_IMG_TOPAZ_CORE_ID 0x03C0 -+#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0 -+ -+#define TOPAZ_MTX_PC (0x00000005) -+#define PC_START_ADDRESS (0x80900000) -+ -+#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014 -+#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001 -+#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0 -+#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014 -+ -+#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002 -+#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1 -+#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014 -+ -+#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002 -+#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1 -+#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010 -+ -+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8 -+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC -+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000 -+#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000 -+ -+#define TOPAZ_CORE_CR_MTX_DEBUG_OFFSET 0x0000003C -+ -+#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004 -+#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2 -+#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C -+ -+#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018 -+#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3 -+#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C -+ -+#define MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108 -+ -+#define TOPAZ_CR_MMU_CONTROL0 0x0024 -+#define MASK_TOPAZ_CR_MMU_BYPASS 0x00000800 -+#define SHIFT_TOPAZ_CR_MMU_BYPASS 11 -+#define REGNUM_TOPAZ_CR_MMU_BYPASS 0x0024 -+ -+#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X))) -+#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000 -+#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12 -+#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030 -+ -+#define MASK_TOPAZ_CR_MMU_INVALDC 0x00000008 -+#define SHIFT_TOPAZ_CR_MMU_INVALDC 3 -+#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024 -+ -+#define MASK_TOPAZ_CR_MMU_FLUSH 0x00000004 -+#define SHIFT_TOPAZ_CR_MMU_FLUSH 2 -+#define REGNUM_TOPAZ_CR_MMU_FLUSH 0x0024 -+ -+#define TOPAZ_CR_MMU_BANK_INDEX 0x0038 -+#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2))) -+#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2)) -+#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0038 -+ -+#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010 -+#define MASK_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x00000001 -+#define SHIFT_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0 -+#define REGNUM_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x0010 -+ -+#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c -+#define TXRPT_WAITONKICK_VALUE 0x8ade0000 -+ -+#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002 -+ -+#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000 -+#define MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001 -+ -+#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004 -+ -+#define MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200 -+#define MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001 -+ -+#define MTX_CR_MTX_SYSC_CDMAA 0x0344 -+#define MASK_MTX_CDMAA_ADDRESS 0x03FFFFFC -+#define SHIFT_MTX_CDMAA_ADDRESS 2 -+#define REGNUM_MTX_CDMAA_ADDRESS 0x0344 -+ -+#define MTX_CR_MTX_SYSC_CDMAC 0x0340 -+#define MASK_MTX_LENGTH 0x0000FFFF -+#define SHIFT_MTX_LENGTH 0 -+#define REGNUM_MTX_LENGTH 0x0340 -+ -+#define MASK_MTX_BURSTSIZE 0x07000000 -+#define SHIFT_MTX_BURSTSIZE 24 -+#define 
REGNUM_MTX_BURSTSIZE 0x0340 -+ -+#define MASK_MTX_RNW 0x00020000 -+#define SHIFT_MTX_RNW 17 -+#define REGNUM_MTX_RNW 0x0340 -+ -+#define MASK_MTX_ENABLE 0x00010000 -+#define SHIFT_MTX_ENABLE 16 -+#define REGNUM_MTX_ENABLE 0x0340 -+ -+#define MASK_MTX_LENGTH 0x0000FFFF -+#define SHIFT_MTX_LENGTH 0 -+#define REGNUM_MTX_LENGTH 0x0340 -+ -+#define TOPAZ_CR_IMG_TOPAZ_SRST 0x0000 -+#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000 -+ -+#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000 -+ -+#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002 -+#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1 -+#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000 -+ -+#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024 -+#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001 -+#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0 -+#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024 -+ -+#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002 -+#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1 -+#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024 -+ -+#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004 -+#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2 -+#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024 -+ -+#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008 -+#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3 -+#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024 -+ -+#define TOPAZ_CR_IMG_TOPAZ_DMAC_MODE 0x0040 -+#define MASK_TOPAZ_CR_DMAC_MASTER_MODE 0x00000001 -+#define SHIFT_TOPAZ_CR_DMAC_MASTER_MODE 0 -+#define REGNUM_TOPAZ_CR_DMAC_MASTER_MODE 0x0040 -+ -+#define MTX_CR_MTX_SYSC_CDMAT 0x0350 -+#define MASK_MTX_TRANSFERDATA 0xFFFFFFFF -+#define SHIFT_MTX_TRANSFERDATA 0 -+#define REGNUM_MTX_TRANSFERDATA 0x0350 -+ -+#define IMG_SOC_DMAC_IRQ_STAT(X) (0x000C + (32 * (X))) -+#define MASK_IMG_SOC_TRANSFER_FIN 0x00020000 -+#define SHIFT_IMG_SOC_TRANSFER_FIN 17 -+#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C -+ -+#define IMG_SOC_DMAC_COUNT(X) (0x0004 + (32 * (X))) -+#define MASK_IMG_SOC_CNT 0x0000FFFF -+#define SHIFT_IMG_SOC_CNT 0 -+#define REGNUM_IMG_SOC_CNT 0x0004 -+ -+#define MASK_IMG_SOC_EN 0x00010000 -+#define SHIFT_IMG_SOC_EN 16 -+#define REGNUM_IMG_SOC_EN 0x0004 -+ -+#define MASK_IMG_SOC_LIST_EN 0x00040000 -+#define SHIFT_IMG_SOC_LIST_EN 18 -+#define REGNUM_IMG_SOC_LIST_EN 0x0004 -+ -+#define IMG_SOC_DMAC_PER_HOLD(X) (0x0018 + (32 * (X))) -+#define MASK_IMG_SOC_PER_HOLD 0x0000007F -+#define SHIFT_IMG_SOC_PER_HOLD 0 -+#define REGNUM_IMG_SOC_PER_HOLD 0x0018 -+ -+#define IMG_SOC_DMAC_SETUP(X) (0x0000 + (32 * (X))) -+#define MASK_IMG_SOC_START_ADDRESS 0xFFFFFFF -+#define SHIFT_IMG_SOC_START_ADDRESS 0 -+#define REGNUM_IMG_SOC_START_ADDRESS 0x0000 -+ -+#define MASK_IMG_SOC_BSWAP 0x40000000 -+#define SHIFT_IMG_SOC_BSWAP 30 -+#define REGNUM_IMG_SOC_BSWAP 0x0004 -+ -+#define MASK_IMG_SOC_PW 0x18000000 -+#define SHIFT_IMG_SOC_PW 27 -+#define REGNUM_IMG_SOC_PW 0x0004 -+ -+#define MASK_IMG_SOC_DIR 0x04000000 -+#define SHIFT_IMG_SOC_DIR 26 -+#define REGNUM_IMG_SOC_DIR 0x0004 -+ -+#define MASK_IMG_SOC_PI 0x03000000 -+#define SHIFT_IMG_SOC_PI 24 -+#define REGNUM_IMG_SOC_PI 0x0004 -+#define IMG_SOC_PI_1 0x00000002 -+#define IMG_SOC_PI_2 0x00000001 -+#define IMG_SOC_PI_4 0x00000000 -+ -+#define MASK_IMG_SOC_TRANSFER_IEN 0x20000000 -+#define SHIFT_IMG_SOC_TRANSFER_IEN 29 -+#define 
REGNUM_IMG_SOC_TRANSFER_IEN 0x0004 -+ -+#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \ -+ ((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP)| \ -+ (((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW)| \ -+ (((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR)| \ -+ (((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI)| \ -+ (((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT)) -+ -+#define IMG_SOC_DMAC_PERIPH(X) (0x0008 + (32 * (X))) -+#define MASK_IMG_SOC_EXT_SA 0x0000000F -+#define SHIFT_IMG_SOC_EXT_SA 0 -+#define REGNUM_IMG_SOC_EXT_SA 0x0008 -+ -+#define MASK_IMG_SOC_ACC_DEL 0xE0000000 -+#define SHIFT_IMG_SOC_ACC_DEL 29 -+#define REGNUM_IMG_SOC_ACC_DEL 0x0008 -+ -+#define MASK_IMG_SOC_INCR 0x08000000 -+#define SHIFT_IMG_SOC_INCR 27 -+#define REGNUM_IMG_SOC_INCR 0x0008 -+ -+#define MASK_IMG_SOC_BURST 0x07000000 -+#define SHIFT_IMG_SOC_BURST 24 -+#define REGNUM_IMG_SOC_BURST 0x0008 -+ -+#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \ -+((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL)| \ -+(((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR)| \ -+(((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST)) -+ -+#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X))) -+#define MASK_IMG_SOC_ADDR 0x007FFFFF -+#define SHIFT_IMG_SOC_ADDR 0 -+#define REGNUM_IMG_SOC_ADDR 0x0014 -+ -+/* **************** DMAC define **************** */ -+enum DMAC_eBSwap { -+ DMAC_BSWAP_NO_SWAP = 0x0,/* !< No byte swapping will be performed. */ -+ DMAC_BSWAP_REVERSE = 0x1,/* !< Byte order will be reversed. */ -+}; -+ -+enum DMAC_ePW { -+ DMAC_PWIDTH_32_BIT = 0x0,/* !< Peripheral width 32-bit. */ -+ DMAC_PWIDTH_16_BIT = 0x1,/* !< Peripheral width 16-bit. */ -+ DMAC_PWIDTH_8_BIT = 0x2,/* !< Peripheral width 8-bit. */ -+}; -+ -+enum DMAC_eAccDel { -+ DMAC_ACC_DEL_0 = 0x0, /* !< Access delay zero clock cycles */ -+ DMAC_ACC_DEL_256 = 0x1, /* !< Access delay 256 clock cycles */ -+ DMAC_ACC_DEL_512 = 0x2, /* !< Access delay 512 clock cycles */ -+ DMAC_ACC_DEL_768 = 0x3, /* !< Access delay 768 clock cycles */ -+ DMAC_ACC_DEL_1024 = 0x4,/* !< Access delay 1024 clock cycles */ -+ DMAC_ACC_DEL_1280 = 0x5,/* !< Access delay 1280 clock cycles */ -+ DMAC_ACC_DEL_1536 = 0x6,/* !< Access delay 1536 clock cycles */ -+ DMAC_ACC_DEL_1792 = 0x7,/* !< Access delay 1792 clock cycles */ -+}; -+ -+enum DMAC_eBurst { -+ DMAC_BURST_0 = 0x0, /* !< burst size of 0 */ -+ DMAC_BURST_1 = 0x1, /* !< burst size of 1 */ -+ DMAC_BURST_2 = 0x2, /* !< burst size of 2 */ -+ DMAC_BURST_3 = 0x3, /* !< burst size of 3 */ -+ DMAC_BURST_4 = 0x4, /* !< burst size of 4 */ -+ DMAC_BURST_5 = 0x5, /* !< burst size of 5 */ -+ DMAC_BURST_6 = 0x6, /* !< burst size of 6 */ -+ DMAC_BURST_7 = 0x7, /* !< burst size of 7 */ -+}; -+ -+/* commands for topaz,shared with user space driver */ -+enum drm_lnc_topaz_cmd { -+ MTX_CMDID_NULL = 0, -+ MTX_CMDID_DO_HEADER = 1, -+ MTX_CMDID_ENCODE_SLICE = 2, -+ MTX_CMDID_WRITEREG = 3, -+ MTX_CMDID_START_PIC = 4, -+ MTX_CMDID_END_PIC = 5, -+ MTX_CMDID_SYNC = 6, -+ MTX_CMDID_ENCODE_ONE_ROW = 7, -+ MTX_CMDID_FLUSH = 8, -+ MTX_CMDID_SW_LEAVE_LOWPOWER = 0xfc, -+ MTX_CMDID_SW_ENTER_LOWPOWER = 0xfe, -+ MTX_CMDID_SW_NEW_CODEC = 0xff -+}; -+ -+/* codecs topaz supports,shared with user space driver */ -+enum drm_lnc_topaz_codec { -+ IMG_CODEC_JPEG = 0, -+ IMG_CODEC_H264_NO_RC, -+ IMG_CODEC_H264_VBR, -+ IMG_CODEC_H264_CBR, -+ IMG_CODEC_H263_NO_RC, -+ IMG_CODEC_H263_VBR, -+ IMG_CODEC_H263_CBR, -+ IMG_CODEC_MPEG4_NO_RC, -+ IMG_CODEC_MPEG4_VBR, -+ IMG_CODEC_MPEG4_CBR, -+ IMG_CODEC_NUM -+}; -+ -+/* XXX: it's a 
copy of msvdx cmd queue. should have some change? */ -+struct lnc_topaz_cmd_queue { -+ struct list_head head; -+ void *cmd; -+ unsigned long cmd_size; -+ uint32_t sequence; -+}; -+ -+ -+struct topaz_cmd_header { -+ union { -+ struct { -+ unsigned long id:8; -+ unsigned long size:8; -+ unsigned long seq:16; -+ }; -+ uint32_t val; -+ }; -+}; -+ -+/* external function declare */ -+/* lnc_topazinit.c */ -+int lnc_topaz_init(struct drm_device *dev); -+int lnc_topaz_uninit(struct drm_device *dev); -+int lnc_topaz_reset(struct drm_psb_private *dev_priv); -+int topaz_init_fw(struct drm_device *dev); -+int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec); -+int topaz_wait_for_register(struct drm_psb_private *dev_priv, -+ uint32_t addr, uint32_t value, -+ uint32_t enable); -+void topaz_write_mtx_mem(struct drm_psb_private *dev_priv, -+ uint32_t byte_addr, uint32_t val); -+uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv, -+ uint32_t byte_addr); -+void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv, -+ uint32_t addr); -+void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv, -+ uint32_t val); -+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv); -+int lnc_topaz_save_mtx_state(struct drm_device *dev); -+int lnc_topaz_restore_mtx_state(struct drm_device *dev); -+ -+/* lnc_topaz.c */ -+void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat); -+ -+int lnc_cmdbuf_video(struct drm_file *priv, -+ struct list_head *validate_list, -+ uint32_t fence_type, -+ struct drm_psb_cmdbuf_arg *arg, -+ struct ttm_buffer_object *cmd_buffer, -+ struct psb_ttm_fence_rep *fence_arg); -+ -+void lnc_topaz_flush_cmd_queue(struct drm_device *dev); -+void lnc_topaz_lockup(struct drm_psb_private *dev_priv, int *topaz_lockup, -+ int *topaz_idle); -+void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_cout); -+ -+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver); -+ -+/* macros to get/set CCB control data */ -+#define WB_CCB_CTRL_RINDEX(dev_priv) (*((uint32_t *)dev_priv->topaz_ccb_wb)) -+#define WB_CCB_CTRL_SEQ(dev_priv) (*((uint32_t *)dev_priv->topaz_ccb_wb+1)) -+ -+#define POLL_WB_RINDEX(dev_priv,value) \ -+do { \ -+ int i; \ -+ for (i = 0; i < 10000; i++) { \ -+ if (WB_CCB_CTRL_RINDEX(dev_priv) == value) \ -+ break; \ -+ else \ -+ DRM_UDELAY(100); \ -+ } \ -+ if (WB_CCB_CTRL_RINDEX(dev_priv) != value) { \ -+ DRM_ERROR("TOPAZ: poll rindex timeout\n"); \ -+ ret = -EBUSY; \ -+ } \ -+} while (0) -+ -+#define POLL_WB_SEQ(dev_priv,value) \ -+do { \ -+ int i; \ -+ for (i = 0; i < 10000; i++) { \ -+ if (WB_CCB_CTRL_SEQ(dev_priv) == value) \ -+ break; \ -+ else \ -+ DRM_UDELAY(1000); \ -+ } \ -+ if (WB_CCB_CTRL_SEQ(dev_priv) != value) { \ -+ DRM_ERROR("TOPAZ:poll mtxseq timeout,0x%04x(mtx) vs 0x%04x\n",\ -+ WB_CCB_CTRL_SEQ(dev_priv), value); \ -+ ret = -EBUSY; \ -+ } \ -+} while (0) -+ -+#define CCB_CTRL_RINDEX(dev_priv) \ -+ topaz_read_mtx_mem(dev_priv, \ -+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_ROFF) -+ -+#define CCB_CTRL_RINDEX(dev_priv) \ -+ topaz_read_mtx_mem(dev_priv, \ -+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_ROFF) -+ -+#define CCB_CTRL_QP(dev_priv) \ -+ topaz_read_mtx_mem(dev_priv, \ -+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_QP) -+ -+#define CCB_CTRL_SEQ(dev_priv) \ -+ topaz_read_mtx_mem(dev_priv, \ -+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_COMPLETE) -+ -+#define CCB_CTRL_FRAMESKIP(dev_priv) \ -+ topaz_read_mtx_mem(dev_priv, \ -+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_FRAMESKIP) -+ 
-+#define CCB_CTRL_SET_QP(dev_priv, qp) \ -+ topaz_write_mtx_mem(dev_priv, \ -+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_QP, qp) -+ -+#define CCB_CTRL_SET_INITIALQP(dev_priv, qp) \ -+ topaz_write_mtx_mem(dev_priv, \ -+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP, qp) -+ -+ -+#define TOPAZ_BEGIN_CCB(dev_priv) \ -+ topaz_write_mtx_mem_multiple_setup(dev_priv, \ -+ dev_priv->topaz_ccb_buffer_addr + \ -+ dev_priv->topaz_cmd_windex * 4) -+ -+#define TOPAZ_OUT_CCB(dev_priv, cmd) \ -+do { \ -+ topaz_write_mtx_mem_multiple(dev_priv, cmd); \ -+ dev_priv->topaz_cmd_windex++; \ -+} while (0) -+ -+#define TOPAZ_END_CCB(dev_priv,kick_count) \ -+ topaz_mtx_kick(dev_priv, 1); -+ -+static inline char *cmd_to_string(int cmd_id) -+{ -+ switch (cmd_id) { -+ case MTX_CMDID_START_PIC: -+ return "MTX_CMDID_START_PIC"; -+ case MTX_CMDID_END_PIC: -+ return "MTX_CMDID_END_PIC"; -+ case MTX_CMDID_DO_HEADER: -+ return "MTX_CMDID_DO_HEADER"; -+ case MTX_CMDID_ENCODE_SLICE: -+ return "MTX_CMDID_ENCODE_SLICE"; -+ case MTX_CMDID_SYNC: -+ return "MTX_CMDID_SYNC"; -+ -+ default: -+ return "Undefined command"; -+ -+ } -+} -+ -+static inline char *codec_to_string(int codec) -+{ -+ switch (codec) { -+ case IMG_CODEC_H264_NO_RC: -+ return "H264_NO_RC"; -+ case IMG_CODEC_H264_VBR: -+ return "H264_VBR"; -+ case IMG_CODEC_H264_CBR: -+ return "H264_CBR"; -+ case IMG_CODEC_H263_NO_RC: -+ return "H263_NO_RC"; -+ case IMG_CODEC_H263_VBR: -+ return "H263_VBR"; -+ case IMG_CODEC_H263_CBR: -+ return "H263_CBR"; -+ case IMG_CODEC_MPEG4_NO_RC: -+ return "MPEG4_NO_RC"; -+ case IMG_CODEC_MPEG4_VBR: -+ return "MPEG4_VBR"; -+ case IMG_CODEC_MPEG4_CBR: -+ return "MPEG4_CBR"; -+ default: -+ return "Undefined codec"; -+ } -+} -+ -+static inline void lnc_topaz_enableirq(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ uint32_t ier = dev_priv->vdc_irq_mask | _LNC_IRQ_TOPAZ_FLAG; -+ -+ PSB_DEBUG_IRQ("TOPAZ: enable IRQ\n"); -+ -+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, -+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) | -+ /* F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA) | */ -+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT) | -+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX) | -+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT)); -+ -+ PSB_WVDC32(ier, PSB_INT_ENABLE_R); /* essential */ -+} -+ -+static inline void lnc_topaz_disableirq(struct drm_device *dev) -+{ -+ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ uint32_t ier = dev_priv->vdc_irq_mask & (~_LNC_IRQ_TOPAZ_FLAG); -+ -+ PSB_DEBUG_INIT("TOPAZ: disable IRQ\n"); -+ -+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0); -+ PSB_WVDC32(ier, PSB_INT_ENABLE_R); /* essential */ -+} -+ -+static inline void lnc_topaz_clearirq(struct drm_device *dev, -+ uint32_t clear_topaz) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ -+ PSB_DEBUG_INIT("TOPAZ: clear IRQ\n"); -+ if (clear_topaz != 0) -+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, clear_topaz); -+ -+ PSB_WVDC32(_LNC_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R); -+} -+ -+static inline uint32_t lnc_topaz_queryirq(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ uint32_t val, iir, clear = 0; -+ -+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &val); -+ iir = PSB_RVDC32(PSB_INT_IDENTITY_R); -+ -+ if ((val == 0) && (iir == 0)) {/* no interrupt */ -+ PSB_DEBUG_GENERAL("TOPAZ: no interrupt,IIR=TOPAZ_INTSTAT=0\n"); -+ return 0; -+ } -+ -+ PSB_DEBUG_IRQ("TOPAZ:TOPAZ_INTSTAT=0x%08x,IIR=0%08x\n", val, iir); -+ -+ if (val & (1<<31)) -+ PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd 
seq=0x%04x," -+ "sync seq: 0x%08x vs 0x%08x (MTX)\n", -+ CCB_CTRL_SEQ(dev_priv), -+ dev_priv->sequence[LNC_ENGINE_ENCODE], -+ *(uint32_t *)dev_priv->topaz_sync_addr); -+ else -+ PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x," -+ "sync seq: 0x%08x vs 0x%08x (MTX)\n", -+ CCB_CTRL_SEQ(dev_priv), -+ dev_priv->sequence[LNC_ENGINE_ENCODE], -+ *(uint32_t *)dev_priv->topaz_sync_addr); -+ -+ if (val & 0x8) { -+ uint32_t mmu_status, mmu_req; -+ -+ TOPAZ_READ32(TOPAZ_CR_MMU_STATUS, &mmu_status); -+ TOPAZ_READ32(TOPAZ_CR_MMU_MEM_REQ, &mmu_req); -+ -+ PSB_DEBUG_IRQ("TOPAZ: detect a page fault interrupt, " -+ "address=0x%08x,mem req=0x%08x\n", -+ mmu_status, mmu_req); -+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT); -+ } -+ -+ if (val & 0x4) { -+ PSB_DEBUG_IRQ("TOPAZ: detect a MTX_HALT interrupt\n"); -+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT); -+ } -+ -+ if (val & 0x2) { -+ PSB_DEBUG_IRQ("TOPAZ: detect a MTX interrupt\n"); -+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX); -+ } -+ -+ if (val & 0x1) { -+ PSB_DEBUG_IRQ("TOPAZ: detect a MVEA interrupt\n"); -+ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA); -+ } -+ -+ return clear; -+} -+ -+#endif /* _LNC_TOPAZ_H_ */ -diff -uNr a/drivers/gpu/drm/psb/lnc_topazinit.c b/drivers/gpu/drm/psb/lnc_topazinit.c ---- a/drivers/gpu/drm/psb/lnc_topazinit.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/lnc_topazinit.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,1896 @@ -+/** -+ * file lnc_topazinit.c -+ * TOPAZ initialization and mtx-firmware upload -+ * -+ */ -+ -+/************************************************************************** -+ * -+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA -+ * Copyright (c) Imagination Technologies Limited, UK -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+ -+/* NOTE: (READ BEFORE REFINE CODE) -+ * 1. The FIRMWARE's SIZE is measured by byte, we have to pass the size -+ * measured by word to DMAC. 
-+ * -+ * -+ * -+ */ -+ -+/* include headers */ -+ -+/* #define DRM_DEBUG_CODE 2 */ -+ -+#include <linux/firmware.h> -+ -+#include <drm/drmP.h> -+#include <drm/drm.h> -+ -+#include "psb_drv.h" -+#include "lnc_topaz.h" -+ -+/* WARNING: this define is very important */ -+#define RAM_SIZE (1024 * 24) -+ -+static int drm_psb_ospmxxx = 0x10; -+ -+/* register default values -+ * THIS HEADER IS ONLY INCLUDE ONCE*/ -+static unsigned long topaz_default_regs[183][3] = { -+ {MVEA_START, 0x00000000, 0x00000000}, -+ {MVEA_START, 0x00000004, 0x00000400}, -+ {MVEA_START, 0x00000008, 0x00000000}, -+ {MVEA_START, 0x0000000C, 0x00000000}, -+ {MVEA_START, 0x00000010, 0x00000000}, -+ {MVEA_START, 0x00000014, 0x00000000}, -+ {MVEA_START, 0x00000018, 0x00000000}, -+ {MVEA_START, 0x0000001C, 0x00000000}, -+ {MVEA_START, 0x00000020, 0x00000120}, -+ {MVEA_START, 0x00000024, 0x00000000}, -+ {MVEA_START, 0x00000028, 0x00000000}, -+ {MVEA_START, 0x00000100, 0x00000000}, -+ {MVEA_START, 0x00000104, 0x00000000}, -+ {MVEA_START, 0x00000108, 0x00000000}, -+ {MVEA_START, 0x0000010C, 0x00000000}, -+ {MVEA_START, 0x0000011C, 0x00000001}, -+ {MVEA_START, 0x0000012C, 0x00000000}, -+ {MVEA_START, 0x00000180, 0x00000000}, -+ {MVEA_START, 0x00000184, 0x00000000}, -+ {MVEA_START, 0x00000188, 0x00000000}, -+ {MVEA_START, 0x0000018C, 0x00000000}, -+ {MVEA_START, 0x00000190, 0x00000000}, -+ {MVEA_START, 0x00000194, 0x00000000}, -+ {MVEA_START, 0x00000198, 0x00000000}, -+ {MVEA_START, 0x0000019C, 0x00000000}, -+ {MVEA_START, 0x000001A0, 0x00000000}, -+ {MVEA_START, 0x000001A4, 0x00000000}, -+ {MVEA_START, 0x000001A8, 0x00000000}, -+ {MVEA_START, 0x000001AC, 0x00000000}, -+ {MVEA_START, 0x000001B0, 0x00000000}, -+ {MVEA_START, 0x000001B4, 0x00000000}, -+ {MVEA_START, 0x000001B8, 0x00000000}, -+ {MVEA_START, 0x000001BC, 0x00000000}, -+ {MVEA_START, 0x000001F8, 0x00000000}, -+ {MVEA_START, 0x000001FC, 0x00000000}, -+ {MVEA_START, 0x00000200, 0x00000000}, -+ {MVEA_START, 0x00000204, 0x00000000}, -+ {MVEA_START, 0x00000208, 0x00000000}, -+ {MVEA_START, 0x0000020C, 0x00000000}, -+ {MVEA_START, 0x00000210, 0x00000000}, -+ {MVEA_START, 0x00000220, 0x00000001}, -+ {MVEA_START, 0x00000224, 0x0000001F}, -+ {MVEA_START, 0x00000228, 0x00000100}, -+ {MVEA_START, 0x0000022C, 0x00001F00}, -+ {MVEA_START, 0x00000230, 0x00000101}, -+ {MVEA_START, 0x00000234, 0x00001F1F}, -+ {MVEA_START, 0x00000238, 0x00001F01}, -+ {MVEA_START, 0x0000023C, 0x0000011F}, -+ {MVEA_START, 0x00000240, 0x00000200}, -+ {MVEA_START, 0x00000244, 0x00001E00}, -+ {MVEA_START, 0x00000248, 0x00000002}, -+ {MVEA_START, 0x0000024C, 0x0000001E}, -+ {MVEA_START, 0x00000250, 0x00000003}, -+ {MVEA_START, 0x00000254, 0x0000001D}, -+ {MVEA_START, 0x00000258, 0x00001F02}, -+ {MVEA_START, 0x0000025C, 0x00000102}, -+ {MVEA_START, 0x00000260, 0x0000011E}, -+ {MVEA_START, 0x00000264, 0x00000000}, -+ {MVEA_START, 0x00000268, 0x00000000}, -+ {MVEA_START, 0x0000026C, 0x00000000}, -+ {MVEA_START, 0x00000270, 0x00000000}, -+ {MVEA_START, 0x00000274, 0x00000000}, -+ {MVEA_START, 0x00000278, 0x00000000}, -+ {MVEA_START, 0x00000280, 0x00008000}, -+ {MVEA_START, 0x00000284, 0x00000000}, -+ {MVEA_START, 0x00000288, 0x00000000}, -+ {MVEA_START, 0x0000028C, 0x00000000}, -+ {MVEA_START, 0x00000314, 0x00000000}, -+ {MVEA_START, 0x00000318, 0x00000000}, -+ {MVEA_START, 0x0000031C, 0x00000000}, -+ {MVEA_START, 0x00000320, 0x00000000}, -+ {MVEA_START, 0x00000324, 0x00000000}, -+ {MVEA_START, 0x00000348, 0x00000000}, -+ {MVEA_START, 0x00000380, 0x00000000}, -+ {MVEA_START, 0x00000384, 0x00000000}, -+ 
{MVEA_START, 0x00000388, 0x00000000}, -+ {MVEA_START, 0x0000038C, 0x00000000}, -+ {MVEA_START, 0x00000390, 0x00000000}, -+ {MVEA_START, 0x00000394, 0x00000000}, -+ {MVEA_START, 0x00000398, 0x00000000}, -+ {MVEA_START, 0x0000039C, 0x00000000}, -+ {MVEA_START, 0x000003A0, 0x00000000}, -+ {MVEA_START, 0x000003A4, 0x00000000}, -+ {MVEA_START, 0x000003A8, 0x00000000}, -+ {MVEA_START, 0x000003B0, 0x00000000}, -+ {MVEA_START, 0x000003B4, 0x00000000}, -+ {MVEA_START, 0x000003B8, 0x00000000}, -+ {MVEA_START, 0x000003BC, 0x00000000}, -+ {MVEA_START, 0x000003D4, 0x00000000}, -+ {MVEA_START, 0x000003D8, 0x00000000}, -+ {MVEA_START, 0x000003DC, 0x00000000}, -+ {MVEA_START, 0x000003E0, 0x00000000}, -+ {MVEA_START, 0x000003E4, 0x00000000}, -+ {MVEA_START, 0x000003EC, 0x00000000}, -+ {MVEA_START, 0x000002D0, 0x00000000}, -+ {MVEA_START, 0x000002D4, 0x00000000}, -+ {MVEA_START, 0x000002D8, 0x00000000}, -+ {MVEA_START, 0x000002DC, 0x00000000}, -+ {MVEA_START, 0x000002E0, 0x00000000}, -+ {MVEA_START, 0x000002E4, 0x00000000}, -+ {MVEA_START, 0x000002E8, 0x00000000}, -+ {MVEA_START, 0x000002EC, 0x00000000}, -+ {MVEA_START, 0x000002F0, 0x00000000}, -+ {MVEA_START, 0x000002F4, 0x00000000}, -+ {MVEA_START, 0x000002F8, 0x00000000}, -+ {MVEA_START, 0x000002FC, 0x00000000}, -+ {MVEA_START, 0x00000300, 0x00000000}, -+ {MVEA_START, 0x00000304, 0x00000000}, -+ {MVEA_START, 0x00000308, 0x00000000}, -+ {MVEA_START, 0x0000030C, 0x00000000}, -+ {MVEA_START, 0x00000290, 0x00000000}, -+ {MVEA_START, 0x00000294, 0x00000000}, -+ {MVEA_START, 0x00000298, 0x00000000}, -+ {MVEA_START, 0x0000029C, 0x00000000}, -+ {MVEA_START, 0x000002A0, 0x00000000}, -+ {MVEA_START, 0x000002A4, 0x00000000}, -+ {MVEA_START, 0x000002A8, 0x00000000}, -+ {MVEA_START, 0x000002AC, 0x00000000}, -+ {MVEA_START, 0x000002B0, 0x00000000}, -+ {MVEA_START, 0x000002B4, 0x00000000}, -+ {MVEA_START, 0x000002B8, 0x00000000}, -+ {MVEA_START, 0x000002BC, 0x00000000}, -+ {MVEA_START, 0x000002C0, 0x00000000}, -+ {MVEA_START, 0x000002C4, 0x00000000}, -+ {MVEA_START, 0x000002C8, 0x00000000}, -+ {MVEA_START, 0x000002CC, 0x00000000}, -+ {MVEA_START, 0x00000080, 0x00000000}, -+ {MVEA_START, 0x00000084, 0x80705700}, -+ {MVEA_START, 0x00000088, 0x00000000}, -+ {MVEA_START, 0x0000008C, 0x00000000}, -+ {MVEA_START, 0x00000090, 0x00000000}, -+ {MVEA_START, 0x00000094, 0x00000000}, -+ {MVEA_START, 0x00000098, 0x00000000}, -+ {MVEA_START, 0x0000009C, 0x00000000}, -+ {MVEA_START, 0x000000A0, 0x00000000}, -+ {MVEA_START, 0x000000A4, 0x00000000}, -+ {MVEA_START, 0x000000A8, 0x00000000}, -+ {MVEA_START, 0x000000AC, 0x00000000}, -+ {MVEA_START, 0x000000B0, 0x00000000}, -+ {MVEA_START, 0x000000B4, 0x00000000}, -+ {MVEA_START, 0x000000B8, 0x00000000}, -+ {MVEA_START, 0x000000BC, 0x00000000}, -+ {MVEA_START, 0x000000C0, 0x00000000}, -+ {MVEA_START, 0x000000C4, 0x00000000}, -+ {MVEA_START, 0x000000C8, 0x00000000}, -+ {MVEA_START, 0x000000CC, 0x00000000}, -+ {MVEA_START, 0x000000D0, 0x00000000}, -+ {MVEA_START, 0x000000D4, 0x00000000}, -+ {MVEA_START, 0x000000D8, 0x00000000}, -+ {MVEA_START, 0x000000DC, 0x00000000}, -+ {MVEA_START, 0x000000E0, 0x00000000}, -+ {MVEA_START, 0x000000E4, 0x00000000}, -+ {MVEA_START, 0x000000E8, 0x00000000}, -+ {MVEA_START, 0x000000EC, 0x00000000}, -+ {MVEA_START, 0x000000F0, 0x00000000}, -+ {MVEA_START, 0x000000F4, 0x00000000}, -+ {MVEA_START, 0x000000F8, 0x00000000}, -+ {MVEA_START, 0x000000FC, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000000, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000004, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000008, 0x00000000}, -+ 
{TOPAZ_VLC_START, 0x0000000C, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000010, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000014, 0x00000000}, -+ {TOPAZ_VLC_START, 0x0000001C, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000020, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000024, 0x00000000}, -+ {TOPAZ_VLC_START, 0x0000002C, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000034, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000038, 0x00000000}, -+ {TOPAZ_VLC_START, 0x0000003C, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000040, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000044, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000048, 0x00000000}, -+ {TOPAZ_VLC_START, 0x0000004C, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000050, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000054, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000058, 0x00000000}, -+ {TOPAZ_VLC_START, 0x0000005C, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000060, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000064, 0x00000000}, -+ {TOPAZ_VLC_START, 0x00000068, 0x00000000}, -+ {TOPAZ_VLC_START, 0x0000006C, 0x00000000} -+}; -+ -+#define FIRMWARE_NAME "topaz_fw.bin" -+ -+/* define structure */ -+/* firmware file's info head */ -+struct topaz_fwinfo { -+ unsigned int ver:16; -+ unsigned int codec:16; -+ -+ unsigned int text_size; -+ unsigned int data_size; -+ unsigned int data_location; -+}; -+ -+/* firmware data array define */ -+struct topaz_codec_fw { -+ uint32_t ver; -+ uint32_t codec; -+ -+ uint32_t text_size; -+ uint32_t data_size; -+ uint32_t data_location; -+ -+ struct ttm_buffer_object *text; -+ struct ttm_buffer_object *data; -+}; -+ -+ -+ -+/* static function define */ -+static int topaz_upload_fw(struct drm_device *dev, -+ enum drm_lnc_topaz_codec codec); -+static inline void topaz_set_default_regs(struct drm_psb_private -+ *dev_priv); -+ -+#define UPLOAD_FW_BY_DMA 1 -+ -+#if UPLOAD_FW_BY_DMA -+static void topaz_dma_transfer(struct drm_psb_private *dev_priv, -+ uint32_t channel, uint32_t src_phy_addr, -+ uint32_t offset, uint32_t dst_addr, -+ uint32_t byte_num, uint32_t is_increment, -+ uint32_t is_write); -+#else -+static void topaz_mtx_upload_by_register(struct drm_device *dev, -+ uint32_t mtx_mem, uint32_t addr, -+ uint32_t size, -+ struct ttm_buffer_object *buf); -+#endif -+ -+static void topaz_write_core_reg(struct drm_psb_private *dev_priv, -+ uint32_t reg, const uint32_t val); -+static void topaz_read_core_reg(struct drm_psb_private *dev_priv, -+ uint32_t reg, uint32_t *ret_val); -+static void get_mtx_control_from_dash(struct drm_psb_private *dev_priv); -+static void release_mtx_control_from_dash(struct drm_psb_private -+ *dev_priv); -+static void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv); -+static void mtx_dma_read(struct drm_device *dev, uint32_t source_addr, -+ uint32_t size); -+static void mtx_dma_write(struct drm_device *dev); -+ -+ -+#if 0 /* DEBUG_FUNCTION */ -+static int topaz_test_null(struct drm_device *dev, uint32_t seq); -+static void topaz_mmu_flush(struct drm_device *dev); -+static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value); -+#endif -+#if 0 -+static void topaz_save_default_regs(struct drm_psb_private *dev_priv, -+ uint32_t *data); -+static void topaz_restore_default_regs(struct drm_psb_private *dev_priv, -+ uint32_t *data); -+#endif -+ -+/* globale variable define */ -+struct topaz_codec_fw topaz_fw[IMG_CODEC_NUM]; -+ -+uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv, -+ uint32_t byte_addr) -+{ -+ uint32_t read_val; -+ uint32_t reg, bank_size, ram_bank_size, ram_id; -+ -+ TOPAZ_READ32(0x3c, ®); -+ reg = 0x0a0a0606; -+ bank_size = (reg & 
0xF0000) >> 16; -+ -+ ram_bank_size = (uint32_t) (1 << (bank_size + 2)); -+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size; -+ -+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL, -+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) | -+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR) | -+ F_ENCODE(1, MTX_MTX_MCMR)); -+ -+ /* ?? poll this reg? */ -+ topaz_wait_for_register(dev_priv, -+ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS, -+ 1, 1); -+ -+ MTX_READ32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, &read_val); -+ -+ return read_val; -+} -+ -+void topaz_write_mtx_mem(struct drm_psb_private *dev_priv, -+ uint32_t byte_addr, uint32_t val) -+{ -+ uint32_t ram_id = 0; -+ uint32_t reg, bank_size, ram_bank_size; -+ -+ TOPAZ_READ32(0x3c, ®); -+ -+ /* PSB_DEBUG_GENERAL ("TOPAZ: DEBUG REG(%x)\n", reg); */ -+ reg = 0x0a0a0606; -+ -+ bank_size = (reg & 0xF0000) >> 16; -+ -+ ram_bank_size = (uint32_t) (1 << (bank_size + 2)); -+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size; -+ -+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL, -+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) | -+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR)); -+ -+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val); -+ -+ /* ?? poll this reg? */ -+ topaz_wait_for_register(dev_priv, -+ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS, -+ 1, 1); -+ -+ return; -+} -+ -+void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv, -+ uint32_t byte_addr) -+{ -+ uint32_t ram_id = 0; -+ uint32_t reg, bank_size, ram_bank_size; -+ -+ TOPAZ_READ32(0x3c, ®); -+ -+ reg = 0x0a0a0606; -+ -+ bank_size = (reg & 0xF0000) >> 16; -+ -+ ram_bank_size = (uint32_t) (1 << (bank_size + 2)); -+ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size; -+ -+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL, -+ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) | -+ F_ENCODE(1, MTX_MTX_MCMAI) | -+ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR)); -+} -+ -+void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv, -+ uint32_t val) -+{ -+ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val); -+} -+ -+ -+int topaz_wait_for_register(struct drm_psb_private *dev_priv, -+ uint32_t addr, uint32_t value, uint32_t mask) -+{ -+ uint32_t tmp; -+ uint32_t count = 10000; -+ -+ /* # poll topaz register for certain times */ -+ while (count) { -+ /* #.# read */ -+ MM_READ32(addr, 0, &tmp); -+ -+ if (value == (tmp & mask)) -+ return 0; -+ -+ /* #.# delay and loop */ -+ DRM_UDELAY(100); -+ --count; -+ } -+ -+ /* # now waiting is timeout, return 1 indicat failed */ -+ /* XXX: testsuit means a timeout 10000 */ -+ -+ DRM_ERROR("TOPAZ:time out to poll addr(0x%x) expected value(0x%08x), " -+ "actual 0x%08x (0x%08x & 0x%08x)\n", -+ addr, value, tmp & mask, tmp, mask); -+ -+ return -EBUSY; -+ -+} -+ -+ -+void lnc_topaz_reset_wq(struct work_struct *work) -+{ -+ struct drm_psb_private *dev_priv = -+ container_of(work, struct drm_psb_private, topaz_watchdog_wq); -+ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ unsigned long irq_flags; -+ -+ mutex_lock(&dev_priv->topaz_mutex); -+ dev_priv->topaz_needs_reset = 1; -+ dev_priv->topaz_current_sequence++; -+ PSB_DEBUG_GENERAL -+ ("MSVDXFENCE: incremented topaz_current_sequence to :%d\n", -+ dev_priv->topaz_current_sequence); -+ -+ psb_fence_error(scheduler->dev, LNC_ENGINE_ENCODE, -+ dev_priv->topaz_current_sequence, _PSB_FENCE_TYPE_EXE, -+ DRM_CMD_HANG); -+ -+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags); -+ dev_priv->timer_available = 1; -+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags); -+ -+ spin_lock_irqsave(&dev_priv->topaz_lock, 
irq_flags); -+ -+ /* psb_msvdx_flush_cmd_queue(scheduler->dev); */ -+ -+ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags); -+ -+ psb_schedule_watchdog(dev_priv); -+ mutex_unlock(&dev_priv->topaz_mutex); -+} -+ -+ -+/* this function finish the first part of initialization, the rest -+ * should be done in topaz_setup_fw -+ */ -+int lnc_topaz_init(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ struct ttm_bo_device *bdev = &dev_priv->bdev; -+ uint32_t core_id, core_rev; -+ void *topaz_bo_virt; -+ int ret = 0; -+ bool is_iomem; -+ -+ PSB_DEBUG_GENERAL("TOPAZ: init topaz data structures\n"); -+ -+ /* # initialize comand topaz queueing [msvdx_queue] */ -+ INIT_LIST_HEAD(&dev_priv->topaz_queue); -+ /* # init mutex? CHECK: mutex usage [msvdx_mutex] */ -+ mutex_init(&dev_priv->topaz_mutex); -+ /* # spin lock init? CHECK spin lock usage [msvdx_lock] */ -+ spin_lock_init(&dev_priv->topaz_lock); -+ -+ /* # topaz status init. [msvdx_busy] */ -+ dev_priv->topaz_busy = 0; -+ dev_priv->topaz_cmd_seq = 0; -+ dev_priv->topaz_fw_loaded = 0; -+ dev_priv->topaz_cur_codec = 0; -+ dev_priv->topaz_mtx_data_mem = NULL; -+ dev_priv->cur_mtx_data_size = 0; -+ -+ dev_priv->topaz_mtx_reg_state = kmalloc(TOPAZ_MTX_REG_SIZE, -+ GFP_KERNEL); -+ if (dev_priv->topaz_mtx_reg_state == NULL) { -+ DRM_ERROR("TOPAZ: failed to allocate space " -+ "for mtx register\n"); -+ return -1; -+ } -+ -+ /* # gain write back structure,we may only need 32+4=40DW */ -+ if (!dev_priv->topaz_bo) { -+ ret = ttm_buffer_object_create(bdev, 4096, -+ ttm_bo_type_kernel, -+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT, -+ 0, 0, 0, NULL, &(dev_priv->topaz_bo)); -+ if (ret != 0) { -+ DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n"); -+ return ret; -+ } -+ } -+ -+ ret = ttm_bo_kmap(dev_priv->topaz_bo, 0, -+ dev_priv->topaz_bo->num_pages, -+ &dev_priv->topaz_bo_kmap); -+ if (ret) { -+ DRM_ERROR("TOPAZ: map topaz BO bo failed......\n"); -+ ttm_bo_unref(&dev_priv->topaz_bo); -+ return ret; -+ } -+ -+ topaz_bo_virt = ttm_kmap_obj_virtual(&dev_priv->topaz_bo_kmap, -+ &is_iomem); -+ dev_priv->topaz_ccb_wb = (void *) topaz_bo_virt; -+ dev_priv->topaz_wb_offset = dev_priv->topaz_bo->offset; -+ dev_priv->topaz_sync_addr = (uint32_t *) (topaz_bo_virt + 2048); -+ dev_priv->topaz_sync_offset = dev_priv->topaz_wb_offset + 2048; -+ PSB_DEBUG_GENERAL("TOPAZ: allocated BO for WriteBack and SYNC command," -+ "WB offset=0x%08x, SYNC offset=0x%08x\n", -+ dev_priv->topaz_wb_offset, dev_priv->topaz_sync_offset); -+ -+ *(dev_priv->topaz_sync_addr) = ~0; /* reset sync seq */ -+ -+ /* # reset topaz */ -+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST, -+ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) | -+ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) | -+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) | -+ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) | -+ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) | -+ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET)); -+ -+ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST, -+ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) | -+ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) | -+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) | -+ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) | -+ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) | -+ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET)); -+ -+ /* # set up MMU */ -+ topaz_mmu_hwsetup(dev_priv); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: defer firmware loading to the place" -+ "when receiving user space commands\n"); -+ -+#if 0 /* can't load FW here */ -+ /* #.# load fw to driver */ -+ 
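-+	/* Note: this block has to stay disabled; request_firmware()
-+	 * depends on the user space firmware helper (udev), which is
-+	 * not guaranteed to be running this early in driver init, so
-+	 * loading is deferred until the first encode command arrives
-+	 * (see lnc_submit_encode_cmdbuf). */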
PSB_DEBUG_GENERAL("TOPAZ: will init firmware\n"); -+ ret = topaz_init_fw(dev); -+ if (ret != 0) -+ return -1; -+ -+ topaz_setup_fw(dev, FW_H264_NO_RC);/* just for test */ -+#endif -+ /* <msvdx does> # minimal clock */ -+ -+ /* <msvdx does> # return 0 */ -+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_ID, &core_id); -+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_REV, &core_rev); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: core_id(%x) core_rev(%x)\n", -+ core_id, core_rev); -+ -+ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX) -+ psb_power_down_topaz(dev); -+ -+ return 0; -+} -+ -+int lnc_topaz_uninit(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ /* int n;*/ -+ -+ /* flush MMU */ -+ PSB_DEBUG_GENERAL("XXX: need to flush mmu cache here??\n"); -+ /* topaz_mmu_flushcache (dev_priv); */ -+ -+ /* # reset TOPAZ chip */ -+ lnc_topaz_reset(dev_priv); -+ -+ /* release resources */ -+ /* # release write back memory */ -+ dev_priv->topaz_ccb_wb = NULL; -+ -+ ttm_bo_unref(&dev_priv->topaz_bo); -+ -+ /* release mtx register save space */ -+ kfree(dev_priv->topaz_mtx_reg_state); -+ -+ /* release mtx data memory save space */ -+ if (dev_priv->topaz_mtx_data_mem) -+ ttm_bo_unref(&dev_priv->topaz_mtx_data_mem); -+ -+ /* # release firmware */ -+ /* XXX: but this handlnig should be reconsidered */ -+ /* XXX: there is no jpeg firmware...... */ -+#if 0 /* FIX WHEN FIRMWARE IS LOADED */ -+ for (n = 1; n < IMG_CODEC_NUM; ++n) { -+ ttm_bo_unref(&topaz_fw[n].text); -+ ttm_bo_unref(&topaz_fw[n].data); -+ } -+#endif -+ ttm_bo_kunmap(&dev_priv->topaz_bo_kmap); -+ ttm_bo_unref(&dev_priv->topaz_bo); -+ -+ return 0; -+} -+ -+int lnc_topaz_reset(struct drm_psb_private *dev_priv) -+{ -+ return 0; -+#if 0 -+ int ret = 0; -+ /* # software reset */ -+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET, -+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK); -+ -+ /* # call lnc_wait_for_register, wait reset finished */ -+ topaz_wait_for_register(dev_priv, -+ MTX_START + MTX_CORE_CR_MTX_ENABLE_OFFSET, -+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK, -+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK); -+ -+ /* # if reset finised */ -+ PSB_DEBUG_GENERAL("XXX: add condition judgement for topaz wait...\n"); -+ /* #.# clear interrupt enable flag */ -+ -+ /* #.# clear pending interrupt flags */ -+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, -+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX) | -+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT) | -+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA) | -+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT) -+ ); -+ /* # destroy topaz mutex in drm_psb_privaet [msvdx_mutex] */ -+ -+ /* # return register value which is waited above */ -+ -+ PSB_DEBUG_GENERAL("called\n"); -+ return 0; -+#endif -+} -+ -+/* read firmware bin file and load all data into driver */ -+int topaz_init_fw(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ struct ttm_bo_device *bdev = &dev_priv->bdev; -+ const struct firmware *raw = NULL; -+ unsigned char *ptr; -+ int ret = 0; -+ int n; -+ struct topaz_fwinfo *cur_fw; -+ int cur_size; -+ struct topaz_codec_fw *cur_codec; -+ struct ttm_buffer_object **cur_drm_obj; -+ struct ttm_bo_kmap_obj tmp_kmap; -+ bool is_iomem; -+ -+ dev_priv->stored_initial_qp = 0; -+ -+ /* # get firmware */ -+ ret = request_firmware(&raw, FIRMWARE_NAME, &dev->pdev->dev); -+ if (ret != 0) { -+ DRM_ERROR("TOPAZ: request_firmware failed: %d\n", ret); -+ return ret; -+ } -+ -+ PSB_DEBUG_GENERAL("TOPAZ: opened firmware\n"); -+ -+ if (raw && (raw->size < sizeof(struct topaz_fwinfo))) { -+ 
DRM_ERROR("TOPAZ: firmware file is not correct size.\n"); -+ goto out; -+ } -+ -+ ptr = (unsigned char *) raw->data; -+ -+ if (!ptr) { -+ DRM_ERROR("TOPAZ: failed to load firmware.\n"); -+ goto out; -+ } -+ -+ /* # load fw from file */ -+ PSB_DEBUG_GENERAL("TOPAZ: load firmware.....\n"); -+ cur_fw = NULL; -+ /* didn't use the first element */ -+ for (n = 1; n < IMG_CODEC_NUM; ++n) { -+ cur_fw = (struct topaz_fwinfo *) ptr; -+ -+ cur_codec = &topaz_fw[cur_fw->codec]; -+ cur_codec->ver = cur_fw->ver; -+ cur_codec->codec = cur_fw->codec; -+ cur_codec->text_size = cur_fw->text_size; -+ cur_codec->data_size = cur_fw->data_size; -+ cur_codec->data_location = cur_fw->data_location; -+ -+ PSB_DEBUG_GENERAL("TOPAZ: load firemware %s.\n", -+ codec_to_string(cur_fw->codec)); -+ -+ /* #.# handle text section */ -+ cur_codec->text = NULL; -+ ptr += sizeof(struct topaz_fwinfo); -+ cur_drm_obj = &cur_codec->text; -+ cur_size = cur_fw->text_size; -+ -+ /* #.# malloc DRM object for fw storage */ -+ ret = ttm_buffer_object_create(bdev, cur_size, -+ ttm_bo_type_kernel, -+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT, -+ 0, 0, 0, NULL, cur_drm_obj); -+ if (ret) { -+ DRM_ERROR("Failed to allocate firmware.\n"); -+ goto out; -+ } -+ -+ /* #.# fill DRM object with firmware data */ -+ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages, -+ &tmp_kmap); -+ if (ret) { -+ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret); -+ ttm_bo_unref(cur_drm_obj); -+ *cur_drm_obj = NULL; -+ goto out; -+ } -+ -+ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr, -+ cur_size); -+ -+ ttm_bo_kunmap(&tmp_kmap); -+ -+ /* #.# handle data section */ -+ cur_codec->data = NULL; -+ ptr += cur_fw->text_size; -+ cur_drm_obj = &cur_codec->data; -+ cur_size = cur_fw->data_size; -+ -+ /* #.# malloc DRM object for fw storage */ -+ ret = ttm_buffer_object_create(bdev, cur_size, -+ ttm_bo_type_kernel, -+ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT, -+ 0, 0, 0, NULL, cur_drm_obj); -+ if (ret) { -+ DRM_ERROR("Failed to allocate firmware.\n"); -+ goto out; -+ } -+ -+ /* #.# fill DRM object with firmware data */ -+ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages, -+ &tmp_kmap); -+ if (ret) { -+ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret); -+ ttm_bo_unref(cur_drm_obj); -+ *cur_drm_obj = NULL; -+ goto out; -+ } -+ -+ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr, -+ cur_size); -+ -+ ttm_bo_kunmap(&tmp_kmap); -+ -+ /* #.# validate firmware */ -+ -+ /* #.# update ptr */ -+ ptr += cur_fw->data_size; -+ } -+ -+ release_firmware(raw); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: return from firmware init\n"); -+ -+ return 0; -+ -+out: -+ if (raw) { -+ PSB_DEBUG_GENERAL("release firmware....\n"); -+ release_firmware(raw); -+ } -+ -+ return -1; -+} -+ -+/* setup fw when start a new context */ -+int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ struct ttm_bo_device *bdev = &dev_priv->bdev; -+ uint32_t mem_size = RAM_SIZE; /* follow DDK */ -+ uint32_t verify_pc; -+ int ret; -+ -+#if 0 -+ if (codec == dev_priv->topaz_current_codec) { -+ LNC_TRACEL("TOPAZ: reuse previous codec\n"); -+ return 0; -+ } -+#endif -+ -+ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX) -+ psb_power_up_topaz(dev); -+ -+ /* XXX: need to rest topaz? 
*/
-+	PSB_DEBUG_GENERAL("XXX: should reset topaz when the context changes?\n");
-+
-+	/* XXX: interrupt enable shouldn't be set here;
-+	 * this function is called with interrupts enabled,
-+	 * but we have no choice since setup_fw has to be
-+	 * called manually */
-+	/* # upload firmware, clear interrupts and start the firmware
-+	 * -- from hostutils.c in TestSuits */
-+
-+	/* # reset MVEA */
-+	MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
-+		     F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
-+		     F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
-+		     F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
-+		     F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
-+		     F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
-+		     F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
-+
-+	MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
-+		     F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
-+		     F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
-+		     F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
-+		     F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
-+		     F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
-+		     F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
-+
-+
-+	topaz_mmu_hwsetup(dev_priv);
-+
-+#if !LNC_TOPAZ_NO_IRQ
-+	lnc_topaz_disableirq(dev);
-+#endif
-+
-+	PSB_DEBUG_GENERAL("TOPAZ: will setup firmware....\n");
-+
-+	topaz_set_default_regs(dev_priv);
-+
-+	/* # reset mtx */
-+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
-+		      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET) |
-+		      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
-+		      F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET));
-+
-+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST, 0x0);
-+
-+	/* # upload fw by drm */
-+	PSB_DEBUG_GENERAL("TOPAZ: will upload firmware\n");
-+
-+	topaz_upload_fw(dev, codec);
-+
-+	/* allocate the space for context save & restore if needed */
-+	if (dev_priv->topaz_mtx_data_mem == NULL) {
-+		ret = ttm_buffer_object_create(bdev,
-+					dev_priv->cur_mtx_data_size * 4,
-+					ttm_bo_type_kernel,
-+					DRM_PSB_FLAG_MEM_MMU |
-+					TTM_PL_FLAG_NO_EVICT,
-+					0, 0, 0, NULL,
-+					&dev_priv->topaz_mtx_data_mem);
-+		if (ret) {
-+			DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
-+				  "mtx data save\n");
-+			return -1;
-+		}
-+	}
-+	PSB_DEBUG_GENERAL("TOPAZ: after upload fw ....\n");
-+
-+	/* XXX: In power save mode, need to save the complete data memory
-+	 * and restore it.
MTX_FWIF.c record the data size */ -+ PSB_DEBUG_GENERAL("TOPAZ:in power save mode need to save memory?\n"); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: setting up pc address\n"); -+ topaz_write_core_reg(dev_priv, TOPAZ_MTX_PC, PC_START_ADDRESS); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: verify pc address\n"); -+ -+ topaz_read_core_reg(dev_priv, TOPAZ_MTX_PC, &verify_pc); -+ -+ /* enable auto clock is essential for this driver */ -+ TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE, -+ F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) | -+ F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE)); -+ MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING, -+ F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) | -+ F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) | -+ F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) | -+ F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE)); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: current pc(%08X) vs %08X\n", -+ verify_pc, PC_START_ADDRESS); -+ -+ /* # turn on MTX */ -+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, -+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX)); -+ -+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET, -+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK); -+ -+ /* # poll on the interrupt which the firmware will generate */ -+ topaz_wait_for_register(dev_priv, -+ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT, -+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTS_MTX), -+ F_MASK(TOPAZ_CR_IMG_TOPAZ_INTS_MTX)); -+ -+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, -+ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX)); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: after topaz mtx setup ....\n"); -+ -+ /* # get ccb buffer addr -- file hostutils.c */ -+ dev_priv->topaz_ccb_buffer_addr = -+ topaz_read_mtx_mem(dev_priv, -+ MTX_DATA_MEM_BASE + mem_size - 4); -+ dev_priv->topaz_ccb_ctrl_addr = -+ topaz_read_mtx_mem(dev_priv, -+ MTX_DATA_MEM_BASE + mem_size - 8); -+ dev_priv->topaz_ccb_size = -+ topaz_read_mtx_mem(dev_priv, -+ dev_priv->topaz_ccb_ctrl_addr + -+ MTX_CCBCTRL_CCBSIZE); -+ -+ dev_priv->topaz_cmd_windex = 0; -+ -+ PSB_DEBUG_GENERAL("TOPAZ:ccb_buffer_addr(%x),ctrl_addr(%x) size(%d)\n", -+ dev_priv->topaz_ccb_buffer_addr, -+ dev_priv->topaz_ccb_ctrl_addr, -+ dev_priv->topaz_ccb_size); -+ -+ /* # write back the initial QP Value */ -+ topaz_write_mtx_mem(dev_priv, -+ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP, -+ dev_priv->stored_initial_qp); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: write WB mem address 0x%08x\n", -+ dev_priv->topaz_wb_offset); -+ topaz_write_mtx_mem(dev_priv, MTX_DATA_MEM_BASE + mem_size - 12, -+ dev_priv->topaz_wb_offset); -+ -+ /* this kick is essential for mtx.... */ -+ *((uint32_t *) dev_priv->topaz_ccb_wb) = 0x01020304; -+ topaz_mtx_kick(dev_priv, 1); -+ DRM_UDELAY(1000); -+ PSB_DEBUG_GENERAL("TOPAZ: DDK expected 0x12345678 in WB memory," -+ " and here it is 0x%08x\n", -+ *((uint32_t *) dev_priv->topaz_ccb_wb)); -+ -+ *((uint32_t *) dev_priv->topaz_ccb_wb) = 0x0;/* reset it to 0 */ -+ PSB_DEBUG_GENERAL("TOPAZ: firmware uploaded.\n"); -+ -+ /* XXX: is there any need to record next cmd num?? 
-+ * we use fence seqence number to record it -+ */ -+ dev_priv->topaz_busy = 0; -+ dev_priv->topaz_cmd_seq = 0; -+ -+#if !LNC_TOPAZ_NO_IRQ -+ lnc_topaz_enableirq(dev); -+#endif -+ -+#if 0 -+ /* test sync command */ -+ { -+ uint32_t sync_cmd[3]; -+ uint32_t *sync_p = (uint32_t *)dev_priv->topaz_sync_addr; -+ int count = 10000; -+ -+ /* insert a SYNC command here */ -+ sync_cmd[0] = MTX_CMDID_SYNC | (3 << 8) | -+ (0x5b << 16); -+ sync_cmd[1] = dev_priv->topaz_sync_offset; -+ sync_cmd[2] = 0x3c; -+ -+ TOPAZ_BEGIN_CCB(dev_priv); -+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]); -+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]); -+ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]); -+ TOPAZ_END_CCB(dev_priv, 1); -+ -+ while (count && *sync_p != 0x3c) { -+ DRM_UDELAY(1000); -+ --count; -+ } -+ if ((count == 0) && (*sync_p != 0x3c)) { -+ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x)," -+ "actual 0x%08x\n", -+ 0x3c, *sync_p); -+ } -+ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p); -+ } -+#endif -+#if 0 -+ topaz_mmu_flush(dev); -+ -+ topaz_test_null(dev, 0xe1e1); -+ topaz_test_null(dev, 0xe2e2); -+ topaz_mmu_test(dev, 0x12345678); -+ topaz_test_null(dev, 0xe3e3); -+ topaz_mmu_test(dev, 0x8764321); -+ -+ topaz_test_null(dev, 0xe4e4); -+ topaz_test_null(dev, 0xf3f3); -+#endif -+ -+ return 0; -+} -+ -+#if UPLOAD_FW_BY_DMA -+int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ const struct topaz_codec_fw *cur_codec_fw; -+ uint32_t text_size, data_size; -+ uint32_t data_location; -+ uint32_t cur_mtx_data_size; -+ -+ /* # refer HLD document */ -+ -+ /* # MTX reset */ -+ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n"); -+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET, -+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK); -+ -+ DRM_UDELAY(6000); -+ -+ /* # upload the firmware by DMA */ -+ cur_codec_fw = &topaz_fw[codec]; -+ -+ PSB_DEBUG_GENERAL("Topaz:upload codec %s(%d) text sz=%d data sz=%d" -+ " data location(%d)\n", codec_to_string(codec), codec, -+ cur_codec_fw->text_size, cur_codec_fw->data_size, -+ cur_codec_fw->data_location); -+ -+ /* # upload text */ -+ text_size = cur_codec_fw->text_size / 4; -+ -+ /* setup the MTX to start recieving data: -+ use a register for the transfer which will point to the source -+ (MTX_CR_MTX_SYSC_CDMAT) */ -+ /* #.# fill the dst addr */ -+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000); -+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC, -+ F_ENCODE(2, MTX_BURSTSIZE) | -+ F_ENCODE(0, MTX_RNW) | -+ F_ENCODE(1, MTX_ENABLE) | -+ F_ENCODE(text_size, MTX_LENGTH)); -+ -+ /* #.# set DMAC access to host memory via BIF */ -+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1); -+ -+ /* #.# transfer the codec */ -+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0, -+ MTX_CR_MTX_SYSC_CDMAT, text_size, 0, 0); -+ -+ /* #.# wait dma finish */ -+ topaz_wait_for_register(dev_priv, -+ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0), -+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN), -+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN)); -+ -+ /* #.# clear interrupt */ -+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0); -+ -+ /* # return access to topaz core */ -+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0); -+ -+ /* # upload data */ -+ data_size = cur_codec_fw->data_size / 4; -+ data_location = cur_codec_fw->data_location; -+ -+ /* #.# fill the dst addr */ -+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, -+ 0x80900000 + data_location - 0x82880000); -+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC, -+ F_ENCODE(2, MTX_BURSTSIZE) | -+ F_ENCODE(0, MTX_RNW) | -+ F_ENCODE(1, MTX_ENABLE) | -+ 
F_ENCODE(data_size, MTX_LENGTH)); -+ -+ /* #.# set DMAC access to host memory via BIF */ -+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1); -+ -+ /* #.# transfer the codec */ -+ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->data->offset, 0, -+ MTX_CR_MTX_SYSC_CDMAT, data_size, 0, 0); -+ -+ /* #.# wait dma finish */ -+ topaz_wait_for_register(dev_priv, -+ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0), -+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN), -+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN)); -+ -+ /* #.# clear interrupt */ -+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0); -+ -+ /* # return access to topaz core */ -+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0); -+ -+ /* record this codec's mtx data size for -+ * context save & restore */ -+ cur_mtx_data_size = RAM_SIZE - (data_location - 0x82880000); -+ if (dev_priv->cur_mtx_data_size != cur_mtx_data_size) { -+ dev_priv->cur_mtx_data_size = cur_mtx_data_size; -+ if (dev_priv->topaz_mtx_data_mem) -+ ttm_bo_unref(&dev_priv->topaz_mtx_data_mem); -+ dev_priv->topaz_mtx_data_mem = NULL; -+ } -+ -+ return 0; -+} -+ -+#else -+ -+void topaz_mtx_upload_by_register(struct drm_device *dev, uint32_t mtx_mem, -+ uint32_t addr, uint32_t size, -+ struct ttm_buffer_object *buf) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ uint32_t *buf_p; -+ uint32_t debug_reg, bank_size, bank_ram_size, bank_count; -+ uint32_t cur_ram_id, ram_addr , ram_id; -+ int map_ret, lp; -+ struct ttm_bo_kmap_obj bo_kmap; -+ bool is_iomem; -+ uint32_t cur_addr; -+ -+ get_mtx_control_from_dash(dev_priv); -+ -+ map_ret = ttm_bo_kmap(buf, 0, buf->num_pages, &bo_kmap); -+ if (map_ret) { -+ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", map_ret); -+ return; -+ } -+ buf_p = (uint32_t *) ttm_kmap_obj_virtual(&bo_kmap, &is_iomem); -+ -+ -+ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, &debug_reg); -+ debug_reg = 0x0a0a0606; -+ bank_size = (debug_reg & 0xf0000) >> 16; -+ bank_ram_size = 1 << (bank_size + 2); -+ -+ bank_count = (debug_reg & 0xf00) >> 8; -+ -+ topaz_wait_for_register(dev_priv, -+ MTX_START+MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET, -+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK, -+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK); -+ -+ cur_ram_id = -1; -+ cur_addr = addr; -+ for (lp = 0; lp < size / 4; ++lp) { -+ ram_id = mtx_mem + (cur_addr / bank_ram_size); -+ -+ if (cur_ram_id != ram_id) { -+ ram_addr = cur_addr >> 2; -+ -+ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET, -+ F_ENCODE(ram_id, MTX_MTX_MCMID) | -+ F_ENCODE(ram_addr, MTX_MTX_MCM_ADDR) | -+ F_ENCODE(1, MTX_MTX_MCMAI)); -+ -+ cur_ram_id = ram_id; -+ } -+ cur_addr += 4; -+ -+ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET, -+ *(buf_p + lp)); -+ -+ topaz_wait_for_register(dev_priv, -+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET + MTX_START, -+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK, -+ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK); -+ } -+ -+ ttm_bo_kunmap(&bo_kmap); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: register data upload done\n"); -+ return; -+} -+ -+int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ const struct topaz_codec_fw *cur_codec_fw; -+ uint32_t text_size, data_size; -+ uint32_t data_location; -+ -+ /* # refer HLD document */ -+ /* # MTX reset */ -+ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n"); -+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET, -+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK); -+ -+ DRM_UDELAY(6000); -+ -+ /* # upload the firmware by DMA */ -+ cur_codec_fw = 
&topaz_fw[codec]; -+ -+ PSB_DEBUG_GENERAL("Topaz: upload codec %s text size(%d) data size(%d)" -+ " data location(0x%08x)\n", codec_to_string(codec), -+ cur_codec_fw->text_size, cur_codec_fw->data_size, -+ cur_codec_fw->data_location); -+ -+ /* # upload text */ -+ text_size = cur_codec_fw->text_size; -+ -+ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_CODE_MEM, -+ PC_START_ADDRESS - MTX_MEMORY_BASE, -+ text_size, cur_codec_fw->text); -+ -+ /* # upload data */ -+ data_size = cur_codec_fw->data_size; -+ data_location = cur_codec_fw->data_location; -+ -+ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_DATA_MEM, -+ data_location - 0x82880000, data_size, -+ cur_codec_fw->data); -+ -+ return 0; -+} -+ -+#endif /* UPLOAD_FW_BY_DMA */ -+ -+void -+topaz_dma_transfer(struct drm_psb_private *dev_priv, uint32_t channel, -+ uint32_t src_phy_addr, uint32_t offset, -+ uint32_t soc_addr, uint32_t byte_num, -+ uint32_t is_increment, uint32_t is_write) -+{ -+ uint32_t dmac_count; -+ uint32_t irq_stat; -+ uint32_t count; -+ -+ PSB_DEBUG_GENERAL("TOPAZ: using dma to transfer firmware\n"); -+ /* # check that no transfer is currently in progress and no -+ interrupts are outstanding ?? (why care interrupt) */ -+ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count); -+ if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) -+ DRM_ERROR("TOPAZ: there is tranfer in progress\n"); -+ -+ /* assert(0==(dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));*/ -+ -+ /* no hold off period */ -+ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0); -+ /* clear previous interrupts */ -+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0); -+ /* check irq status */ -+ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat); -+ /* assert(0 == irq_stat); */ -+ if (0 != irq_stat) -+ DRM_ERROR("TOPAZ: there is hold up\n"); -+ -+ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), -+ (src_phy_addr + offset)); -+ count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT, -+ is_write, DMAC_PWIDTH_32_BIT, byte_num); -+ /* generate an interrupt at the end of transfer */ -+ count |= MASK_IMG_SOC_TRANSFER_IEN; -+ count |= F_ENCODE(is_write, IMG_SOC_DIR); -+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count); -+ -+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel), -+ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, -+ is_increment, DMAC_BURST_2)); -+ -+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr); -+ -+ /* Finally, rewrite the count register with -+ * the enable bit set to kick off the transfer -+ */ -+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count | MASK_IMG_SOC_EN); -+ -+ PSB_DEBUG_GENERAL("TOPAZ: dma transfer started.\n"); -+ -+ return; -+} -+ -+void topaz_set_default_regs(struct drm_psb_private *dev_priv) -+{ -+ int n; -+ int count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3); -+ -+ for (n = 0; n < count; n++) -+ MM_WRITE32(topaz_default_regs[n][0], -+ topaz_default_regs[n][1], -+ topaz_default_regs[n][2]); -+ -+} -+ -+void topaz_write_core_reg(struct drm_psb_private *dev_priv, uint32_t reg, -+ const uint32_t val) -+{ -+ uint32_t tmp; -+ get_mtx_control_from_dash(dev_priv); -+ -+ /* put data into MTX_RW_DATA */ -+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, val); -+ -+ /* request a write */ -+ tmp = reg & -+ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK; -+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, tmp); -+ -+ /* wait for operation finished */ -+ topaz_wait_for_register(dev_priv, -+ MTX_START + -+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, -+ 
MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK, -+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK); -+ -+ release_mtx_control_from_dash(dev_priv); -+} -+ -+void topaz_read_core_reg(struct drm_psb_private *dev_priv, uint32_t reg, -+ uint32_t *ret_val) -+{ -+ uint32_t tmp; -+ -+ get_mtx_control_from_dash(dev_priv); -+ -+ /* request a write */ -+ tmp = (reg & -+ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK); -+ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, -+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK | tmp); -+ -+ /* wait for operation finished */ -+ topaz_wait_for_register(dev_priv, -+ MTX_START + -+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, -+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK, -+ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK); -+ -+ /* read */ -+ MTX_READ32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, -+ ret_val); -+ -+ release_mtx_control_from_dash(dev_priv); -+} -+ -+void get_mtx_control_from_dash(struct drm_psb_private *dev_priv) -+{ -+ int debug_reg_slave_val; -+ -+ /* GetMTXControlFromDash */ -+ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, -+ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE) | -+ F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT)); -+ do { -+ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, -+ &debug_reg_slave_val); -+ } while ((debug_reg_slave_val & 0x18) != 0); -+ -+ /* save access control */ -+ TOPAZ_READ32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET, -+ &dev_priv->topaz_dash_access_ctrl); -+} -+ -+void release_mtx_control_from_dash(struct drm_psb_private *dev_priv) -+{ -+ /* restore access control */ -+ TOPAZ_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET, -+ dev_priv->topaz_dash_access_ctrl); -+ -+ /* release bus */ -+ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, -+ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE)); -+} -+ -+void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv) -+{ -+ uint32_t pd_addr = psb_get_default_pd_addr(dev_priv->mmu); -+ -+ /* bypass all request while MMU is being configured */ -+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, -+ F_ENCODE(1, TOPAZ_CR_MMU_BYPASS)); -+ -+ /* set MMU hardware at the page table directory */ -+ PSB_DEBUG_GENERAL("TOPAZ: write PD phyaddr=0x%08x " -+ "into MMU_DIR_LIST0/1\n", pd_addr); -+ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(0), pd_addr); -+ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(1), 0); -+ -+ /* setup index register, all pointing to directory bank 0 */ -+ TOPAZ_WRITE32(TOPAZ_CR_MMU_BANK_INDEX, 0); -+ -+ /* now enable MMU access for all requestors */ -+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, 0); -+} -+ -+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv) -+{ -+ uint32_t mmu_control; -+ -+#if 0 -+ PSB_DEBUG_GENERAL("XXX: Only one PTD/PTE cache" -+ " so flush using the master core\n"); -+#endif -+ /* XXX: disable interrupt */ -+ -+ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &mmu_control); -+ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_INVALDC); -+ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_FLUSH); -+ -+#if 0 -+ PSB_DEBUG_GENERAL("Set Invalid flag (this causes a flush with MMU\n" -+ "still operating afterwards even if not cleared,\n" -+ "but may want to replace with MMU_FLUSH?\n"); -+#endif -+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control); -+ -+ /* clear it */ -+ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC)); -+ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_FLUSH)); -+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control); -+} -+ -+#if 0 /* DEBUG_FUNCTION */ -+struct reg_pair { -+ uint32_t base; -+ uint32_t offset; -+}; -+ -+ 
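-+/* An illustrative debug-only sketch (same #if 0 convention as the
-+ * helpers below, not part of the original driver): walk an array of
-+ * struct reg_pair entries and dump the current register values via
-+ * MM_READ32, e.g. to compare a snapshot against topaz_default_regs.
-+ * The function name and message format are made up for illustration.
-+ */
-+static void topaz_dump_reg_pairs(struct drm_psb_private *dev_priv,
-+				 const struct reg_pair *pairs, int count)
-+{
-+	uint32_t val;
-+	int n;
-+
-+	for (n = 0; n < count; n++) {
-+		MM_READ32(pairs[n].base, pairs[n].offset, &val);
-+		PSB_DEBUG_GENERAL("TOPAZ: reg 0x%08x + 0x%08x = 0x%08x\n",
-+				  pairs[n].base, pairs[n].offset, val);
-+	}
-+}
-+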
-+static int ccb_offset; -+ -+static int topaz_test_null(struct drm_device *dev, uint32_t seq) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ -+ /* XXX: here we finished firmware setup.... -+ * using a NULL command to verify the -+ * correctness of firmware -+ */ -+ uint32_t null_cmd; -+ uint32_t cmd_seq; -+ -+ null_cmd = 0 | (1 << 8) | (seq) << 16; -+ topaz_write_mtx_mem(dev_priv, -+ dev_priv->topaz_ccb_buffer_addr + ccb_offset, -+ null_cmd); -+ -+ topaz_mtx_kick(dev_priv, 1); -+ -+ DRM_UDELAY(1000); /* wait to finish */ -+ -+ cmd_seq = topaz_read_mtx_mem(dev_priv, -+ dev_priv->topaz_ccb_ctrl_addr + 4); -+ -+ PSB_DEBUG_GENERAL("Topaz: Sent NULL with sequence=0x%08x," -+ " got sequence=0x%08x (WB_seq=0x%08x,WB_roff=%d)\n", -+ seq, cmd_seq, WB_SEQ, WB_ROFF); -+ -+ PSB_DEBUG_GENERAL("Topaz: after NULL test, query IRQ and clear it\n"); -+ -+ topaz_test_queryirq(dev); -+ topaz_test_clearirq(dev); -+ -+ ccb_offset += 4; -+ -+ return 0; -+} -+ -+void topaz_mmu_flush(struct drm_psb_private *dev_priv) -+{ -+ uint32_t val; -+ -+ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &val); -+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, -+ val | F_ENCODE(1, TOPAZ_CR_MMU_INVALDC)); -+ wmb(); -+ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, -+ val & ~F_ENCODE(0, TOPAZ_CR_MMU_INVALDC)); -+ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &val); -+} -+ -+/* -+ * this function will test whether the mmu is correct: -+ * it get a drm_buffer_object and use CMD_SYNC to write -+ * certain value into this buffer. -+ */ -+static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ uint32_t sync_cmd; -+ unsigned long real_pfn; -+ int ret; -+ uint32_t cmd_seq; -+ -+ *((uint32_t *)dev_priv->topaz_sync_addr) = 0xeeeeeeee; -+ -+ /* topaz_mmu_flush(dev); */ -+ -+ sync_cmd = MTX_CMDID_SYNC | (3 << 8) | (0xeeee) << 16; -+ -+ topaz_write_mtx_mem_multiple_setup(dev_priv, -+ dev_priv->topaz_ccb_buffer_addr + ccb_offset); -+ -+ topaz_write_mtx_mem_multiple(dev_priv, sync_cmd); -+ topaz_write_mtx_mem_multiple(dev_priv, dev_priv->topaz_sync_offset); -+ topaz_write_mtx_mem_multiple(dev_priv, sync_value); -+ -+ topaz_mtx_kick(dev_priv, 1); -+ -+ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu), -+ dev_priv->topaz_sync_offset, &real_pfn); -+ if (ret != 0) { -+ PSB_DEBUG_GENERAL("psb_mmu_virtual_to_pfn failed,exit\n"); -+ return; -+ } -+ PSB_DEBUG_GENERAL("TOPAZ: issued SYNC command, " -+ "BO offset=0x%08x (pfn=%lu), synch value=0x%08x\n", -+ dev_priv->topaz_sync_offset, real_pfn, sync_value); -+ -+ /* XXX: if we can use interrupt, we can wait this command finish */ -+ /* topaz_wait_for_register (dev_priv, -+ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT, 0xf, 0xf); */ -+ DRM_UDELAY(1000); -+ -+ cmd_seq = topaz_read_mtx_mem(dev_priv, -+ dev_priv->topaz_ccb_ctrl_addr + 4); -+ PSB_DEBUG_GENERAL("Topaz: cmd_seq equals 0x%x, and expected 0x%x " -+ "(WB_seq=0x%08x,WB_roff=%d),synch value is 0x%x," -+ "expected 0x%08x\n", -+ cmd_seq, 0xeeee, WB_SEQ, WB_ROFF, -+ *((uint32_t *)dev_priv->topaz_sync_addr), sync_value); -+ -+ PSB_DEBUG_GENERAL("Topaz: after MMU test, query IRQ and clear it\n"); -+ topaz_test_queryirq(dev); -+ topaz_test_clearirq(dev); -+ -+ ccb_offset += 3*4; /* shift 3DWs */ -+} -+ -+#endif -+ -+int lnc_topaz_restore_mtx_state(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *)dev->dev_private; -+ uint32_t reg_val; -+ uint32_t *mtx_reg_state; -+ int i; -+ -+ if (dev_priv->topaz_mtx_data_mem == NULL) { -+ DRM_ERROR("TOPAZ: 
try to restore context without " -+ "space allocated\n"); -+ return -1; -+ } -+ -+ /* turn on mtx clocks */ -+ MTX_READ32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, ®_val); -+ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, -+ reg_val & (~MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE)); -+ -+ /* reset mtx */ -+ /* FIXME: should use core_write??? */ -+ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET, -+ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK); -+ DRM_UDELAY(6000); -+ -+ topaz_mmu_hwsetup(dev_priv); -+ /* upload code, restore mtx data */ -+ mtx_dma_write(dev); -+ -+ mtx_reg_state = dev_priv->topaz_mtx_reg_state; -+ /* restore register */ -+ /* FIXME: conside to put read/write into one function */ -+ /* Saves 8 Registers of D0 Bank */ -+ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */ -+ for (i = 0; i < 8; i++) { -+ topaz_write_core_reg(dev_priv, 0x1 | (i<<4), -+ *mtx_reg_state); -+ mtx_reg_state++; -+ } -+ /* Saves 8 Registers of D1 Bank */ -+ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */ -+ for (i = 0; i < 8; i++) { -+ topaz_write_core_reg(dev_priv, 0x2 | (i<<4), -+ *mtx_reg_state); -+ mtx_reg_state++; -+ } -+ /* Saves 4 Registers of A0 Bank */ -+ /* A0StP, A0FrP, A0.2 and A0.3 */ -+ for (i = 0; i < 4; i++) { -+ topaz_write_core_reg(dev_priv, 0x3 | (i<<4), -+ *mtx_reg_state); -+ mtx_reg_state++; -+ } -+ /* Saves 4 Registers of A1 Bank */ -+ /* A1GbP, A1LbP, A1.2 and A1.3 */ -+ for (i = 0; i < 4; i++) { -+ topaz_write_core_reg(dev_priv, 0x4 | (i<<4), -+ *mtx_reg_state); -+ mtx_reg_state++; -+ } -+ /* Saves PC and PCX */ -+ for (i = 0; i < 2; i++) { -+ topaz_write_core_reg(dev_priv, 0x5 | (i<<4), -+ *mtx_reg_state); -+ mtx_reg_state++; -+ } -+ /* Saves 8 Control Registers */ -+ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI, -+ * TXGPIOO */ -+ for (i = 0; i < 8; i++) { -+ topaz_write_core_reg(dev_priv, 0x7 | (i<<4), -+ *mtx_reg_state); -+ mtx_reg_state++; -+ } -+ -+ /* turn on MTX */ -+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET, -+ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK); -+ -+ return 0; -+} -+ -+int lnc_topaz_save_mtx_state(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *)dev->dev_private; -+ uint32_t *mtx_reg_state; -+ int i; -+ struct topaz_codec_fw *cur_codec_fw; -+ -+ /* FIXME: make sure the topaz_mtx_data_mem is allocated */ -+ if (dev_priv->topaz_mtx_data_mem == NULL) { -+ DRM_ERROR("TOPAZ: try to save context without space " -+ "allocated\n"); -+ return -1; -+ } -+ -+ topaz_wait_for_register(dev_priv, -+ MTX_START + MTX_CORE_CR_MTX_TXRPT_OFFSET, -+ TXRPT_WAITONKICK_VALUE, -+ 0xffffffff); -+ -+ /* stop mtx */ -+ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET, -+ MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK); -+ -+ mtx_reg_state = dev_priv->topaz_mtx_reg_state; -+ -+ /* FIXME: conside to put read/write into one function */ -+ /* Saves 8 Registers of D0 Bank */ -+ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */ -+ for (i = 0; i < 8; i++) { -+ topaz_read_core_reg(dev_priv, 0x1 | (i<<4), -+ mtx_reg_state); -+ mtx_reg_state++; -+ } -+ /* Saves 8 Registers of D1 Bank */ -+ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */ -+ for (i = 0; i < 8; i++) { -+ topaz_read_core_reg(dev_priv, 0x2 | (i<<4), -+ mtx_reg_state); -+ mtx_reg_state++; -+ } -+ /* Saves 4 Registers of A0 Bank */ -+ /* A0StP, A0FrP, A0.2 and A0.3 */ -+ for (i = 0; i < 4; i++) { -+ topaz_read_core_reg(dev_priv, 0x3 | (i<<4), -+ mtx_reg_state); -+ mtx_reg_state++; -+ } -+ /* Saves 4 Registers of A1 Bank */ -+ /* A1GbP, A1LbP, A1.2 and A1.3 */ -+ for (i = 0; i 
< 4; i++) { -+ topaz_read_core_reg(dev_priv, 0x4 | (i<<4), -+ mtx_reg_state); -+ mtx_reg_state++; -+ } -+ /* Saves PC and PCX */ -+ for (i = 0; i < 2; i++) { -+ topaz_read_core_reg(dev_priv, 0x5 | (i<<4), -+ mtx_reg_state); -+ mtx_reg_state++; -+ } -+ /* Saves 8 Control Registers */ -+ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI, -+ * TXGPIOO */ -+ for (i = 0; i < 8; i++) { -+ topaz_read_core_reg(dev_priv, 0x7 | (i<<4), -+ mtx_reg_state); -+ mtx_reg_state++; -+ } -+ -+ /* save mtx data memory */ -+ cur_codec_fw = &topaz_fw[dev_priv->topaz_cur_codec]; -+ -+ mtx_dma_read(dev, cur_codec_fw->data_location + 0x80900000 - 0x82880000, -+ dev_priv->cur_mtx_data_size); -+ -+ /* turn off mtx clocks */ -+ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, -+ MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE); -+ -+ return 0; -+} -+ -+void mtx_dma_read(struct drm_device *dev, uint32_t source_addr, uint32_t size) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *)dev->dev_private; -+ struct ttm_buffer_object *target; -+ -+ /* setup mtx DMAC registers to do transfer */ -+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, source_addr); -+ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC, -+ F_ENCODE(2, MTX_BURSTSIZE) | -+ F_ENCODE(1, MTX_RNW) | -+ F_ENCODE(1, MTX_ENABLE) | -+ F_ENCODE(size, MTX_LENGTH)); -+ -+ /* give the DMAC access to the host memory via BIF */ -+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1); -+ -+ target = dev_priv->topaz_mtx_data_mem; -+ /* transfert the data */ -+ /* FIXME: size is meaured by bytes? */ -+ topaz_dma_transfer(dev_priv, 0, target->offset, 0, -+ MTX_CR_MTX_SYSC_CDMAT, -+ size, 0, 1); -+ -+ /* wait for it transfer */ -+ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START, -+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN), -+ F_ENCODE(1, IMG_SOC_TRANSFER_FIN)); -+ /* clear interrupt */ -+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0); -+ /* give access back to topaz core */ -+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0); -+} -+ -+void dmac_transfer(struct drm_device *dev, uint32_t channel, uint32_t dst_addr, -+ uint32_t soc_addr, uint32_t bytes_num, -+ int increment, int rnw) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *)dev->dev_private; -+ uint32_t count_reg; -+ uint32_t irq_state; -+ -+ /* check no transfer is in progress */ -+ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &count_reg); -+ if (0 != (count_reg & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) { -+ DRM_ERROR("TOPAZ: there's transfer in progress when wanna " -+ "save mtx data\n"); -+ /* FIXME: how to handle this error */ -+ return; -+ } -+ -+ /* no hold off period */ -+ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0); -+ /* cleare irq state */ -+ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0); -+ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_state); -+ if (0 != irq_state) { -+ DRM_ERROR("TOPAZ: there's irq cann't clear\n"); -+ return; -+ } -+ -+ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), dst_addr); -+ count_reg = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, -+ DMAC_PWIDTH_32_BIT, rnw, -+ DMAC_PWIDTH_32_BIT, bytes_num); -+ /* generate an interrupt at end of transfer */ -+ count_reg |= MASK_IMG_SOC_TRANSFER_IEN; -+ count_reg |= F_ENCODE(rnw, IMG_SOC_DIR); -+ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count_reg); -+ -+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel), -+ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, increment, -+ DMAC_BURST_2)); -+ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr); -+ -+ /* Finally, rewrite the count register with the enable -+ * bit set to kick off the transfer */ -+ 
DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel),
-+		     count_reg | MASK_IMG_SOC_EN);
-+}
-+
-+void mtx_dma_write(struct drm_device *dev)
-+{
-+	struct topaz_codec_fw *cur_codec_fw;
-+	struct drm_psb_private *dev_priv =
-+		(struct drm_psb_private *)dev->dev_private;
-+
-+	cur_codec_fw = &topaz_fw[dev_priv->topaz_cur_codec];
-+
-+	/* upload code */
-+	/* setup mtx DMAC registers to receive the transfer */
-+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
-+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
-+		    F_ENCODE(2, MTX_BURSTSIZE) |
-+		    F_ENCODE(0, MTX_RNW) |
-+		    F_ENCODE(1, MTX_ENABLE) |
-+		    F_ENCODE(cur_codec_fw->text_size / 4, MTX_LENGTH));
-+
-+	/* give DMAC access to host memory */
-+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
-+
-+	/* transfer code */
-+	topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
-+			   MTX_CR_MTX_SYSC_CDMAT, cur_codec_fw->text_size / 4,
-+			   0, 0);
-+	/* wait for the transfer to finish */
-+	topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
-+				F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
-+				F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
-+	/* clear interrupt */
-+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
-+
-+	/* set up the MTX to start receiving data */
-+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000 +
-+		    (cur_codec_fw->data_location) - 0x82880000);
-+
-+	MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
-+		    F_ENCODE(2, MTX_BURSTSIZE) |
-+		    F_ENCODE(0, MTX_RNW) |
-+		    F_ENCODE(1, MTX_ENABLE) |
-+		    F_ENCODE(dev_priv->cur_mtx_data_size, MTX_LENGTH));
-+
-+	/* give DMAC access to host memory */
-+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
-+
-+	/* transfer data */
-+	topaz_dma_transfer(dev_priv, 0, dev_priv->topaz_mtx_data_mem->offset,
-+			   0, MTX_CR_MTX_SYSC_CDMAT,
-+			   dev_priv->cur_mtx_data_size,
-+			   0, 0);
-+	/* wait for the transfer to finish */
-+	topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
-+				F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
-+				F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
-+	/* clear interrupt */
-+	DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
-+
-+	/* give access back to Topaz Core */
-+	TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
-+}
-+
-+#if 0
-+void topaz_save_default_regs(struct drm_psb_private *dev_priv, uint32_t *data)
-+{
-+	int n;
-+	int count;
-+
-+	count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
-+	for (n = 0; n < count; n++, ++data)
-+		MM_READ32(topaz_default_regs[n][0],
-+			  topaz_default_regs[n][1],
-+			  data);
-+
-+}
-+
-+void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
-+				uint32_t *data)
-+{
-+	int n;
-+	int count;
-+
-+	count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
-+	for (n = 0; n < count; n++, ++data)
-+		MM_WRITE32(topaz_default_regs[n][0],
-+			   topaz_default_regs[n][1],
-+			   *data);
-+
-+}
-+#endif
-diff -uNr a/drivers/gpu/drm/psb/Makefile b/drivers/gpu/drm/psb/Makefile
---- a/drivers/gpu/drm/psb/Makefile	1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/Makefile	2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,18 @@
-+#
-+# Makefile for the drm device driver. This driver provides support for the
-+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
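-+#
-+# Note: the object list below builds a private copy of the TTM memory
-+# manager (the ttm/*.o files) directly into psb.ko instead of relying
-+# on a shared in-kernel TTM module.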
-+ -+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/psb -+ -+psb-y := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o \ -+ psb_buffer.o psb_gtt.o psb_schedule.o psb_scene.o \ -+ psb_reset.o psb_xhw.o psb_msvdx.o \ -+ lnc_topaz.o lnc_topazinit.o \ -+ psb_msvdxinit.o psb_ttm_glue.o psb_fb.o psb_setup.o \ -+ ttm/ttm_object.o ttm/ttm_lock.o ttm/ttm_fence_user.o \ -+ ttm/ttm_fence.o ttm/ttm_tt.o ttm/ttm_execbuf_util.o \ -+ ttm/ttm_bo.o ttm/ttm_bo_util.o ttm/ttm_placement_user.o \ -+ ttm/ttm_bo_vm.o ttm/ttm_pat_compat.o ttm/ttm_memory.o -+ -+obj-$(CONFIG_DRM_PSB) += psb.o -+ -diff -uNr a/drivers/gpu/drm/psb/psb_buffer.c b/drivers/gpu/drm/psb/psb_buffer.c ---- a/drivers/gpu/drm/psb/psb_buffer.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_buffer.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,504 @@ -+/************************************************************************** -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com> -+ */ -+#include "ttm/ttm_placement_common.h" -+#include "ttm/ttm_execbuf_util.h" -+#include "ttm/ttm_fence_api.h" -+#include <drm/drmP.h> -+#include "psb_drv.h" -+#include "psb_schedule.h" -+ -+#define DRM_MEM_TTM 26 -+ -+struct drm_psb_ttm_backend { -+ struct ttm_backend base; -+ struct page **pages; -+ unsigned int desired_tile_stride; -+ unsigned int hw_tile_stride; -+ int mem_type; -+ unsigned long offset; -+ unsigned long num_pages; -+}; -+ -+/* -+ * Poulsbo GPU virtual space looks like this -+ * (We currently use only one MMU context). -+ * -+ * gatt_start = Start of GATT aperture in bus space. -+ * stolen_end = End of GATT populated by stolen memory in bus space. -+ * gatt_end = End of GATT -+ * twod_end = MIN(gatt_start + 256_MEM, gatt_end) -+ * -+ * 0x00000000 -> 0x10000000 Temporary mapping space for tiling- -+ * and copy operations. -+ * This space is not managed and is protected by the -+ * temp_mem mutex. -+ * -+ * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers. -+ * -+ * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU For generic MMU-only use. -+ * -+ * gatt_start -> stolen_end TTM_PL_VRAM Pre-populated GATT pages. -+ * -+ * stolen_end -> twod_end TTM_PL_TT GATT memory usable by 2D engine. -+ * -+ * twod_end -> gatt_end DRM_BO_MEM_APER GATT memory not -+ * usable by 2D engine. -+ * -+ * gatt_end -> 0xffffffff Currently unused. 
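-+ *
-+ * A worked example, assuming a 256M GATT aperture at 0x40000000 with
-+ * 32M of stolen memory (illustrative numbers, not taken from real
-+ * hardware): gatt_start = 0x40000000, stolen_end = 0x42000000 and
-+ * twod_end = gatt_end = 0x50000000, so in that layout the whole GATT
-+ * is usable by the 2D engine.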
-+ */ -+ -+static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, -+ struct ttm_mem_type_manager *man) -+{ -+ -+ struct drm_psb_private *dev_priv = -+ container_of(bdev, struct drm_psb_private, bdev); -+ struct psb_gtt *pg = dev_priv->pg; -+ -+ switch (type) { -+ case TTM_PL_SYSTEM: -+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; -+ man->available_caching = TTM_PL_FLAG_CACHED | -+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; -+ man->default_caching = TTM_PL_FLAG_CACHED; -+ break; -+ case DRM_PSB_MEM_KERNEL: -+ man->io_offset = 0x00000000; -+ man->io_size = 0x00000000; -+ man->io_addr = NULL; -+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | -+ TTM_MEMTYPE_FLAG_CMA; -+ man->gpu_offset = PSB_MEM_KERNEL_START; -+ man->available_caching = TTM_PL_FLAG_CACHED | -+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; -+ man->default_caching = TTM_PL_FLAG_WC; -+ break; -+ case DRM_PSB_MEM_MMU: -+ man->io_offset = 0x00000000; -+ man->io_size = 0x00000000; -+ man->io_addr = NULL; -+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | -+ TTM_MEMTYPE_FLAG_CMA; -+ man->gpu_offset = PSB_MEM_MMU_START; -+ man->available_caching = TTM_PL_FLAG_CACHED | -+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; -+ man->default_caching = TTM_PL_FLAG_WC; -+ break; -+ case DRM_PSB_MEM_PDS: -+ man->io_offset = 0x00000000; -+ man->io_size = 0x00000000; -+ man->io_addr = NULL; -+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | -+ TTM_MEMTYPE_FLAG_CMA; -+ man->gpu_offset = PSB_MEM_PDS_START; -+ man->available_caching = TTM_PL_FLAG_CACHED | -+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; -+ man->default_caching = TTM_PL_FLAG_WC; -+ break; -+ case DRM_PSB_MEM_RASTGEOM: -+ man->io_offset = 0x00000000; -+ man->io_size = 0x00000000; -+ man->io_addr = NULL; -+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | -+ TTM_MEMTYPE_FLAG_CMA; -+ man->gpu_offset = PSB_MEM_RASTGEOM_START; -+ man->available_caching = TTM_PL_FLAG_CACHED | -+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; -+ man->default_caching = TTM_PL_FLAG_WC; -+ break; -+ case TTM_PL_VRAM: -+ man->io_addr = NULL; -+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | -+ TTM_MEMTYPE_FLAG_FIXED | -+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; -+#ifdef PSB_WORKING_HOST_MMU_ACCESS -+ man->io_offset = pg->gatt_start; -+ man->io_size = pg->gatt_pages << PAGE_SHIFT; -+#else -+ man->io_offset = pg->stolen_base; -+ man->io_size = pg->vram_stolen_size; -+#endif -+ man->gpu_offset = pg->gatt_start; -+ man->available_caching = TTM_PL_FLAG_UNCACHED | -+ TTM_PL_FLAG_WC; -+ man->default_caching = TTM_PL_FLAG_WC; -+ break; -+ case TTM_PL_CI: -+ man->io_addr = NULL; -+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | -+ TTM_MEMTYPE_FLAG_FIXED | -+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; -+ man->io_offset = dev_priv->ci_region_start; -+ man->io_size = pg->ci_stolen_size; -+ man->gpu_offset = pg->gatt_start - pg->ci_stolen_size; -+ man->available_caching = TTM_PL_FLAG_UNCACHED; -+ man->default_caching = TTM_PL_FLAG_UNCACHED; -+ break; -+ case TTM_PL_TT: /* Mappable GATT memory */ -+ man->io_offset = pg->gatt_start; -+ man->io_size = pg->gatt_pages << PAGE_SHIFT; -+ man->io_addr = NULL; -+#ifdef PSB_WORKING_HOST_MMU_ACCESS -+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | -+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP; -+#else -+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | -+ TTM_MEMTYPE_FLAG_CMA; -+#endif -+ man->available_caching = TTM_PL_FLAG_CACHED | -+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; -+ man->default_caching = TTM_PL_FLAG_WC; -+ man->gpu_offset = pg->gatt_start; -+ break; -+ case DRM_PSB_MEM_APER: /*MMU memory. Mappable. Not usable for 2D. 
 */
-+		man->io_offset = pg->gatt_start;
-+		man->io_size = pg->gatt_pages << PAGE_SHIFT;
-+		man->io_addr = NULL;
-+#ifdef PSB_WORKING_HOST_MMU_ACCESS
-+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-+		    TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
-+#else
-+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-+		    TTM_MEMTYPE_FLAG_CMA;
-+#endif
-+		man->gpu_offset = pg->gatt_start;
-+		man->available_caching = TTM_PL_FLAG_CACHED |
-+		    TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-+		man->default_caching = TTM_PL_FLAG_WC;
-+		break;
-+	default:
-+		DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
-+		return -EINVAL;
-+	}
-+	return 0;
-+}
-+
-+static uint32_t psb_evict_mask(struct ttm_buffer_object *bo)
-+{
-+	uint32_t cur_placement = bo->mem.flags & ~TTM_PL_MASK_MEM;
-+
-+
-+	switch (bo->mem.mem_type) {
-+	case TTM_PL_VRAM:
-+		if (bo->mem.proposed_flags & TTM_PL_FLAG_TT)
-+			return cur_placement | TTM_PL_FLAG_TT;
-+		else
-+			return cur_placement | TTM_PL_FLAG_SYSTEM;
-+	default:
-+		return cur_placement | TTM_PL_FLAG_SYSTEM;
-+	}
-+}
-+
-+static int psb_invalidate_caches(struct ttm_bo_device *bdev,
-+				 uint32_t placement)
-+{
-+	return 0;
-+}
-+
-+static int psb_move_blit(struct ttm_buffer_object *bo,
-+			 bool evict, bool no_wait,
-+			 struct ttm_mem_reg *new_mem)
-+{
-+	struct drm_psb_private *dev_priv =
-+	    container_of(bo->bdev, struct drm_psb_private, bdev);
-+	struct drm_device *dev = dev_priv->dev;
-+	struct ttm_mem_reg *old_mem = &bo->mem;
-+	struct ttm_fence_object *fence = NULL;	/* NULL so the error path can test it safely */
-+	int dir = 0;
-+	int ret;
-+
-+	if ((old_mem->mem_type == new_mem->mem_type) &&
-+	    (new_mem->mm_node->start <
-+	     old_mem->mm_node->start + old_mem->mm_node->size)) {
-+		dir = 1;
-+	}
-+
-+	psb_emit_2d_copy_blit(dev,
-+			      old_mem->mm_node->start << PAGE_SHIFT,
-+			      new_mem->mm_node->start << PAGE_SHIFT,
-+			      new_mem->num_pages, dir);
-+
-+	ret = ttm_fence_object_create(&dev_priv->fdev, 0,
-+				      _PSB_FENCE_TYPE_EXE,
-+				      TTM_FENCE_FLAG_EMIT,
-+				      &fence);
-+	if (unlikely(ret != 0)) {
-+		psb_idle_2d(dev);
-+		if (fence)
-+			ttm_fence_object_unref(&fence);
-+	}
-+
-+	ret = ttm_bo_move_accel_cleanup(bo, (void *) fence,
-+					(void *) (unsigned long)
-+					_PSB_FENCE_TYPE_EXE,
-+					evict, no_wait, new_mem);
-+	if (fence)
-+		ttm_fence_object_unref(&fence);
-+	return ret;
-+}
-+
-+/*
-+ * Flip destination ttm into GATT,
-+ * then blit and subsequently move out again.
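-+ *
-+ * The blit engine only works on offsets that are actually bound in
-+ * GPU space, so a move to TTM_PL_SYSTEM cannot be blitted directly:
-+ * the buffer is first bound to a temporary TTM_PL_TT placement,
-+ * blitted there, and then handed to ttm_bo_move_ttm() to finish the
-+ * move out to system memory.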
-+ */ -+ -+static int psb_move_flip(struct ttm_buffer_object *bo, -+ bool evict, bool interruptible, bool no_wait, -+ struct ttm_mem_reg *new_mem) -+{ -+ struct ttm_bo_device *bdev = bo->bdev; -+ struct ttm_mem_reg tmp_mem; -+ int ret; -+ -+ tmp_mem = *new_mem; -+ tmp_mem.mm_node = NULL; -+ tmp_mem.proposed_flags = TTM_PL_FLAG_TT; -+ -+ ret = ttm_bo_mem_space(bo, &tmp_mem, interruptible, no_wait); -+ if (ret) -+ return ret; -+ ret = ttm_tt_bind(bo->ttm, &tmp_mem); -+ if (ret) -+ goto out_cleanup; -+ ret = psb_move_blit(bo, true, no_wait, &tmp_mem); -+ if (ret) -+ goto out_cleanup; -+ -+ ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem); -+out_cleanup: -+ if (tmp_mem.mm_node) { -+ spin_lock(&bdev->lru_lock); -+ drm_mm_put_block(tmp_mem.mm_node); -+ tmp_mem.mm_node = NULL; -+ spin_unlock(&bdev->lru_lock); -+ } -+ return ret; -+} -+ -+static int psb_move(struct ttm_buffer_object *bo, -+ bool evict, bool interruptible, -+ bool no_wait, struct ttm_mem_reg *new_mem) -+{ -+ struct ttm_mem_reg *old_mem = &bo->mem; -+ -+ if (old_mem->mem_type == TTM_PL_SYSTEM) { -+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); -+ } else if (new_mem->mem_type == TTM_PL_SYSTEM) { -+ int ret = psb_move_flip(bo, evict, interruptible, -+ no_wait, new_mem); -+ if (unlikely(ret != 0)) { -+ if (ret == -ERESTART) -+ return ret; -+ else -+ return ttm_bo_move_memcpy(bo, evict, no_wait, -+ new_mem); -+ } -+ } else { -+ if (psb_move_blit(bo, evict, no_wait, new_mem)) -+ return ttm_bo_move_memcpy(bo, evict, no_wait, -+ new_mem); -+ } -+ return 0; -+} -+ -+static int drm_psb_tbe_populate(struct ttm_backend *backend, -+ unsigned long num_pages, -+ struct page **pages, -+ struct page *dummy_read_page) -+{ -+ struct drm_psb_ttm_backend *psb_be = -+ container_of(backend, struct drm_psb_ttm_backend, base); -+ -+ psb_be->pages = pages; -+ return 0; -+} -+ -+static int drm_psb_tbe_unbind(struct ttm_backend *backend) -+{ -+ struct ttm_bo_device *bdev = backend->bdev; -+ struct drm_psb_private *dev_priv = -+ container_of(bdev, struct drm_psb_private, bdev); -+ struct drm_psb_ttm_backend *psb_be = -+ container_of(backend, struct drm_psb_ttm_backend, base); -+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu); -+ struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type]; -+ -+ PSB_DEBUG_RENDER("MMU unbind.\n"); -+ -+ if (psb_be->mem_type == TTM_PL_TT) { -+ uint32_t gatt_p_offset = -+ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT; -+ -+ (void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset, -+ psb_be->num_pages, -+ psb_be->desired_tile_stride, -+ psb_be->hw_tile_stride); -+ } -+ -+ psb_mmu_remove_pages(pd, psb_be->offset, -+ psb_be->num_pages, -+ psb_be->desired_tile_stride, -+ psb_be->hw_tile_stride); -+ -+ return 0; -+} -+ -+static int drm_psb_tbe_bind(struct ttm_backend *backend, -+ struct ttm_mem_reg *bo_mem) -+{ -+ struct ttm_bo_device *bdev = backend->bdev; -+ struct drm_psb_private *dev_priv = -+ container_of(bdev, struct drm_psb_private, bdev); -+ struct drm_psb_ttm_backend *psb_be = -+ container_of(backend, struct drm_psb_ttm_backend, base); -+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu); -+ struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type]; -+ int type; -+ int ret = 0; -+ -+ psb_be->mem_type = bo_mem->mem_type; -+ psb_be->num_pages = bo_mem->num_pages; -+ psb_be->desired_tile_stride = 0; -+ psb_be->hw_tile_stride = 0; -+ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) + -+ man->gpu_offset; -+ -+ type = -+ (bo_mem-> -+ flags & TTM_PL_FLAG_CACHED) ? 
PSB_MMU_CACHED_MEMORY : 0;
-+
-+	PSB_DEBUG_RENDER("MMU bind.\n");
-+	if (psb_be->mem_type == TTM_PL_TT) {
-+		uint32_t gatt_p_offset =
-+		    (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
-+
-+		ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
-+					   gatt_p_offset,
-+					   psb_be->num_pages,
-+					   psb_be->desired_tile_stride,
-+					   psb_be->hw_tile_stride, type);
-+		if (ret)
-+			goto out_err;
-+	}
-+
-+	ret = psb_mmu_insert_pages(pd, psb_be->pages,
-+				   psb_be->offset, psb_be->num_pages,
-+				   psb_be->desired_tile_stride,
-+				   psb_be->hw_tile_stride, type);
-+	if (ret)
-+		goto out_err;
-+
-+	return 0;
-+out_err:
-+	drm_psb_tbe_unbind(backend);
-+	return ret;
-+
-+}
-+
-+static void drm_psb_tbe_clear(struct ttm_backend *backend)
-+{
-+	struct drm_psb_ttm_backend *psb_be =
-+	    container_of(backend, struct drm_psb_ttm_backend, base);
-+
-+	psb_be->pages = NULL;
-+	return;
-+}
-+
-+static void drm_psb_tbe_destroy(struct ttm_backend *backend)
-+{
-+	struct drm_psb_ttm_backend *psb_be =
-+	    container_of(backend, struct drm_psb_ttm_backend, base);
-+
-+	if (backend)
-+		drm_free(psb_be, sizeof(*psb_be), DRM_MEM_TTM);
-+}
-+
-+static struct ttm_backend_func psb_ttm_backend = {
-+	.populate = drm_psb_tbe_populate,
-+	.clear = drm_psb_tbe_clear,
-+	.bind = drm_psb_tbe_bind,
-+	.unbind = drm_psb_tbe_unbind,
-+	.destroy = drm_psb_tbe_destroy,
-+};
-+
-+static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev)
-+{
-+	struct drm_psb_ttm_backend *psb_be;
-+
-+	psb_be = drm_calloc(1, sizeof(*psb_be), DRM_MEM_TTM);
-+	if (!psb_be)
-+		return NULL;
-+	psb_be->pages = NULL;
-+	psb_be->base.func = &psb_ttm_backend;
-+	psb_be->base.bdev = bdev;
-+	return &psb_be->base;
-+}
-+
-+/*
-+ * Use this memory type priority if no eviction is needed.
-+ */
-+static uint32_t psb_mem_prios[] = {
-+	TTM_PL_CI,
-+	TTM_PL_VRAM,
-+	TTM_PL_TT,
-+	DRM_PSB_MEM_KERNEL,
-+	DRM_PSB_MEM_MMU,
-+	DRM_PSB_MEM_RASTGEOM,
-+	DRM_PSB_MEM_PDS,
-+	DRM_PSB_MEM_APER,
-+	TTM_PL_SYSTEM
-+};
-+
-+/*
-+ * Use this memory type priority if eviction is needed.
-+ */
-+static uint32_t psb_busy_prios[] = {
-+	TTM_PL_TT,
-+	TTM_PL_VRAM,
-+	TTM_PL_CI,
-+	DRM_PSB_MEM_KERNEL,
-+	DRM_PSB_MEM_MMU,
-+	DRM_PSB_MEM_RASTGEOM,
-+	DRM_PSB_MEM_PDS,
-+	DRM_PSB_MEM_APER,
-+	TTM_PL_SYSTEM
-+};
-+
-+
-+struct ttm_bo_driver psb_ttm_bo_driver = {
-+	.mem_type_prio = psb_mem_prios,
-+	.mem_busy_prio = psb_busy_prios,
-+	.num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
-+	.num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
-+	.create_ttm_backend_entry = &drm_psb_tbe_init,
-+	.invalidate_caches = &psb_invalidate_caches,
-+	.init_mem_type = &psb_init_mem_type,
-+	.evict_flags = &psb_evict_mask,
-+	.move = &psb_move,
-+	.verify_access = &psb_verify_access,
-+	.sync_obj_signaled = &ttm_fence_sync_obj_signaled,
-+	.sync_obj_wait = &ttm_fence_sync_obj_wait,
-+	.sync_obj_flush = &ttm_fence_sync_obj_flush,
-+	.sync_obj_unref = &ttm_fence_sync_obj_unref,
-+	.sync_obj_ref = &ttm_fence_sync_obj_ref
-+};
-diff -uNr a/drivers/gpu/drm/psb/psb_drm.h b/drivers/gpu/drm/psb/psb_drm.h
---- a/drivers/gpu/drm/psb/psb_drm.h	1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/psb_drm.h	2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,444 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#ifndef _PSB_DRM_H_
-+#define _PSB_DRM_H_
-+
-+#if defined(__linux__) && !defined(__KERNEL__)
-+#include <stdint.h>
-+#endif
-+
-+#include "ttm/ttm_fence_user.h"
-+#include "ttm/ttm_placement_user.h"
-+
-+#define DRM_PSB_SAREA_MAJOR 0
-+#define DRM_PSB_SAREA_MINOR 2
-+#define PSB_FIXED_SHIFT 16
-+
-+#define DRM_PSB_FIRST_TA_USE_REG 3
-+#define DRM_PSB_NUM_TA_USE_REG 6
-+#define DRM_PSB_FIRST_RASTER_USE_REG 8
-+#define DRM_PSB_NUM_RASTER_USE_REG 7
-+
-+/*
-+ * Public memory types.
-+ */
-+
-+#define DRM_PSB_MEM_MMU TTM_PL_PRIV1
-+#define DRM_PSB_FLAG_MEM_MMU TTM_PL_FLAG_PRIV1
-+#define DRM_PSB_MEM_PDS TTM_PL_PRIV2
-+#define DRM_PSB_FLAG_MEM_PDS TTM_PL_FLAG_PRIV2
-+#define DRM_PSB_MEM_APER TTM_PL_PRIV3
-+#define DRM_PSB_FLAG_MEM_APER TTM_PL_FLAG_PRIV3
-+#define DRM_PSB_MEM_RASTGEOM TTM_PL_PRIV4
-+#define DRM_PSB_FLAG_MEM_RASTGEOM TTM_PL_FLAG_PRIV4
-+#define PSB_MEM_RASTGEOM_START 0x30000000
-+
-+typedef int32_t psb_fixed;
-+typedef uint32_t psb_ufixed;
-+
-+static inline int32_t psb_int_to_fixed(int a)
-+{
-+	return a * (1 << PSB_FIXED_SHIFT);
-+}
-+
-+static inline uint32_t psb_unsigned_to_ufixed(unsigned int a)
-+{
-+	return a << PSB_FIXED_SHIFT;
-+}
-+
-+/*Status of the command sent to the gfx device.*/
-+typedef enum {
-+	DRM_CMD_SUCCESS,
-+	DRM_CMD_FAILED,
-+	DRM_CMD_HANG
-+} drm_cmd_status_t;
-+
-+struct drm_psb_scanout {
-+	uint32_t buffer_id;	/* DRM buffer object ID */
-+	uint32_t rotation;	/* Rotation as in RR_rotation definitions */
-+	uint32_t stride;	/* Buffer stride in bytes */
-+	uint32_t depth;		/* Buffer depth in bits (NOT bpp) */
-+	uint32_t width;		/* Buffer width in pixels */
-+	uint32_t height;	/* Buffer height in lines */
-+	int32_t transform[3][3];	/* Buffer composite transform */
-+	/* (scaling, rot, reflect) */
-+};
-+
-+#define DRM_PSB_SAREA_OWNERS 16
-+#define DRM_PSB_SAREA_OWNER_2D 0
-+#define DRM_PSB_SAREA_OWNER_3D 1
-+
-+#define DRM_PSB_SAREA_SCANOUTS 3
-+
-+struct drm_psb_sarea {
-+	/* Track changes of this data structure */
-+
-+	uint32_t major;
-+	uint32_t minor;
-+
-+	/* Last context to touch part of hw */
-+	uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
-+
-+	/* Definition of front- and rotated buffers */
-+	uint32_t num_scanouts;
-+	struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
-+
-+	int planeA_x;
-+	int planeA_y;
-+	int planeA_w;
-+	int planeA_h;
-+	int planeB_x;
-+	int planeB_y;
-+	int planeB_w;
-+	int planeB_h;
-+	/* Number of active scanouts */
-+	uint32_t num_active_scanouts;
-+};
-+
-+#define PSB_RELOC_MAGIC 0x67676767
-+#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
-+#define PSB_RELOC_SHIFT_SHIFT 0
-+#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
-+#define PSB_RELOC_ALSHIFT_SHIFT 16
-+
-+#define PSB_RELOC_OP_OFFSET 0	/* Offset of the indicated
-+				 * buffer
-+				 */
-+#define PSB_RELOC_OP_2D_OFFSET 1	/* Offset of the indicated
-+					 * buffer, relative to 2D
-+					 * base address
-+					 */
-+#define PSB_RELOC_OP_PDS_OFFSET 2	/* Offset of the indicated buffer,
-+					 * relative to PDS base address
-+					 */
-+#define PSB_RELOC_OP_STRIDE 3	/* Stride of the indicated
-+				 * buffer (for tiling)
-+				 */
-+#define PSB_RELOC_OP_USE_OFFSET 4	/* Offset of USE buffer
-+					 * relative to base reg
-+					 */
-+#define PSB_RELOC_OP_USE_REG 5	/* Base reg of USE buffer */
-+
-+struct drm_psb_reloc {
-+	uint32_t reloc_op;
-+	uint32_t where;		/* offset in destination buffer */
-+	uint32_t buffer;	/* Buffer reloc applies to */
-+	uint32_t mask;		/* Destination format: */
-+	uint32_t shift;		/* Destination format: */
-+	uint32_t pre_add;	/* Destination format: */
-+	uint32_t background;	/* Destination add */
-+	uint32_t dst_buffer;	/* Destination buffer. Index into buffer_list */
-+	uint32_t arg0;		/* Reloc-op dependent */
-+	uint32_t arg1;
-+};
-+
-+
-+#define PSB_GPU_ACCESS_READ (1ULL << 32)
-+#define PSB_GPU_ACCESS_WRITE (1ULL << 33)
-+#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
-+
-+#define PSB_BO_FLAG_TA (1ULL << 48)
-+#define PSB_BO_FLAG_SCENE (1ULL << 49)
-+#define PSB_BO_FLAG_FEEDBACK (1ULL << 50)
-+#define PSB_BO_FLAG_USSE (1ULL << 51)
-+#define PSB_BO_FLAG_COMMAND (1ULL << 52)
-+
-+#define PSB_ENGINE_2D 0
-+#define PSB_ENGINE_VIDEO 1
-+#define PSB_ENGINE_RASTERIZER 2
-+#define PSB_ENGINE_TA 3
-+#define PSB_ENGINE_HPRAST 4
-+#define LNC_ENGINE_ENCODE 5
-+
-+#define PSB_DEVICE_SGX 0x1
-+#define PSB_DEVICE_DISLAY 0x2
-+#define PSB_DEVICE_MSVDX 0x4
-+#define PSB_DEVICE_TOPAZ 0x8
-+
-+/*
-+ * For this fence class we have a couple of
-+ * fence types.
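-+ * Each type is a separate bit in the fence-type mask: EXE signals
-+ * that the command stream has executed, while the TA_DONE,
-+ * RASTER_DONE, SCENE_DONE and FEEDBACK bits track the individual
-+ * stages of a tile-accelerator job.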
-+ */
-+
-+#define _PSB_FENCE_EXE_SHIFT 0
-+#define _PSB_FENCE_TA_DONE_SHIFT 1
-+#define _PSB_FENCE_RASTER_DONE_SHIFT 2
-+#define _PSB_FENCE_SCENE_DONE_SHIFT 3
-+#define _PSB_FENCE_FEEDBACK_SHIFT 4
-+
-+#define _PSB_ENGINE_TA_FENCE_TYPES 5
-+#define _PSB_FENCE_TYPE_EXE (1 << _PSB_FENCE_EXE_SHIFT)
-+#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT)
-+#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
-+#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
-+#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
-+
-+#define PSB_NUM_ENGINES 6
-+
-+#define PSB_TA_FLAG_FIRSTPASS (1 << 0)
-+#define PSB_TA_FLAG_LASTPASS (1 << 1)
-+
-+#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
-+
-+struct drm_psb_extension_rep {
-+	int32_t exists;
-+	uint32_t driver_ioctl_offset;
-+	uint32_t sarea_offset;
-+	uint32_t major;
-+	uint32_t minor;
-+	uint32_t pl;
-+};
-+
-+#define DRM_PSB_EXT_NAME_LEN 128
-+
-+union drm_psb_extension_arg {
-+	char extension[DRM_PSB_EXT_NAME_LEN];
-+	struct drm_psb_extension_rep rep;
-+};
-+
-+struct psb_validate_req {
-+	uint64_t set_flags;
-+	uint64_t clear_flags;
-+	uint64_t next;
-+	uint64_t presumed_gpu_offset;
-+	uint32_t buffer_handle;
-+	uint32_t presumed_flags;
-+	uint32_t group;
-+	uint32_t pad64;
-+};
-+
-+struct psb_validate_rep {
-+	uint64_t gpu_offset;
-+	uint32_t placement;
-+	uint32_t fence_type_mask;
-+};
-+
-+#define PSB_USE_PRESUMED (1 << 0)
-+
-+struct psb_validate_arg {
-+	int handled;
-+	int ret;
-+	union {
-+		struct psb_validate_req req;
-+		struct psb_validate_rep rep;
-+	} d;
-+};
-+
-+struct drm_psb_scene {
-+	int handle_valid;
-+	uint32_t handle;
-+	uint32_t w;	/* also contains msaa info */
-+	uint32_t h;
-+	uint32_t num_buffers;
-+};
-+
-+#define DRM_PSB_FENCE_NO_USER (1 << 0)
-+
-+struct psb_ttm_fence_rep {
-+	uint32_t handle;
-+	uint32_t fence_class;
-+	uint32_t fence_type;
-+	uint32_t signaled_types;
-+	uint32_t error;
-+};
-+
-+typedef struct drm_psb_cmdbuf_arg {
-+	uint64_t buffer_list;	/* List of buffers to validate */
-+	uint64_t clip_rects;	/* See i915 counterpart */
-+	uint64_t scene_arg;
-+	uint64_t fence_arg;
-+
-+	uint32_t ta_flags;
-+
-+	uint32_t ta_handle;	/* TA reg-value pairs */
-+	uint32_t ta_offset;
-+	uint32_t ta_size;
-+
-+	uint32_t oom_handle;
-+	uint32_t oom_offset;
-+	uint32_t oom_size;
-+
-+	uint32_t cmdbuf_handle;	/* 2D Command buffer object or, */
-+	uint32_t cmdbuf_offset;	/* rasterizer reg-value pairs */
-+	uint32_t cmdbuf_size;
-+
-+	uint32_t reloc_handle;	/* Reloc buffer object */
-+	uint32_t reloc_offset;
-+	uint32_t num_relocs;
-+
-+	int32_t damage;		/* Damage front buffer with cliprects */
-+	/* Not implemented yet */
-+	uint32_t fence_flags;
-+	uint32_t engine;
-+
-+	/*
-+	 * Feedback;
-+	 */
-+
-+	uint32_t feedback_ops;
-+	uint32_t feedback_handle;
-+	uint32_t feedback_offset;
-+	uint32_t feedback_breakpoints;
-+	uint32_t feedback_size;
-+} drm_psb_cmdbuf_arg_t;
-+
-+struct drm_psb_xhw_init_arg {
-+	uint32_t operation;
-+	uint32_t buffer_handle;
-+};
-+
-+/*
-+ * Feedback components:
-+ */
-+
-+/*
-+ * Vistest component. The number of these in the feedback buffer
-+ * equals the number of vistest breakpoints + 1.
-+ * This is currently the only feedback component.
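-+ * Each component is eight 32-bit visibility-test result words
-+ * (struct drm_psb_vistest below), so a feedback buffer needs room
-+ * for (breakpoints + 1) * sizeof(struct drm_psb_vistest) bytes.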
-+ */ -+ -+struct drm_psb_vistest { -+ uint32_t vt[8]; -+}; -+ -+#define PSB_HW_COOKIE_SIZE 16 -+#define PSB_HW_FEEDBACK_SIZE 8 -+#define PSB_HW_OOM_CMD_SIZE (6 + DRM_PSB_NUM_RASTER_USE_REG * 2) -+ -+struct drm_psb_xhw_arg { -+ uint32_t op; -+ int ret; -+ uint32_t irq_op; -+ uint32_t issue_irq; -+ uint32_t cookie[PSB_HW_COOKIE_SIZE]; -+ union { -+ struct { -+ uint32_t w; /* also contains msaa info */ -+ uint32_t h; -+ uint32_t size; -+ uint32_t clear_p_start; -+ uint32_t clear_num_pages; -+ } si; -+ struct { -+ uint32_t fire_flags; -+ uint32_t hw_context; -+ uint32_t offset; -+ uint32_t engine; -+ uint32_t flags; -+ uint32_t rca; -+ uint32_t num_oom_cmds; -+ uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE]; -+ } sb; -+ struct { -+ uint32_t pages; -+ uint32_t size; -+ uint32_t ta_min_size; -+ } bi; -+ struct { -+ uint32_t bca; -+ uint32_t rca; -+ uint32_t flags; -+ } oom; -+ struct { -+ uint32_t pt_offset; -+ uint32_t param_offset; -+ uint32_t flags; -+ } bl; -+ struct { -+ uint32_t value; -+ } cl; -+ uint32_t feedback[PSB_HW_FEEDBACK_SIZE]; -+ } arg; -+}; -+ -+/* Controlling the kernel modesetting buffers */ -+ -+#define DRM_PSB_KMS_OFF 0x00 -+#define DRM_PSB_KMS_ON 0x01 -+#define DRM_PSB_VT_LEAVE 0x02 -+#define DRM_PSB_VT_ENTER 0x03 -+#define DRM_PSB_XHW_INIT 0x04 -+#define DRM_PSB_XHW 0x05 -+#define DRM_PSB_EXTENSION 0x06 -+ -+/* -+ * Xhw commands. -+ */ -+ -+#define PSB_XHW_INIT 0x00 -+#define PSB_XHW_TAKEDOWN 0x01 -+ -+#define PSB_XHW_FIRE_RASTER 0x00 -+#define PSB_XHW_SCENE_INFO 0x01 -+#define PSB_XHW_SCENE_BIND_FIRE 0x02 -+#define PSB_XHW_TA_MEM_INFO 0x03 -+#define PSB_XHW_RESET_DPM 0x04 -+#define PSB_XHW_OOM 0x05 -+#define PSB_XHW_TERMINATE 0x06 -+#define PSB_XHW_VISTEST 0x07 -+#define PSB_XHW_RESUME 0x08 -+#define PSB_XHW_TA_MEM_LOAD 0x09 -+#define PSB_XHW_CHECK_LOCKUP 0x0a -+ -+#define PSB_SCENE_FLAG_DIRTY (1 << 0) -+#define PSB_SCENE_FLAG_COMPLETE (1 << 1) -+#define PSB_SCENE_FLAG_SETUP (1 << 2) -+#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3) -+#define PSB_SCENE_FLAG_CLEARED (1 << 4) -+ -+#define PSB_TA_MEM_FLAG_TA (1 << 0) -+#define PSB_TA_MEM_FLAG_RASTER (1 << 1) -+#define PSB_TA_MEM_FLAG_HOSTA (1 << 2) -+#define PSB_TA_MEM_FLAG_HOSTD (1 << 3) -+#define PSB_TA_MEM_FLAG_INIT (1 << 4) -+#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5) -+ -+/*Raster fire will deallocate memory */ -+#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0) -+/*Isp reset needed due to change in ZLS format */ -+#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1) -+/*These are set by Xpsb. */ -+#define PSB_FIRE_FLAG_XHW_MASK 0xff000000 -+/*The task has had at least one OOM and Xpsb will -+ send back messages on each fire. */ -+#define PSB_FIRE_FLAG_XHW_OOM (1 << 24) -+ -+#define PSB_SCENE_ENGINE_TA 0 -+#define PSB_SCENE_ENGINE_RASTER 1 -+#define PSB_SCENE_NUM_ENGINES 2 -+ -+#define PSB_LOCKUP_RASTER (1 << 0) -+#define PSB_LOCKUP_TA (1 << 1) -+ -+struct drm_psb_dev_info_arg { -+ uint32_t num_use_attribute_registers; -+}; -+#define DRM_PSB_DEVINFO 0x01 -+ -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/psb_drv.c b/drivers/gpu/drm/psb/psb_drv.c ---- a/drivers/gpu/drm/psb/psb_drv.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_drv.c 2009-04-07 13:31:58.000000000 -0700 -@@ -0,0 +1,1465 @@ -+/************************************************************************** -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. -+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA. -+ * All Rights Reserved. 
-+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. -+ * -+ **************************************************************************/ -+/* -+ */ -+ -+#include <drm/drmP.h> -+#include <drm/drm.h> -+#include "psb_drm.h" -+#include "psb_drv.h" -+#include "psb_reg.h" -+#include "psb_intel_reg.h" -+#include "psb_msvdx.h" -+#include "lnc_topaz.h" -+#include <drm/drm_pciids.h> -+#include "psb_scene.h" -+ -+#include <linux/cpu.h> -+#include <linux/notifier.h> -+#include <linux/spinlock.h> -+ -+int drm_psb_debug; -+EXPORT_SYMBOL(drm_psb_debug); -+static int drm_psb_trap_pagefaults; -+static int drm_psb_clock_gating; -+static int drm_psb_ta_mem_size = 32 * 1024; -+ -+int drm_psb_disable_vsync; -+int drm_psb_no_fb; -+int drm_psb_force_pipeb; -+int drm_idle_check_interval = 5; -+int drm_psb_ospm; -+ -+MODULE_PARM_DESC(debug, "Enable debug output"); -+MODULE_PARM_DESC(clock_gating, "clock gating"); -+MODULE_PARM_DESC(no_fb, "Disable FBdev"); -+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults"); -+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts"); -+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb"); -+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB"); -+MODULE_PARM_DESC(ospm, "switch for ospm support"); -+module_param_named(debug, drm_psb_debug, int, 0600); -+module_param_named(clock_gating, drm_psb_clock_gating, int, 0600); -+module_param_named(no_fb, drm_psb_no_fb, int, 0600); -+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600); -+module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600); -+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600); -+module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600); -+module_param_named(ospm, drm_psb_ospm, int, 0600); -+ -+#ifndef CONFIG_X86_PAT -+#warning "Don't build this driver without PAT support!!!" -+#endif -+ -+#define psb_PCI_IDS \ -+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \ -+ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \ -+ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \ -+ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \ -+ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \ -+ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \ -+ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \ -+ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \ -+ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \ -+ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \ -+ {0, 0, 0} -+ -+static struct pci_device_id pciidlist[] = { -+ psb_PCI_IDS -+}; -+ -+/* -+ * Standard IOCTLs. 
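-+ * All driver-private ioctl numbers are relative to DRM_COMMAND_BASE;
-+ * the TTM execbuf, placement and fence extensions below chain their
-+ * offsets (DRM_PSB_PLACEMENT_OFFSET, DRM_PSB_FENCE_OFFSET) on top of
-+ * these so the ranges never collide.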
-+ */ -+ -+#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE) -+#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE) -+#define DRM_IOCTL_PSB_VT_LEAVE DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE) -+#define DRM_IOCTL_PSB_VT_ENTER DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE) -+#define DRM_IOCTL_PSB_XHW_INIT DRM_IOW(DRM_PSB_XHW_INIT + DRM_COMMAND_BASE, \ -+ struct drm_psb_xhw_init_arg) -+#define DRM_IOCTL_PSB_XHW DRM_IO(DRM_PSB_XHW + DRM_COMMAND_BASE) -+#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \ -+ union drm_psb_extension_arg) -+/* -+ * TTM execbuf extension. -+ */ -+ -+#define DRM_PSB_CMDBUF (DRM_PSB_EXTENSION + 1) -+#define DRM_PSB_SCENE_UNREF (DRM_PSB_CMDBUF + 1) -+#define DRM_IOCTL_PSB_CMDBUF DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE, \ -+ struct drm_psb_cmdbuf_arg) -+#define DRM_IOCTL_PSB_SCENE_UNREF DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \ -+ struct drm_psb_scene) -+#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE) -+#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE) -+#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \ -+ union drm_psb_extension_arg) -+/* -+ * TTM placement user extension. -+ */ -+ -+#define DRM_PSB_PLACEMENT_OFFSET (DRM_PSB_SCENE_UNREF + 1) -+ -+#define DRM_PSB_TTM_PL_CREATE (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET) -+#define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET) -+#define DRM_PSB_TTM_PL_UNREF (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET) -+#define DRM_PSB_TTM_PL_SYNCCPU (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET) -+#define DRM_PSB_TTM_PL_WAITIDLE (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET) -+#define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET) -+ -+/* -+ * TTM fence extension. 
-+ */ -+ -+#define DRM_PSB_FENCE_OFFSET (DRM_PSB_TTM_PL_SETSTATUS + 1) -+#define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET) -+#define DRM_PSB_TTM_FENCE_FINISH (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET) -+#define DRM_PSB_TTM_FENCE_UNREF (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET) -+ -+#define DRM_IOCTL_PSB_TTM_PL_CREATE \ -+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\ -+ union ttm_pl_create_arg) -+#define DRM_IOCTL_PSB_TTM_PL_REFERENCE \ -+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\ -+ union ttm_pl_reference_arg) -+#define DRM_IOCTL_PSB_TTM_PL_UNREF \ -+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\ -+ struct ttm_pl_reference_req) -+#define DRM_IOCTL_PSB_TTM_PL_SYNCCPU \ -+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\ -+ struct ttm_pl_synccpu_arg) -+#define DRM_IOCTL_PSB_TTM_PL_WAITIDLE \ -+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\ -+ struct ttm_pl_waitidle_arg) -+#define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \ -+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\ -+ union ttm_pl_setstatus_arg) -+#define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \ -+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED, \ -+ union ttm_fence_signaled_arg) -+#define DRM_IOCTL_PSB_TTM_FENCE_FINISH \ -+ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH, \ -+ union ttm_fence_finish_arg) -+#define DRM_IOCTL_PSB_TTM_FENCE_UNREF \ -+ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF, \ -+ struct ttm_fence_unref_arg) -+ -+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+ -+#define PSB_IOCTL_DEF(ioctl, func, flags) \ -+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, func, flags} -+ -+static struct drm_ioctl_desc psb_ioctls[] = { -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl, -+ DRM_ROOT_ONLY), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON, psbfb_kms_on_ioctl, DRM_ROOT_ONLY), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl, -+ DRM_ROOT_ONLY), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER, psb_vt_enter_ioctl, DRM_ROOT_ONLY), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW_INIT, psb_xhw_init_ioctl, -+ DRM_ROOT_ONLY), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW, psb_xhw_ioctl, DRM_ROOT_ONLY), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_EXTENSION, psb_extension_ioctl, DRM_AUTH), -+ -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_CMDBUF, psb_cmdbuf_ioctl, DRM_AUTH), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl, -+ DRM_AUTH), -+ -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl, -+ DRM_AUTH), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl, -+ DRM_AUTH), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl, -+ DRM_AUTH), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl, -+ DRM_AUTH), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl, -+ DRM_AUTH), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl, -+ DRM_AUTH), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED, -+ psb_fence_signaled_ioctl, DRM_AUTH), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl, -+ DRM_AUTH), -+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl, -+ DRM_AUTH) -+}; -+ -+static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls); -+ -+static void get_ci_info(struct drm_psb_private *dev_priv) -+{ -+ struct pci_dev *pdev; -+ -+ pdev = pci_get_subsys(0x8086, 0x080b, 0, 0, NULL); -+ if (pdev == NULL) { -+ /* IF no 
pci_device we set size & addr to 0, no ci -+ * share buffer can be created */ -+ dev_priv->ci_region_start = 0; -+ dev_priv->ci_region_size = 0; -+ printk(KERN_ERR "can't find CI device, no ci share buffer\n"); -+ return; -+ } -+ -+ dev_priv->ci_region_start = pci_resource_start(pdev, 1); -+ dev_priv->ci_region_size = pci_resource_len(pdev, 1); -+ -+ printk(KERN_INFO "ci_region_start %x ci_region_size %d\n", -+ dev_priv->ci_region_start, dev_priv->ci_region_size); -+ -+ pci_dev_put(pdev); -+ -+ return; -+} -+ -+static int dri_library_name(struct drm_device *dev, char *buf) -+{ -+ return snprintf(buf, PAGE_SIZE, "psb\n"); -+} -+ -+static void psb_set_uopt(struct drm_psb_uopt *uopt) -+{ -+ uopt->clock_gating = drm_psb_clock_gating; -+} -+ -+static void psb_lastclose(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ -+ if (!dev->dev_private) -+ return; -+ -+ if (dev_priv->ta_mem) -+ psb_ta_mem_unref(&dev_priv->ta_mem); -+ mutex_lock(&dev_priv->cmdbuf_mutex); -+ if (dev_priv->context.buffers) { -+ vfree(dev_priv->context.buffers); -+ dev_priv->context.buffers = NULL; -+ } -+ mutex_unlock(&dev_priv->cmdbuf_mutex); -+} -+ -+static void psb_do_takedown(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ struct ttm_bo_device *bdev = &dev_priv->bdev; -+ -+ -+ if (dev_priv->have_mem_rastgeom) { -+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_RASTGEOM); -+ dev_priv->have_mem_rastgeom = 0; -+ } -+ if (dev_priv->have_mem_mmu) { -+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU); -+ dev_priv->have_mem_mmu = 0; -+ } -+ if (dev_priv->have_mem_aper) { -+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_APER); -+ dev_priv->have_mem_aper = 0; -+ } -+ if (dev_priv->have_tt) { -+ ttm_bo_clean_mm(bdev, TTM_PL_TT); -+ dev_priv->have_tt = 0; -+ } -+ if (dev_priv->have_vram) { -+ ttm_bo_clean_mm(bdev, TTM_PL_VRAM); -+ dev_priv->have_vram = 0; -+ } -+ if (dev_priv->have_camera) { -+ ttm_bo_clean_mm(bdev, TTM_PL_CI); -+ dev_priv->have_camera = 0; -+ } -+ -+ if (dev_priv->has_msvdx) -+ psb_msvdx_uninit(dev); -+ -+ if (IS_MRST(dev)) { -+ if (dev_priv->has_topaz) -+ lnc_topaz_uninit(dev); -+ } -+ -+ if (dev_priv->comm) { -+ kunmap(dev_priv->comm_page); -+ dev_priv->comm = NULL; -+ } -+ if (dev_priv->comm_page) { -+ __free_page(dev_priv->comm_page); -+ dev_priv->comm_page = NULL; -+ } -+} -+ -+void psb_clockgating(struct drm_psb_private *dev_priv) -+{ -+ uint32_t clock_gating; -+ -+ if (dev_priv->uopt.clock_gating == 1) { -+ PSB_DEBUG_INIT("Disabling clock gating.\n"); -+ -+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED << -+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) | -+ (_PSB_C_CLKGATECTL_CLKG_DISABLED << -+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) | -+ (_PSB_C_CLKGATECTL_CLKG_DISABLED << -+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) | -+ (_PSB_C_CLKGATECTL_CLKG_DISABLED << -+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) | -+ (_PSB_C_CLKGATECTL_CLKG_DISABLED << -+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) | -+ (_PSB_C_CLKGATECTL_CLKG_DISABLED << -+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT); -+ -+ } else if (dev_priv->uopt.clock_gating == 2) { -+ PSB_DEBUG_INIT("Enabling clock gating.\n"); -+ -+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO << -+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) | -+ (_PSB_C_CLKGATECTL_CLKG_AUTO << -+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) | -+ (_PSB_C_CLKGATECTL_CLKG_AUTO << -+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) | -+ (_PSB_C_CLKGATECTL_CLKG_AUTO << -+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) | -+ (_PSB_C_CLKGATECTL_CLKG_AUTO << -+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) | -+ 
(_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+				 _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
-+	} else
-+		clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
-+
-+#ifdef FIX_TG_2D_CLOCKGATE
-+	clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
-+	clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+			 _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
-+#endif
-+	PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
-+	(void) PSB_RSGX32(PSB_CR_CLKGATECTL);
-+}
-+
-+#define FB_REG06 0xD0810600
-+#define FB_MIPI_DISABLE BIT11
-+#define FB_REG09 0xD0810900
-+#define FB_SKU_MASK (BIT12|BIT13|BIT14)
-+#define FB_SKU_SHIFT 12
-+#define FB_SKU_100 0
-+#define FB_SKU_100L 1
-+#define FB_SKU_83 2
-+#if 1 /* FIXME remove it after PO */
-+#define FB_GFX_CLK_DIVIDE_MASK (BIT20|BIT21|BIT22)
-+#define FB_GFX_CLK_DIVIDE_SHIFT 20
-+#define FB_VED_CLK_DIVIDE_MASK (BIT23|BIT24)
-+#define FB_VED_CLK_DIVIDE_SHIFT 23
-+#define FB_VEC_CLK_DIVIDE_MASK (BIT25|BIT26)
-+#define FB_VEC_CLK_DIVIDE_SHIFT 25
-+#endif /* FIXME remove it after PO */
-+
-+
-+void mrst_get_fuse_settings(struct drm_psb_private *dev_priv)
-+{
-+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
-+	uint32_t fuse_value = 0;
-+	uint32_t fuse_value_tmp = 0;
-+
-+	pci_write_config_dword(pci_root, 0xD0, FB_REG06);
-+	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
-+
-+	dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
-+
-+	DRM_INFO("internal display is %s\n",
-+		 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
-+
-+	pci_write_config_dword(pci_root, 0xD0, FB_REG09);
-+	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
-+
-+	DRM_INFO("SKU value is 0x%x. \n", fuse_value);
-+	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
-+
-+	switch (fuse_value_tmp) {
-+	case FB_SKU_100:
-+		DRM_INFO("SKU value is SKU_100. LNC core clock is 200MHz. \n");
-+		dev_priv->sku_100 = true;
-+		break;
-+	case FB_SKU_100L:
-+		DRM_INFO("SKU value is SKU_100L. LNC core clock is 100MHz. \n");
-+		dev_priv->sku_100L = true;
-+		break;
-+	case FB_SKU_83:
-+		DRM_INFO("SKU value is SKU_83. LNC core clock is 166MHz. \n");
-+		dev_priv->sku_83 = true;
-+		break;
-+	default:
-+		DRM_ERROR("Invalid SKU value, SKU value = 0x%08x\n",
-+			  fuse_value_tmp);
-+	}
-+
-+#if 1 /* FIXME remove it after PO */
-+	fuse_value_tmp = (fuse_value & FB_GFX_CLK_DIVIDE_MASK) >> FB_GFX_CLK_DIVIDE_SHIFT;
-+
-+	switch (fuse_value_tmp) {
-+	case 0:
-+		DRM_INFO("Gfx clk : core clk = 1:1. \n");
-+		break;
-+	case 1:
-+		DRM_INFO("Gfx clk : core clk = 4:3. \n");
-+		break;
-+	case 2:
-+		DRM_INFO("Gfx clk : core clk = 8:5. \n");
-+		break;
-+	case 3:
-+		DRM_INFO("Gfx clk : core clk = 2:1. \n");
-+		break;
-+	case 5:
-+		DRM_INFO("Gfx clk : core clk = 8:3. \n");
-+		break;
-+	case 6:
-+		DRM_INFO("Gfx clk : core clk = 16:5. \n");
-+		break;
-+	default:
-+		DRM_ERROR("Invalid GFX CLK DIVIDE values, value = 0x%08x\n",
-+			  fuse_value_tmp);
-+	}
-+
-+	fuse_value_tmp = (fuse_value & FB_VED_CLK_DIVIDE_MASK) >> FB_VED_CLK_DIVIDE_SHIFT;
-+
-+	switch (fuse_value_tmp) {
-+	case 0:
-+		DRM_INFO("Ved clk : core clk = 1:1. \n");
-+		break;
-+	case 1:
-+		DRM_INFO("Ved clk : core clk = 4:3. \n");
-+		break;
-+	case 2:
-+		DRM_INFO("Ved clk : core clk = 8:5. \n");
-+		break;
-+	case 3:
-+		DRM_INFO("Ved clk : core clk = 2:1. \n");
-+		break;
-+	default:
-+		DRM_ERROR("Invalid VED CLK DIVIDE values, value = 0x%08x\n",
-+			  fuse_value_tmp);
-+	}
-+
-+	fuse_value_tmp = (fuse_value & FB_VEC_CLK_DIVIDE_MASK) >> FB_VEC_CLK_DIVIDE_SHIFT;
-+
-+	switch (fuse_value_tmp) {
-+	case 0:
-+		DRM_INFO("Vec clk : core clk = 1:1. 
\n"); -+ break; -+ case 1: -+ DRM_INFO("Vec clk : core clk = 4:3. \n"); -+ break; -+ case 2: -+ DRM_INFO("Vec clk : core clk = 8:5. \n"); -+ break; -+ case 3: -+ DRM_INFO("Vec clk : core clk = 2:1. \n"); -+ break; -+ default: -+ DRM_ERROR("Invalid VEC CLK DIVIDE values, value = 0x%08x\n", -+ fuse_value_tmp); -+ } -+#endif /* FIXME remove it after PO */ -+ -+ return; -+} -+ -+static int psb_do_init(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ struct ttm_bo_device *bdev = &dev_priv->bdev; -+ struct psb_gtt *pg = dev_priv->pg; -+ -+ uint32_t stolen_gtt; -+ uint32_t tt_start; -+ uint32_t tt_pages; -+ -+ int ret = -ENOMEM; -+ -+ dev_priv->ta_mem_pages = -+ PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024, -+ PAGE_SIZE) >> PAGE_SHIFT; -+ dev_priv->comm_page = alloc_page(GFP_KERNEL); -+ if (!dev_priv->comm_page) -+ goto out_err; -+ -+ dev_priv->comm = kmap(dev_priv->comm_page); -+ memset((void *) dev_priv->comm, 0, PAGE_SIZE); -+ -+ set_pages_uc(dev_priv->comm_page, 1); -+ -+ /* -+ * Initialize sequence numbers for the different command -+ * submission mechanisms. -+ */ -+ -+ dev_priv->sequence[PSB_ENGINE_2D] = 0; -+ dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0; -+ dev_priv->sequence[PSB_ENGINE_TA] = 0; -+ dev_priv->sequence[PSB_ENGINE_HPRAST] = 0; -+ -+ if (pg->gatt_start & 0x0FFFFFFF) { -+ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n"); -+ ret = -EINVAL; -+ goto out_err; -+ } -+ -+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4; -+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT; -+ stolen_gtt = -+ (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages; -+ -+ dev_priv->gatt_free_offset = pg->gatt_start + -+ (stolen_gtt << PAGE_SHIFT) * 1024; -+ -+ /* -+ * Insert a cache-coherent communications page in mmu space -+ * just after the stolen area. Will be used for fencing etc. -+ */ -+ -+ dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset; -+ dev_priv->gatt_free_offset += PAGE_SIZE; -+ -+ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), -+ &dev_priv->comm_page, -+ dev_priv->comm_mmu_offset, 1, 0, 0, 0); -+ -+ if (ret) -+ goto out_err; -+ -+ if (1 || drm_debug) { -+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID); -+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION); -+ DRM_INFO("SGX core id = 0x%08x\n", core_id); -+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n", -+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >> -+ _PSB_CC_REVISION_MAJOR_SHIFT, -+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >> -+ _PSB_CC_REVISION_MINOR_SHIFT); -+ DRM_INFO -+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n", -+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >> -+ _PSB_CC_REVISION_MAINTENANCE_SHIFT, -+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >> -+ _PSB_CC_REVISION_DESIGNER_SHIFT); -+ } -+ -+ spin_lock_init(&dev_priv->irqmask_lock); -+ dev_priv->fence0_irq_on = 0; -+ -+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ? 
-+ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT; -+ tt_start = dev_priv->gatt_free_offset - pg->gatt_start; -+ tt_pages -= tt_start >> PAGE_SHIFT; -+ -+ if (!ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0, -+ pg->vram_stolen_size >> PAGE_SHIFT)) { -+ dev_priv->have_vram = 1; -+ } -+ -+ if (!ttm_bo_init_mm(bdev, TTM_PL_CI, 0, -+ dev_priv->ci_region_size >> PAGE_SHIFT)) { -+ dev_priv->have_camera = 1; -+ } -+ -+ if (!ttm_bo_init_mm(bdev, TTM_PL_TT, tt_start >> PAGE_SHIFT, -+ tt_pages)) { -+ dev_priv->have_tt = 1; -+ } -+ -+ if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_MMU, 0x00000000, -+ (pg->gatt_start - PSB_MEM_MMU_START - -+ pg->ci_stolen_size) >> PAGE_SHIFT)) { -+ dev_priv->have_mem_mmu = 1; -+ } -+ -+ if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_RASTGEOM, 0x00000000, -+ (PSB_MEM_MMU_START - -+ PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) { -+ dev_priv->have_mem_rastgeom = 1; -+ } -+#if 0 -+ if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) { -+ if (!ttm_bo_init_mm -+ (bdev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT, -+ pg->gatt_pages - PSB_TT_PRIV0_PLIMIT, 1)) { -+ dev_priv->have_mem_aper = 1; -+ } -+ } -+#endif -+ -+ PSB_DEBUG_INIT("Init MSVDX\n"); -+ dev_priv->has_msvdx = 1; -+ if (psb_msvdx_init(dev)) -+ dev_priv->has_msvdx = 0; -+ -+ if (IS_MRST(dev)) { -+ PSB_DEBUG_INIT("Init Topaz\n"); -+ dev_priv->has_topaz = 1; -+ if (lnc_topaz_init(dev)) -+ dev_priv->has_topaz = 0; -+ } -+ return 0; -+out_err: -+ psb_do_takedown(dev); -+ return ret; -+} -+ -+static int psb_driver_unload(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ -+ if (drm_psb_no_fb == 0) -+ psb_modeset_cleanup(dev); -+ -+ if (dev_priv) { -+ struct ttm_bo_device *bdev = &dev_priv->bdev; -+ -+ psb_watchdog_takedown(dev_priv); -+ psb_do_takedown(dev); -+ psb_xhw_takedown(dev_priv); -+ psb_scheduler_takedown(&dev_priv->scheduler); -+ -+ if (dev_priv->have_mem_pds) { -+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_PDS); -+ dev_priv->have_mem_pds = 0; -+ } -+ if (dev_priv->have_mem_kernel) { -+ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_KERNEL); -+ dev_priv->have_mem_kernel = 0; -+ } -+ -+ if (dev_priv->pf_pd) { -+ psb_mmu_free_pagedir(dev_priv->pf_pd); -+ dev_priv->pf_pd = NULL; -+ } -+ if (dev_priv->mmu) { -+ struct psb_gtt *pg = dev_priv->pg; -+ -+ down_read(&pg->sem); -+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd -+ (dev_priv->mmu), -+ pg->gatt_start, -+ pg->vram_stolen_size >> -+ PAGE_SHIFT); -+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd -+ (dev_priv->mmu), -+ pg->gatt_start - pg->ci_stolen_size, -+ pg->ci_stolen_size >> -+ PAGE_SHIFT); -+ up_read(&pg->sem); -+ psb_mmu_driver_takedown(dev_priv->mmu); -+ dev_priv->mmu = NULL; -+ } -+ psb_gtt_takedown(dev_priv->pg, 1); -+ if (dev_priv->scratch_page) { -+ __free_page(dev_priv->scratch_page); -+ dev_priv->scratch_page = NULL; -+ } -+ if (dev_priv->has_bo_device) { -+ ttm_bo_device_release(&dev_priv->bdev); -+ dev_priv->has_bo_device = 0; -+ } -+ if (dev_priv->has_fence_device) { -+ ttm_fence_device_release(&dev_priv->fdev); -+ dev_priv->has_fence_device = 0; -+ } -+ if (dev_priv->vdc_reg) { -+ iounmap(dev_priv->vdc_reg); -+ dev_priv->vdc_reg = NULL; -+ } -+ if (dev_priv->sgx_reg) { -+ iounmap(dev_priv->sgx_reg); -+ dev_priv->sgx_reg = NULL; -+ } -+ if (dev_priv->msvdx_reg) { -+ iounmap(dev_priv->msvdx_reg); -+ dev_priv->msvdx_reg = NULL; -+ } -+ -+ if (IS_MRST(dev)) { -+ if (dev_priv->topaz_reg) { -+ iounmap(dev_priv->topaz_reg); -+ dev_priv->topaz_reg = NULL; -+ } -+ } -+ -+ if (dev_priv->tdev) -+ ttm_object_device_release(&dev_priv->tdev); -+ -+ if 
(dev_priv->has_global)
-+			psb_ttm_global_release(dev_priv);
-+
-+		drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
-+		dev->dev_private = NULL;
-+	}
-+	return 0;
-+}
-+
-+
-+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
-+{
-+	struct drm_psb_private *dev_priv;
-+	struct ttm_bo_device *bdev;
-+	unsigned long resource_start;
-+	struct psb_gtt *pg;
-+	int ret = -ENOMEM;
-+
-+	if (IS_MRST(dev))
-+		DRM_INFO("Running driver on Moorestown platform!\n");
-+	else
-+		DRM_INFO("Running driver on Poulsbo platform!\n");
-+
-+	dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
-+	if (dev_priv == NULL)
-+		return -ENOMEM;
-+
-+	dev_priv->dev = dev;
-+	bdev = &dev_priv->bdev;
-+
-+	ret = psb_ttm_global_init(dev_priv);
-+	if (unlikely(ret != 0))
-+		goto out_err;
-+	dev_priv->has_global = 1;
-+
-+	dev_priv->tdev = ttm_object_device_init
-+	    (dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER);
-+	if (unlikely(dev_priv->tdev == NULL))
-+		goto out_err;
-+
-+	mutex_init(&dev_priv->temp_mem);
-+	mutex_init(&dev_priv->cmdbuf_mutex);
-+	mutex_init(&dev_priv->reset_mutex);
-+	INIT_LIST_HEAD(&dev_priv->context.validate_list);
-+	INIT_LIST_HEAD(&dev_priv->context.kern_validate_list);
-+	psb_init_disallowed();
-+
-+#ifdef FIX_TG_16
-+	atomic_set(&dev_priv->lock_2d, 0);
-+	atomic_set(&dev_priv->ta_wait_2d, 0);
-+	atomic_set(&dev_priv->ta_wait_2d_irq, 0);
-+	atomic_set(&dev_priv->waiters_2d, 0);
-+	DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
-+#else
-+	mutex_init(&dev_priv->mutex_2d);
-+#endif
-+
-+	spin_lock_init(&dev_priv->reloc_lock);
-+
-+	DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
-+	DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
-+
-+	dev->dev_private = (void *) dev_priv;
-+	dev_priv->chipset = chipset;
-+	psb_set_uopt(&dev_priv->uopt);
-+
-+	PSB_DEBUG_GENERAL("Init watchdog and scheduler\n");
-+	psb_watchdog_init(dev_priv);
-+	psb_scheduler_init(dev, &dev_priv->scheduler);
-+
-+
-+	PSB_DEBUG_INIT("Mapping MMIO\n");
-+	resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
-+
-+	if (IS_MRST(dev))
-+		dev_priv->msvdx_reg =
-+		    ioremap(resource_start + MRST_MSVDX_OFFSET,
-+			    PSB_MSVDX_SIZE);
-+	else
-+		dev_priv->msvdx_reg =
-+		    ioremap(resource_start + PSB_MSVDX_OFFSET,
-+			    PSB_MSVDX_SIZE);
-+
-+	if (!dev_priv->msvdx_reg)
-+		goto out_err;
-+
-+	if (IS_MRST(dev)) {
-+		dev_priv->topaz_reg =
-+		    ioremap(resource_start + LNC_TOPAZ_OFFSET,
-+			    LNC_TOPAZ_SIZE);
-+		if (!dev_priv->topaz_reg)
-+			goto out_err;
-+	}
-+
-+	dev_priv->vdc_reg =
-+	    ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
-+	if (!dev_priv->vdc_reg)
-+		goto out_err;
-+
-+	if (IS_MRST(dev))
-+		dev_priv->sgx_reg =
-+		    ioremap(resource_start + MRST_SGX_OFFSET,
-+			    PSB_SGX_SIZE);
-+	else
-+		dev_priv->sgx_reg =
-+		    ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
-+
-+	if (!dev_priv->sgx_reg)
-+		goto out_err;
-+
-+	if (IS_MRST(dev))
-+		mrst_get_fuse_settings(dev_priv);
-+
-+	PSB_DEBUG_INIT("Init TTM fence and BO driver\n");
-+
-+	get_ci_info(dev_priv);
-+
-+	psb_clockgating(dev_priv);
-+
-+	ret = psb_ttm_fence_device_init(&dev_priv->fdev);
-+	if (unlikely(ret != 0))
-+		goto out_err;
-+
-+	dev_priv->has_fence_device = 1;
-+	ret = ttm_bo_device_init(bdev,
-+				 dev_priv->mem_global_ref.object,
-+				 &psb_ttm_bo_driver,
-+				 DRM_PSB_FILE_PAGE_OFFSET);
-+	if (unlikely(ret != 0))
-+		goto out_err;
-+	dev_priv->has_bo_device = 1;
-+	ttm_lock_init(&dev_priv->ttm_lock);
-+
-+	ret = -ENOMEM;
-+
-+	dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
-+	if (!dev_priv->scratch_page)
-+		goto out_err;
-+
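-+	/*
-+	 * The zeroed scratch page backs otherwise-unpopulated GTT
-+	 * entries; map it uncached so the CPU view matches how the
-+	 * device sees it.
-+	 */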
-+	set_pages_uc(dev_priv->scratch_page, 1);
-+
-+	dev_priv->pg = psb_gtt_alloc(dev);
-+	if (!dev_priv->pg)
-+		goto out_err;
-+
-+	ret = psb_gtt_init(dev_priv->pg, 0);
-+	if (ret)
-+		goto out_err;
-+
-+	dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
-+					    drm_psb_trap_pagefaults, 0,
-+					    dev_priv);
-+	if (!dev_priv->mmu)
-+		goto out_err;
-+
-+	pg = dev_priv->pg;
-+
-+	/*
-+	 * Make sgx MMU aware of the stolen memory area we call VRAM.
-+	 */
-+
-+	down_read(&pg->sem);
-+	ret =
-+	    psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
-+					(dev_priv->mmu),
-+					pg->stolen_base >> PAGE_SHIFT,
-+					pg->gatt_start,
-+					pg->vram_stolen_size >> PAGE_SHIFT, 0);
-+	up_read(&pg->sem);
-+	if (ret)
-+		goto out_err;
-+
-+	/*
-+	 * Make sgx MMU aware of the CI stolen memory area as well.
-+	 */
-+
-+	down_read(&pg->sem);
-+	ret =
-+	    psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
-+					(dev_priv->mmu),
-+					dev_priv->ci_region_start >> PAGE_SHIFT,
-+					pg->gatt_start - pg->ci_stolen_size,
-+					pg->ci_stolen_size >> PAGE_SHIFT, 0);
-+	up_read(&pg->sem);
-+	if (ret)
-+		goto out_err;
-+
-+	dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
-+	if (!dev_priv->pf_pd)
-+		goto out_err;
-+
-+	/*
-+	 * Make all presumably unused requestors page-fault by making them
-+	 * use context 1 which does not have any valid mappings.
-+	 */
-+
-+	PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
-+	PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
-+	PSB_RSGX32(PSB_CR_BIF_BANK1);
-+
-+	psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
-+	psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
-+	psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
-+
-+	psb_init_2d(dev_priv);
-+
-+	ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_KERNEL, 0x00000000,
-+			     (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
-+			     >> PAGE_SHIFT);
-+	if (ret)
-+		goto out_err;
-+	dev_priv->have_mem_kernel = 1;
-+
-+	ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_PDS, 0x00000000,
-+			     (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
-+			     >> PAGE_SHIFT);
-+	if (ret)
-+		goto out_err;
-+	dev_priv->have_mem_pds = 1;
-+
-+	PSB_DEBUG_INIT("Begin to init SGX/MSVDX/Topaz\n");
-+
-+	ret = psb_do_init(dev);
-+	if (ret)
-+		return ret;
-+
-+	ret = psb_xhw_init(dev);
-+	if (ret)
-+		return ret;
-+
-+	PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
-+	PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
-+
-+	psb_init_ospm(dev_priv);
-+
-+	if (drm_psb_no_fb == 0) {
-+		psb_modeset_init(dev);
-+		drm_helper_initial_config(dev, false);
-+	}
-+
-+	/*initialize the MSI for MRST*/
-+	if (IS_MRST(dev)) {
-+		if (pci_enable_msi(dev->pdev)) {
-+			DRM_ERROR("Enable MSI for MRST failed!\n");
-+		} else {
-+			PSB_DEBUG_INIT("Enabled MSI IRQ (%d)\n",
-+				       dev->pdev->irq);
-+			/* pci_write_config_word(pdev, 0x04, 0x07); */
-+		}
-+	}
-+
-+	/*set SGX in low power mode*/
-+	if (drm_psb_ospm && IS_MRST(dev))
-+		if (psb_try_power_down_sgx(dev))
-+			PSB_DEBUG_PM("initialize SGX to low power failed\n");
-+	return 0;
-+out_err:
-+	psb_driver_unload(dev);
-+	return ret;
-+}
-+
-+int psb_driver_device_is_agp(struct drm_device *dev)
-+{
-+	return 0;
-+}
-+
-+static int psb_prepare_msvdx_suspend(struct drm_device *dev)
-+{
-+#ifdef PSB_FIXME
-+	struct drm_psb_private *dev_priv =
-+	    (struct drm_psb_private *) dev->dev_private;
-+	struct ttm_fence_device *fdev = &dev_priv->fdev;
-+	struct ttm_fence_class_manager *fc =
-+	    &fdev->fence_class[PSB_ENGINE_VIDEO];
-+	struct ttm_fence_object *fence;
-+	int ret = 0;
-+	int signaled = 0;
-+	int count = 0;
-+	unsigned long _end = jiffies + 3 * DRM_HZ;
-+
-+	PSB_DEBUG_GENERAL
-+	    ("MSVDXACPI Entering psb_prepare_msvdx_suspend....\n");
-+
-+	/*set the msvdx-reset flag here.. */
-+	dev_priv->msvdx_needs_reset = 1;
-+
-+	/*Ensure that all pending IRQs are serviced, */
-+
-+	/*
-+	 * Save the last MSVDX fence in dev_priv instead!!!
-+	 * Need to be fc->write_locked while accessing a fence from the ring.
-+	 */
-+
-+	list_for_each_entry(fence, &fc->ring, ring) {
-+		count++;
-+		do {
-+			DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
-+				    (signaled =
-+				     ttm_fence_object_signaled(fence,
-+							       DRM_FENCE_TYPE_EXE)));
-+			if (signaled)
-+				break;
-+			if (time_after_eq(jiffies, _end))
-+				PSB_DEBUG_GENERAL
-+				    ("MSVDXACPI: fence 0x%x didn't get"
-+				     " signaled for 3 secs; "
-+				     "we will suspend anyways\n",
-+				     (unsigned int) fence);
-+		} while (ret == -EINTR);
-+
-+	}
-+	PSB_DEBUG_GENERAL("MSVDXACPI: All MSVDX IRQs (%d) serviced...\n",
-+			  count);
-+#endif
-+	return 0;
-+}
-+
-+static int psb_suspend(struct pci_dev *pdev, pm_message_t state)
-+{
-+	struct drm_device *dev = pci_get_drvdata(pdev);
-+	struct drm_psb_private *dev_priv =
-+	    (struct drm_psb_private *) dev->dev_private;
-+
-+	if (!down_write_trylock(&dev_priv->sgx_sem))
-+		return -EBUSY;
-+	if (dev_priv->graphics_state != PSB_PWR_STATE_D0i0)
-+		PSB_DEBUG_PM("Not suspending from D0i0\n");
-+	if (dev_priv->graphics_state == PSB_PWR_STATE_D3)
-+		goto exit;
-+	if (drm_psb_no_fb == 0) {
-+		psbfb_suspend(dev);
-+		psb_modeset_cleanup(dev);
-+	}
-+
-+	dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
-+	(void) psb_idle_3d(dev);
-+	(void) psb_idle_2d(dev);
-+	flush_scheduled_work();
-+
-+	if (dev_priv->has_msvdx)
-+		psb_prepare_msvdx_suspend(dev);
-+
-+	if (dev_priv->has_topaz)
-+		lnc_prepare_topaz_suspend(dev);
-+
-+#ifdef OSPM_STAT
-+	if (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)
-+		dev_priv->gfx_d0i0_time += jiffies - dev_priv->gfx_last_mode_change;
-+	else if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
-+		dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
-+	else
-+		PSB_DEBUG_PM("suspend: illegal previous power state\n");
-+	dev_priv->gfx_last_mode_change = jiffies;
-+	dev_priv->gfx_d3_cnt++;
-+#endif
-+
-+	dev_priv->graphics_state = PSB_PWR_STATE_D3;
-+	dev_priv->msvdx_state = PSB_PWR_STATE_D3;
-+	dev_priv->topaz_power_state = LNC_TOPAZ_POWEROFF;
-+	pci_save_state(pdev);
-+	pci_disable_device(pdev);
-+	pci_set_power_state(pdev, PCI_D3hot);
-+	psb_down_island_power(dev, PSB_GRAPHICS_ISLAND | PSB_VIDEO_ENC_ISLAND
-+			      | PSB_VIDEO_DEC_ISLAND);
-+exit:
-+	up_write(&dev_priv->sgx_sem);
-+	return 0;
-+}
-+
-+static int psb_resume(struct pci_dev *pdev)
-+{
-+	struct drm_device *dev = pci_get_drvdata(pdev);
-+	struct drm_psb_private *dev_priv =
-+	    (struct drm_psb_private *) dev->dev_private;
-+	struct psb_gtt *pg = dev_priv->pg;
-+	int ret;
-+	if (dev_priv->graphics_state != PSB_PWR_STATE_D3)
-+		return 0;
-+
-+	psb_up_island_power(dev, PSB_GRAPHICS_ISLAND | PSB_VIDEO_ENC_ISLAND
-+			    | PSB_VIDEO_DEC_ISLAND);
-+	pci_set_power_state(pdev, PCI_D0);
-+	pci_restore_state(pdev);
-+	ret = pci_enable_device(pdev);
-+	if (ret)
-+		return ret;
-+
-+	DRM_ERROR("FIXME: topaz's resume is not ready..\n");
-+#ifdef OSPM_STAT
-+	if (dev_priv->graphics_state == PSB_PWR_STATE_D3)
-+		dev_priv->gfx_d3_time += jiffies - dev_priv->gfx_last_mode_change;
-+	else
-+		PSB_DEBUG_PM("resume: illegal previous power state\n");
-+	dev_priv->gfx_last_mode_change = jiffies;
-+	dev_priv->gfx_d0i0_cnt++;
-+#endif
-+	dev_priv->graphics_state = PSB_PWR_STATE_D0i0;
-+	dev_priv->msvdx_state = PSB_PWR_STATE_D0i0;
-+	dev_priv->topaz_power_state = LNC_TOPAZ_POWERON;
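-+	/*
-+	 * Rebuild the software state that the D3 transition threw
-+	 * away: the MSVDX firmware must be reloaded (hence the reset
-+	 * flag below) and TOPAZ state restored before either video
-+	 * engine is touched again.
-+	 */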
-+	INIT_LIST_HEAD(&dev_priv->resume_buf.head);
-+	dev_priv->msvdx_needs_reset = 1;
-+
-+	lnc_prepare_topaz_resume(dev);
-+
-+	PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
-+	pci_write_config_word(pdev, PSB_GMCH_CTRL,
-+			      pg->gmch_ctrl | _PSB_GMCH_ENABLED);
-+
-+	/*
-+	 * Don't reinitialize the GTT as it is unnecessary. The gtt is
-+	 * stored in memory so it will automatically be restored. All
-+	 * we need to do is restore the PGETBL_CTL which we already do
-+	 * above.
-+	 */
-+
-+	//psb_gtt_init(dev_priv->pg, 1);
-+
-+	/*
-+	 * The SGX loses its register contents.
-+	 * Restore BIF registers. The MMU page tables are
-+	 * "normal" pages, so their contents should be kept.
-+	 */
-+
-+	PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
-+	PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
-+	PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
-+	PSB_RSGX32(PSB_CR_BIF_BANK1);
-+
-+	psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
-+	psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
-+	psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
-+
-+	/*
-+	 * 2D Base registers..
-+	 */
-+	psb_init_2d(dev_priv);
-+
-+	/*
-+	 * Persistent 3D base registers and USSE base registers..
-+	 */
-+
-+	PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
-+	PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
-+	PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
-+	PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
-+	(void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+
-+	/*
-+	 * Now, re-initialize the 3D engine.
-+	 */
-+
-+	psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
-+
-+	psb_scheduler_ta_mem_check(dev_priv);
-+	if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
-+		psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
-+				    PSB_TA_MEM_FLAG_TA |
-+				    PSB_TA_MEM_FLAG_RASTER |
-+				    PSB_TA_MEM_FLAG_HOSTA |
-+				    PSB_TA_MEM_FLAG_HOSTD |
-+				    PSB_TA_MEM_FLAG_INIT,
-+				    dev_priv->ta_mem->ta_memory->offset,
-+				    dev_priv->ta_mem->hw_data->offset,
-+				    dev_priv->ta_mem->hw_cookie);
-+	}
-+
-+	if (drm_psb_no_fb == 0) {
-+		psb_modeset_init(dev);
-+		drm_helper_initial_config(dev, false);
-+		psbfb_resume(dev);
-+	}
-+	return 0;
-+}
-+
-+int psb_extension_ioctl(struct drm_device *dev, void *data,
-+			struct drm_file *file_priv)
-+{
-+	union drm_psb_extension_arg *arg = data;
-+	struct drm_psb_extension_rep *rep = &arg->rep;
-+
-+	if (strcmp(arg->extension, "psb_ttm_placement_alphadrop") == 0) {
-+		rep->exists = 1;
-+		rep->driver_ioctl_offset = DRM_PSB_PLACEMENT_OFFSET;
-+		rep->sarea_offset = 0;
-+		rep->major = 1;
-+		rep->minor = 0;
-+		rep->pl = 0;
-+		return 0;
-+	}
-+	if (strcmp(arg->extension, "psb_ttm_fence_alphadrop") == 0) {
-+		rep->exists = 1;
-+		rep->driver_ioctl_offset = DRM_PSB_FENCE_OFFSET;
-+		rep->sarea_offset = 0;
-+		rep->major = 1;
-+		rep->minor = 0;
-+		rep->pl = 0;
-+		return 0;
-+	}
-+	if (strcmp(arg->extension, "psb_ttm_execbuf_alphadrop") == 0) {
-+		rep->exists = 1;
-+		rep->driver_ioctl_offset = DRM_PSB_CMDBUF;
-+		rep->sarea_offset = 0;
-+		rep->major = 1;
-+		rep->minor = 0;
-+		rep->pl = 0;
-+		return 0;
-+	}
-+
-+	rep->exists = 0;
-+	return 0;
-+}
-+
-+static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
-+			      struct drm_file *file_priv)
-+{
-+	struct drm_psb_private *dev_priv = psb_priv(dev);
-+	struct ttm_bo_device *bdev = &dev_priv->bdev;
-+	struct ttm_mem_type_manager *man;
-+	int clean;
-+	int ret;
-+
-+	ret = ttm_write_lock(&dev_priv->ttm_lock, 1,
-+			     psb_fpriv(file_priv)->tfile);
-+	if (unlikely(ret != 0))
-+		return ret;
-+
-+	/*
-+	 * Clean VRAM and TT for fbdev.
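-+	 * Everything is evicted to system memory (and finally swapped
-+	 * out) so the console owns the aperture while the VT is
-+	 * switched away; the write lock taken above is only released
-+	 * again by psb_vt_enter_ioctl().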
-+ */ -+ -+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); -+ if (unlikely(ret != 0)) -+ goto out_unlock; -+ -+ man = &bdev->man[TTM_PL_VRAM]; -+ spin_lock(&bdev->lru_lock); -+ clean = drm_mm_clean(&man->manager); -+ spin_unlock(&bdev->lru_lock); -+ if (unlikely(!clean)) -+ DRM_INFO("Notice: VRAM was not clean after VT switch, if you are running fbdev please ignore.\n"); -+ -+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT); -+ if (unlikely(ret != 0)) -+ goto out_unlock; -+ -+ man = &bdev->man[TTM_PL_TT]; -+ spin_lock(&bdev->lru_lock); -+ clean = drm_mm_clean(&man->manager); -+ spin_unlock(&bdev->lru_lock); -+ if (unlikely(!clean)) -+ DRM_INFO("Warning: GATT was not clean after VT switch.\n"); -+ -+ ttm_bo_swapout_all(&dev_priv->bdev); -+ -+ return 0; -+out_unlock: -+ (void) ttm_write_unlock(&dev_priv->ttm_lock, -+ psb_fpriv(file_priv)->tfile); -+ return ret; -+} -+ -+static int psb_vt_enter_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ return ttm_write_unlock(&dev_priv->ttm_lock, -+ psb_fpriv(file_priv)->tfile); -+} -+ -+/* always available as we are SIGIO'd */ -+static unsigned int psb_poll(struct file *filp, -+ struct poll_table_struct *wait) -+{ -+ return POLLIN | POLLRDNORM; -+} -+ -+int psb_driver_open(struct drm_device *dev, struct drm_file *priv) -+{ -+ /*psb_check_power_state(dev, PSB_DEVICE_SGX);*/ -+ return 0; -+} -+ -+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd, -+ unsigned long arg) -+{ -+ struct drm_file *file_priv = filp->private_data; -+ struct drm_device *dev = file_priv->minor->dev; -+ unsigned int nr = DRM_IOCTL_NR(cmd); -+ long ret; -+ -+ /* -+ * The driver private ioctls and TTM ioctls should be -+ * thread-safe. -+ */ -+ -+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) -+ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { -+ struct drm_ioctl_desc *ioctl = &psb_ioctls[nr - DRM_COMMAND_BASE]; -+ -+ if (unlikely(ioctl->cmd != cmd)) { -+ DRM_ERROR("Invalid drm command %d\n", -+ nr - DRM_COMMAND_BASE); -+ return -EINVAL; -+ } -+ -+ return drm_unlocked_ioctl(filp, cmd, arg); -+ } -+ /* -+ * Not all old drm ioctls are thread-safe. -+ */ -+ -+ lock_kernel(); -+ ret = drm_unlocked_ioctl(filp, cmd, arg); -+ unlock_kernel(); -+ return ret; -+} -+ -+static int psb_ospm_read(char *buf, char **start, off_t offset, int request, -+ int *eof, void *data) -+{ -+ struct drm_minor *minor = (struct drm_minor *) data; -+ struct drm_device *dev = minor->dev; -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ int len = 0; -+ unsigned long d0i0 = 0; -+ unsigned long d0i3 = 0; -+ unsigned long d3 = 0; -+ *start = &buf[offset]; -+ *eof = 0; -+ DRM_PROC_PRINT("D0i3:%s ", drm_psb_ospm ? 
"enabled" : "disabled"); -+ switch (dev_priv->graphics_state) { -+ case PSB_PWR_STATE_D0i0: -+ DRM_PROC_PRINT("GFX:%s\n", "D0i0"); -+ break; -+ case PSB_PWR_STATE_D0i3: -+ DRM_PROC_PRINT("GFX:%s\n", "D0i3"); -+ break; -+ case PSB_PWR_STATE_D3: -+ DRM_PROC_PRINT("GFX:%s\n", "D3"); -+ break; -+ default: -+ DRM_PROC_PRINT("GFX:%s\n", "unkown"); -+ } -+#ifdef OSPM_STAT -+ d0i0 = dev_priv->gfx_d0i0_time * 1000 / HZ; -+ d0i3 = dev_priv->gfx_d0i3_time * 1000 / HZ; -+ d3 = dev_priv->gfx_d3_time * 1000 / HZ; -+ switch (dev_priv->graphics_state) { -+ case PSB_PWR_STATE_D0i0: -+ d0i0 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ; -+ break; -+ case PSB_PWR_STATE_D0i3: -+ d0i3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ; -+ break; -+ case PSB_PWR_STATE_D3: -+ d3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ; -+ break; -+ } -+ DRM_PROC_PRINT("GFX(cnt/ms):\n"); -+ DRM_PROC_PRINT("D0i0:%lu/%lu, D0i3:%lu/%lu, D3:%lu/%lu \n", -+ dev_priv->gfx_d0i0_cnt, d0i0, dev_priv->gfx_d0i3_cnt, d0i3, -+ dev_priv->gfx_d3_cnt, d3); -+#endif -+ if (len > request + offset) -+ return request; -+ *eof = 1; -+ return len - offset; -+} -+ -+static int psb_proc_init(struct drm_minor *minor) -+{ -+ struct proc_dir_entry *ent; -+ if (!minor->dev_root) -+ return 0; -+ ent = create_proc_read_entry(OSPM_PROC_ENTRY, 0, minor->dev_root, -+ psb_ospm_read, minor); -+ if (ent) -+ return 0; -+ else -+ return -1; -+} -+ -+static void psb_proc_cleanup(struct drm_minor *minor) -+{ -+ if (!minor->dev_root) -+ return; -+ remove_proc_entry(OSPM_PROC_ENTRY, minor->dev_root); -+ return; -+} -+ -+static struct drm_driver driver = { -+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, -+ .load = psb_driver_load, -+ .unload = psb_driver_unload, -+ .dri_library_name = dri_library_name, -+ .get_reg_ofs = drm_core_get_reg_ofs, -+ .ioctls = psb_ioctls, -+ .device_is_agp = psb_driver_device_is_agp, -+ .irq_preinstall = psb_irq_preinstall, -+ .irq_postinstall = psb_irq_postinstall, -+ .irq_uninstall = psb_irq_uninstall, -+ .irq_handler = psb_irq_handler, -+ .firstopen = NULL, -+ .lastclose = psb_lastclose, -+ .open = psb_driver_open, -+ .proc_init = psb_proc_init, -+ .proc_cleanup = psb_proc_cleanup, -+ .fops = { -+ .owner = THIS_MODULE, -+ .open = psb_open, -+ .release = psb_release, -+ .unlocked_ioctl = psb_unlocked_ioctl, -+ .mmap = psb_mmap, -+ .poll = psb_poll, -+ .fasync = drm_fasync, -+ }, -+ .pci_driver = { -+ .name = DRIVER_NAME, -+ .id_table = pciidlist, -+ .resume = psb_resume, -+ .suspend = psb_suspend, -+ }, -+ .name = DRIVER_NAME, -+ .desc = DRIVER_DESC, -+ .date = PSB_DRM_DRIVER_DATE, -+ .major = PSB_DRM_DRIVER_MAJOR, -+ .minor = PSB_DRM_DRIVER_MINOR, -+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL -+}; -+ -+static int __init psb_init(void) -+{ -+ driver.num_ioctls = psb_max_ioctl; -+ -+ return drm_init(&driver); -+} -+ -+static void __exit psb_exit(void) -+{ -+ drm_exit(&driver); -+} -+ -+module_init(psb_init); -+module_exit(psb_exit); -+ -+MODULE_AUTHOR(DRIVER_AUTHOR); -+MODULE_DESCRIPTION(DRIVER_DESC); -+MODULE_LICENSE("GPL"); -diff -uNr a/drivers/gpu/drm/psb/psb_drv.h b/drivers/gpu/drm/psb/psb_drv.h ---- a/drivers/gpu/drm/psb/psb_drv.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_drv.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,1129 @@ -+/************************************************************************** -+ *Copyright (c) 2007-2008, Intel Corporation. -+ *All Rights Reserved. 
-+ * -+ *This program is free software; you can redistribute it and/or modify it -+ *under the terms and conditions of the GNU General Public License, -+ *version 2, as published by the Free Software Foundation. -+ * -+ *This program is distributed in the hope it will be useful, but WITHOUT -+ *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ *more details. -+ * -+ *You should have received a copy of the GNU General Public License along with -+ *this program; if not, write to the Free Software Foundation, Inc., -+ *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ *develop this driver. -+ * -+ **************************************************************************/ -+/* -+ */ -+#ifndef _PSB_DRV_H_ -+#define _PSB_DRV_H_ -+ -+#include <drm/drmP.h> -+#include "psb_drm.h" -+#include "psb_reg.h" -+#include "psb_schedule.h" -+#include "psb_intel_drv.h" -+#include "ttm/ttm_object.h" -+#include "ttm/ttm_fence_driver.h" -+#include "ttm/ttm_bo_driver.h" -+#include "ttm/ttm_lock.h" -+ -+extern struct ttm_bo_driver psb_ttm_bo_driver; -+ -+enum { -+ CHIP_PSB_8108 = 0, -+ CHIP_PSB_8109 = 1, -+ CHIP_MRST_4100 = 2 -+}; -+ -+/* -+ *Hardware bugfixes -+ */ -+ -+#define FIX_TG_16 -+#define FIX_TG_2D_CLOCKGATE -+#define OSPM_STAT -+ -+#define DRIVER_NAME "psb" -+#define DRIVER_DESC "drm driver for the Intel GMA500" -+#define DRIVER_AUTHOR "Tungsten Graphics Inc." -+#define OSPM_PROC_ENTRY "ospm" -+ -+#define PSB_DRM_DRIVER_DATE "2009-02-09" -+#define PSB_DRM_DRIVER_MAJOR 8 -+#define PSB_DRM_DRIVER_MINOR 0 -+#define PSB_DRM_DRIVER_PATCHLEVEL 0 -+ -+/* -+ *TTM driver private offsets. -+ */ -+ -+#define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) -+ -+#define PSB_OBJECT_HASH_ORDER 13 -+#define PSB_FILE_OBJECT_HASH_ORDER 12 -+#define PSB_BO_HASH_ORDER 12 -+ -+#define PSB_VDC_OFFSET 0x00000000 -+#define PSB_VDC_SIZE 0x000080000 -+#define MRST_MMIO_SIZE 0x0000C0000 -+#define PSB_SGX_SIZE 0x8000 -+#define PSB_SGX_OFFSET 0x00040000 -+#define MRST_SGX_OFFSET 0x00080000 -+#define PSB_MMIO_RESOURCE 0 -+#define PSB_GATT_RESOURCE 2 -+#define PSB_GTT_RESOURCE 3 -+#define PSB_GMCH_CTRL 0x52 -+#define PSB_BSM 0x5C -+#define _PSB_GMCH_ENABLED 0x4 -+#define PSB_PGETBL_CTL 0x2020 -+#define _PSB_PGETBL_ENABLED 0x00000001 -+#define PSB_SGX_2D_SLAVE_PORT 0x4000 -+#define PSB_TT_PRIV0_LIMIT (256*1024*1024) -+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT) -+#define PSB_NUM_VALIDATE_BUFFERS 2048 -+#define PSB_MEM_KERNEL_START 0x10000000 -+#define PSB_MEM_PDS_START 0x20000000 -+#define PSB_MEM_MMU_START 0x40000000 -+ -+#define DRM_PSB_MEM_KERNEL TTM_PL_PRIV0 -+#define DRM_PSB_FLAG_MEM_KERNEL TTM_PL_FLAG_PRIV0 -+ -+/* -+ *Flags for external memory type field. 
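-+ *(The flags are the PSB_MMU_*_MEMORY defines further below; the
-+ *MSVDX/TOPAZ defines in between are MMIO window offsets, not flags.)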
-+ */ -+ -+#define MRST_MSVDX_OFFSET 0x90000 /*MSVDX Base offset */ -+#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */ -+/* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */ -+#define PSB_MSVDX_SIZE 0x10000 -+ -+#define LNC_TOPAZ_OFFSET 0xA0000 -+#define LNC_TOPAZ_SIZE 0x10000 -+ -+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */ -+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */ -+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */ -+ -+/* -+ *PTE's and PDE's -+ */ -+ -+#define PSB_PDE_MASK 0x003FFFFF -+#define PSB_PDE_SHIFT 22 -+#define PSB_PTE_SHIFT 12 -+ -+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */ -+#define PSB_PTE_WO 0x0002 /* Write only */ -+#define PSB_PTE_RO 0x0004 /* Read only */ -+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */ -+ -+/* -+ *VDC registers and bits -+ */ -+#define PSB_HWSTAM 0x2098 -+#define PSB_INSTPM 0x20C0 -+#define PSB_INT_IDENTITY_R 0x20A4 -+#define _PSB_VSYNC_PIPEB_FLAG (1<<5) -+#define _PSB_VSYNC_PIPEA_FLAG (1<<7) -+#define _PSB_IRQ_SGX_FLAG (1<<18) -+#define _PSB_IRQ_MSVDX_FLAG (1<<19) -+#define _LNC_IRQ_TOPAZ_FLAG (1<<20) -+#define PSB_INT_MASK_R 0x20A8 -+#define PSB_INT_ENABLE_R 0x20A0 -+#define PSB_PIPEASTAT 0x70024 -+#define _PSB_VBLANK_INTERRUPT_ENABLE (1 << 17) -+#define _PSB_VBLANK_CLEAR (1 << 1) -+#define PSB_PIPEBSTAT 0x71024 -+ -+#define _PSB_MMU_ER_MASK 0x0001FF00 -+#define _PSB_MMU_ER_HOST (1 << 16) -+#define GPIOA 0x5010 -+#define GPIOB 0x5014 -+#define GPIOC 0x5018 -+#define GPIOD 0x501c -+#define GPIOE 0x5020 -+#define GPIOF 0x5024 -+#define GPIOG 0x5028 -+#define GPIOH 0x502c -+#define GPIO_CLOCK_DIR_MASK (1 << 0) -+#define GPIO_CLOCK_DIR_IN (0 << 1) -+#define GPIO_CLOCK_DIR_OUT (1 << 1) -+#define GPIO_CLOCK_VAL_MASK (1 << 2) -+#define GPIO_CLOCK_VAL_OUT (1 << 3) -+#define GPIO_CLOCK_VAL_IN (1 << 4) -+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) -+#define GPIO_DATA_DIR_MASK (1 << 8) -+#define GPIO_DATA_DIR_IN (0 << 9) -+#define GPIO_DATA_DIR_OUT (1 << 9) -+#define GPIO_DATA_VAL_MASK (1 << 10) -+#define GPIO_DATA_VAL_OUT (1 << 11) -+#define GPIO_DATA_VAL_IN (1 << 12) -+#define GPIO_DATA_PULLUP_DISABLE (1 << 13) -+ -+#define VCLK_DIVISOR_VGA0 0x6000 -+#define VCLK_DIVISOR_VGA1 0x6004 -+#define VCLK_POST_DIV 0x6010 -+ -+#define PSB_COMM_2D (PSB_ENGINE_2D << 4) -+#define PSB_COMM_3D (PSB_ENGINE_3D << 4) -+#define PSB_COMM_TA (PSB_ENGINE_TA << 4) -+#define PSB_COMM_HP (PSB_ENGINE_HP << 4) -+#define PSB_COMM_USER_IRQ (1024 >> 2) -+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1) -+#define PSB_COMM_FW (2048 >> 2) -+ -+#define PSB_UIRQ_VISTEST 1 -+#define PSB_UIRQ_OOM_REPLY 2 -+#define PSB_UIRQ_FIRE_TA_REPLY 3 -+#define PSB_UIRQ_FIRE_RASTER_REPLY 4 -+ -+#define PSB_2D_SIZE (256*1024*1024) -+#define PSB_MAX_RELOC_PAGES 1024 -+ -+#define PSB_LOW_REG_OFFS 0x0204 -+#define PSB_HIGH_REG_OFFS 0x0600 -+ -+#define PSB_NUM_VBLANKS 2 -+ -+#define PSB_COMM_2D (PSB_ENGINE_2D << 4) -+#define PSB_COMM_3D (PSB_ENGINE_3D << 4) -+#define PSB_COMM_TA (PSB_ENGINE_TA << 4) -+#define PSB_COMM_HP (PSB_ENGINE_HP << 4) -+#define PSB_COMM_FW (2048 >> 2) -+ -+#define PSB_2D_SIZE (256*1024*1024) -+#define PSB_MAX_RELOC_PAGES 1024 -+ -+#define PSB_LOW_REG_OFFS 0x0204 -+#define PSB_HIGH_REG_OFFS 0x0600 -+ -+#define PSB_NUM_VBLANKS 2 -+#define PSB_WATCHDOG_DELAY (DRM_HZ / 10) -+ -+#define PSB_PWR_STATE_MASK 0x0F -+#define PSB_PWR_ACTION_MASK 0xF0 -+#define PSB_PWR_STATE_D0i0 0x1 -+#define PSB_PWR_STATE_D0i3 0x2 -+#define PSB_PWR_STATE_D3 0x3 -+#define PSB_PWR_ACTION_DOWN 0x10 /*Need to power down*/ -+#define PSB_PWR_ACTION_UP 0x20/*Need 
to power up*/ -+#define PSB_GRAPHICS_ISLAND 0x1 -+#define PSB_VIDEO_ENC_ISLAND 0x2 -+#define PSB_VIDEO_DEC_ISLAND 0x4 -+#define LNC_TOPAZ_POWERON 0x1 -+#define LNC_TOPAZ_POWEROFF 0x0 -+ -+/* -+ *User options. -+ */ -+ -+struct drm_psb_uopt { -+ int clock_gating; -+}; -+ -+/** -+ *struct psb_context -+ * -+ *@buffers: array of pre-allocated validate buffers. -+ *@used_buffers: number of buffers in @buffers array currently in use. -+ *@validate_buffer: buffers validated from user-space. -+ *@kern_validate_buffers : buffers validated from kernel-space. -+ *@fence_flags : Fence flags to be used for fence creation. -+ * -+ *This structure is used during execbuf validation. -+ */ -+ -+struct psb_context { -+ struct psb_validate_buffer *buffers; -+ uint32_t used_buffers; -+ struct list_head validate_list; -+ struct list_head kern_validate_list; -+ uint32_t fence_types; -+ uint32_t val_seq; -+}; -+ -+struct psb_gtt { -+ struct drm_device *dev; -+ int initialized; -+ uint32_t gatt_start; -+ uint32_t gtt_start; -+ uint32_t gtt_phys_start; -+ unsigned gtt_pages; -+ unsigned gatt_pages; -+ uint32_t stolen_base; -+ uint32_t pge_ctl; -+ u16 gmch_ctrl; -+ unsigned long stolen_size; -+ unsigned long vram_stolen_size; -+ unsigned long ci_stolen_size; -+ unsigned long rar_stolen_size; -+ uint32_t *gtt_map; -+ struct rw_semaphore sem; -+}; -+ -+struct psb_use_base { -+ struct list_head head; -+ struct ttm_fence_object *fence; -+ unsigned int reg; -+ unsigned long offset; -+ unsigned int dm; -+}; -+ -+struct psb_validate_buffer; -+ -+struct psb_msvdx_cmd_queue { -+ struct list_head head; -+ void *cmd; -+ unsigned long cmd_size; -+ uint32_t sequence; -+}; -+ -+ -+struct drm_psb_private { -+ -+ /* -+ *TTM Glue. -+ */ -+ -+ struct drm_global_reference mem_global_ref; -+ int has_global; -+ -+ struct drm_device *dev; -+ struct ttm_object_device *tdev; -+ struct ttm_fence_device fdev; -+ struct ttm_bo_device bdev; -+ struct ttm_lock ttm_lock; -+ struct vm_operations_struct *ttm_vm_ops; -+ int has_fence_device; -+ int has_bo_device; -+ -+ unsigned long chipset; -+ -+ struct psb_xhw_buf resume_buf; -+ struct drm_psb_dev_info_arg dev_info; -+ struct drm_psb_uopt uopt; -+ -+ struct psb_gtt *pg; -+ -+ struct page *scratch_page; -+ struct page *comm_page; -+ /* Deleted volatile because it is not recommended to use. */ -+ uint32_t *comm; -+ uint32_t comm_mmu_offset; -+ uint32_t mmu_2d_offset; -+ uint32_t sequence[PSB_NUM_ENGINES]; -+ uint32_t last_sequence[PSB_NUM_ENGINES]; -+ int idle[PSB_NUM_ENGINES]; -+ uint32_t last_submitted_seq[PSB_NUM_ENGINES]; -+ int engine_lockup_2d; -+ -+ struct psb_mmu_driver *mmu; -+ struct psb_mmu_pd *pf_pd; -+ -+ uint8_t *sgx_reg; -+ uint8_t *vdc_reg; -+ uint32_t gatt_free_offset; -+ -+ /* -+ *MSVDX -+ */ -+ int has_msvdx; -+ uint8_t *msvdx_reg; -+ int msvdx_needs_reset; -+ atomic_t msvdx_mmu_invaldc; -+ -+ /* -+ *TOPAZ -+ */ -+ uint8_t *topaz_reg; -+ -+ void *topaz_mtx_reg_state; -+ struct ttm_buffer_object *topaz_mtx_data_mem; -+ uint32_t topaz_cur_codec; -+ uint32_t cur_mtx_data_size; -+ int topaz_needs_reset; -+ int has_topaz; -+#define TOPAZ_MAX_IDELTIME (HZ*30) -+ int topaz_start_idle; -+ unsigned long topaz_idle_start_jiffies; -+ /* used by lnc_topaz_lockup */ -+ uint32_t topaz_current_sequence; -+ uint32_t topaz_last_sequence; -+ uint32_t topaz_finished_sequence; -+ -+ /* -+ *Fencing / irq. 
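-+ *Shadow copies of the SGX and VDC interrupt enable masks, plus the
-+ *spinlocks guarding them and the per-engine fence sequencing state.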
-+ */ -+ -+ uint32_t sgx_irq_mask; -+ uint32_t sgx2_irq_mask; -+ uint32_t vdc_irq_mask; -+ -+ spinlock_t irqmask_lock; -+ spinlock_t sequence_lock; -+ int fence0_irq_on; -+ int irq_enabled; -+ unsigned int irqen_count_2d; -+ wait_queue_head_t event_2d_queue; -+ -+#ifdef FIX_TG_16 -+ wait_queue_head_t queue_2d; -+ atomic_t lock_2d; -+ atomic_t ta_wait_2d; -+ atomic_t ta_wait_2d_irq; -+ atomic_t waiters_2d; -+#else -+ struct mutex mutex_2d; -+#endif -+ uint32_t msvdx_current_sequence; -+ uint32_t msvdx_last_sequence; -+ int fence2_irq_on; -+ -+ /* -+ *Modesetting -+ */ -+ struct psb_intel_mode_device mode_dev; -+ -+ /* -+ *MSVDX Rendec Memory -+ */ -+ struct ttm_buffer_object *ccb0; -+ uint32_t base_addr0; -+ struct ttm_buffer_object *ccb1; -+ uint32_t base_addr1; -+ -+ /* -+ * CI share buffer -+ */ -+ unsigned int ci_region_start; -+ unsigned int ci_region_size; -+ -+ /* -+ *Memory managers -+ */ -+ -+ int have_vram; -+ int have_camera; -+ int have_tt; -+ int have_mem_mmu; -+ int have_mem_aper; -+ int have_mem_kernel; -+ int have_mem_pds; -+ int have_mem_rastgeom; -+ struct mutex temp_mem; -+ -+ /* -+ *Relocation buffer mapping. -+ */ -+ -+ spinlock_t reloc_lock; -+ unsigned int rel_mapped_pages; -+ wait_queue_head_t rel_mapped_queue; -+ -+ /* -+ *SAREA -+ */ -+ struct drm_psb_sarea *sarea_priv; -+ -+ /* -+ *LVDS info -+ */ -+ int backlight_duty_cycle; /* restore backlight to this value */ -+ bool panel_wants_dither; -+ struct drm_display_mode *panel_fixed_mode; -+ -+/* MRST private date start */ -+/*FIXME JLIU7 need to revisit */ -+ bool sku_83; -+ bool sku_100; -+ bool sku_100L; -+ bool sku_bypass; -+ uint32_t iLVDS_enable; -+ -+ /* pipe config register value */ -+ uint32_t pipeconf; -+ -+ /* plane control register value */ -+ uint32_t dspcntr; -+ -+/* MRST_DSI private date start */ -+ /* -+ *MRST DSI info -+ */ -+ /* The DSI device ready */ -+ bool dsi_device_ready; -+ -+ /* The DPI panel power on */ -+ bool dpi_panel_on; -+ -+ /* The DBI panel power on */ -+ bool dbi_panel_on; -+ -+ /* The DPI display */ -+ bool dpi; -+ -+ /* status */ -+ uint32_t videoModeFormat:2; -+ uint32_t laneCount:3; -+ uint32_t status_reserved:27; -+ -+ /* dual display - DPI & DBI */ -+ bool dual_display; -+ -+ /* HS or LP transmission */ -+ bool lp_transmission; -+ -+ /* configuration phase */ -+ bool config_phase; -+ -+ /* DSI clock */ -+ uint32_t RRate; -+ uint32_t DDR_Clock; -+ uint32_t DDR_Clock_Calculated; -+ uint32_t ClockBits; -+ -+ /* DBI Buffer pointer */ -+ u8 *p_DBI_commandBuffer_orig; -+ u8 *p_DBI_commandBuffer; -+ uint32_t DBI_CB_pointer; -+ u8 *p_DBI_dataBuffer_orig; -+ u8 *p_DBI_dataBuffer; -+ uint32_t DBI_DB_pointer; -+ -+ /* DPI panel spec */ -+ uint32_t pixelClock; -+ uint32_t HsyncWidth; -+ uint32_t HbackPorch; -+ uint32_t HfrontPorch; -+ uint32_t HactiveArea; -+ uint32_t VsyncWidth; -+ uint32_t VbackPorch; -+ uint32_t VfrontPorch; -+ uint32_t VactiveArea; -+ uint32_t bpp:5; -+ uint32_t Reserved:27; -+ -+ /* DBI panel spec */ -+ uint32_t dbi_pixelClock; -+ uint32_t dbi_HsyncWidth; -+ uint32_t dbi_HbackPorch; -+ uint32_t dbi_HfrontPorch; -+ uint32_t dbi_HactiveArea; -+ uint32_t dbi_VsyncWidth; -+ uint32_t dbi_VbackPorch; -+ uint32_t dbi_VfrontPorch; -+ uint32_t dbi_VactiveArea; -+ uint32_t dbi_bpp:5; -+ uint32_t dbi_Reserved:27; -+ -+/* MRST_DSI private date end */ -+ -+ /* -+ *Register state -+ */ -+ uint32_t saveDSPACNTR; -+ uint32_t saveDSPBCNTR; -+ uint32_t savePIPEACONF; -+ uint32_t savePIPEBCONF; -+ uint32_t savePIPEASRC; -+ uint32_t savePIPEBSRC; -+ uint32_t saveFPA0; -+ uint32_t 
saveFPA1; -+ uint32_t saveDPLL_A; -+ uint32_t saveDPLL_A_MD; -+ uint32_t saveHTOTAL_A; -+ uint32_t saveHBLANK_A; -+ uint32_t saveHSYNC_A; -+ uint32_t saveVTOTAL_A; -+ uint32_t saveVBLANK_A; -+ uint32_t saveVSYNC_A; -+ uint32_t saveDSPASTRIDE; -+ uint32_t saveDSPASIZE; -+ uint32_t saveDSPAPOS; -+ uint32_t saveDSPABASE; -+ uint32_t saveDSPASURF; -+ uint32_t saveFPB0; -+ uint32_t saveFPB1; -+ uint32_t saveDPLL_B; -+ uint32_t saveDPLL_B_MD; -+ uint32_t saveHTOTAL_B; -+ uint32_t saveHBLANK_B; -+ uint32_t saveHSYNC_B; -+ uint32_t saveVTOTAL_B; -+ uint32_t saveVBLANK_B; -+ uint32_t saveVSYNC_B; -+ uint32_t saveDSPBSTRIDE; -+ uint32_t saveDSPBSIZE; -+ uint32_t saveDSPBPOS; -+ uint32_t saveDSPBBASE; -+ uint32_t saveDSPBSURF; -+ uint32_t saveVCLK_DIVISOR_VGA0; -+ uint32_t saveVCLK_DIVISOR_VGA1; -+ uint32_t saveVCLK_POST_DIV; -+ uint32_t saveVGACNTRL; -+ uint32_t saveADPA; -+ uint32_t saveLVDS; -+ uint32_t saveDVOA; -+ uint32_t saveDVOB; -+ uint32_t saveDVOC; -+ uint32_t savePP_ON; -+ uint32_t savePP_OFF; -+ uint32_t savePP_CONTROL; -+ uint32_t savePP_CYCLE; -+ uint32_t savePFIT_CONTROL; -+ uint32_t savePaletteA[256]; -+ uint32_t savePaletteB[256]; -+ uint32_t saveBLC_PWM_CTL; -+ uint32_t saveCLOCKGATING; -+ -+ /* -+ *Xhw -+ */ -+ -+ uint32_t *xhw; -+ struct ttm_buffer_object *xhw_bo; -+ struct ttm_bo_kmap_obj xhw_kmap; -+ struct list_head xhw_in; -+ spinlock_t xhw_lock; -+ atomic_t xhw_client; -+ struct drm_file *xhw_file; -+ wait_queue_head_t xhw_queue; -+ wait_queue_head_t xhw_caller_queue; -+ struct mutex xhw_mutex; -+ struct psb_xhw_buf *xhw_cur_buf; -+ int xhw_submit_ok; -+ int xhw_on; -+ -+ /* -+ *Scheduling. -+ */ -+ -+ struct mutex reset_mutex; -+ struct psb_scheduler scheduler; -+ struct mutex cmdbuf_mutex; -+ uint32_t ta_mem_pages; -+ struct psb_ta_mem *ta_mem; -+ int force_ta_mem_load; -+ atomic_t val_seq; -+ -+ /* -+ *TODO: change this to be per drm-context. -+ */ -+ -+ struct psb_context context; -+ -+ /* -+ *Watchdog -+ */ -+ -+ spinlock_t watchdog_lock; -+ struct timer_list watchdog_timer; -+ struct work_struct watchdog_wq; -+ struct work_struct msvdx_watchdog_wq; -+ struct work_struct topaz_watchdog_wq; -+ int timer_available; -+ -+ /* -+ *msvdx command queue -+ */ -+ spinlock_t msvdx_lock; -+ struct mutex msvdx_mutex; -+ struct list_head msvdx_queue; -+ int msvdx_busy; -+ int msvdx_fw_loaded; -+ void *msvdx_fw; -+ int msvdx_fw_size; -+ -+ /* -+ *topaz command queue -+ */ -+ spinlock_t topaz_lock; -+ struct mutex topaz_mutex; -+ struct list_head topaz_queue; -+ int topaz_busy; /* 0 means topaz is free */ -+ int topaz_fw_loaded; -+ -+ /* topaz ccb data */ -+ /* XXX: should the addr stored by 32 bits? more compatible way?? 
*/ -+ uint32_t topaz_ccb_buffer_addr; -+ uint32_t topaz_ccb_ctrl_addr; -+ uint32_t topaz_ccb_size; -+ uint32_t topaz_cmd_windex; -+ uint16_t topaz_cmd_seq; -+ -+ uint32_t stored_initial_qp; -+ uint32_t topaz_dash_access_ctrl; -+ -+ struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */ -+ struct ttm_bo_kmap_obj topaz_bo_kmap; -+ void *topaz_ccb_wb; -+ uint32_t topaz_wb_offset; -+ uint32_t *topaz_sync_addr; -+ uint32_t topaz_sync_offset; -+ uint32_t topaz_sync_cmd_seq; -+ -+ struct rw_semaphore sgx_sem; /*sgx is in used*/ -+ struct semaphore pm_sem; /*pm action in process*/ -+ unsigned char graphics_state; -+#ifdef OSPM_STAT -+ unsigned long gfx_d0i3_time; -+ unsigned long gfx_d0i0_time; -+ unsigned long gfx_d3_time; -+ unsigned long gfx_last_mode_change; -+ unsigned long gfx_d0i0_cnt; -+ unsigned long gfx_d0i3_cnt; -+ unsigned long gfx_d3_cnt; -+#endif -+ -+ /* MSVDX OSPM */ -+ unsigned char msvdx_state; -+ unsigned long msvdx_last_action; -+ uint32_t msvdx_clk_state; -+ -+ /* TOPAZ OSPM */ -+ unsigned char topaz_power_state; -+ unsigned long topaz_last_action; -+ uint32_t topaz_clk_state; -+}; -+ -+struct psb_fpriv { -+ struct ttm_object_file *tfile; -+}; -+ -+struct psb_mmu_driver; -+ -+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int); -+extern int drm_pick_crtcs(struct drm_device *dev); -+ -+ -+static inline struct psb_fpriv *psb_fpriv(struct drm_file *file_priv) -+{ -+ return (struct psb_fpriv *) file_priv->driver_priv; -+} -+ -+static inline struct drm_psb_private *psb_priv(struct drm_device *dev) -+{ -+ return (struct drm_psb_private *) dev->dev_private; -+} -+ -+/* -+ *TTM glue. psb_ttm_glue.c -+ */ -+ -+extern int psb_open(struct inode *inode, struct file *filp); -+extern int psb_release(struct inode *inode, struct file *filp); -+extern int psb_mmap(struct file *filp, struct vm_area_struct *vma); -+ -+extern int psb_fence_signaled_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_verify_access(struct ttm_buffer_object *bo, -+ struct file *filp); -+extern ssize_t psb_ttm_read(struct file *filp, char __user *buf, -+ size_t count, loff_t *f_pos); -+extern ssize_t psb_ttm_write(struct file *filp, const char __user *buf, -+ size_t count, loff_t *f_pos); -+extern int psb_fence_finish_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_fence_unref_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_pl_unref_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_pl_reference_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_pl_create_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_extension_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_ttm_global_init(struct drm_psb_private *dev_priv); -+extern void psb_ttm_global_release(struct drm_psb_private *dev_priv); -+/* -+ *MMU stuff. 
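-+ *Page-directory and PTE management for the SGX MMU: allocating and
-+ *freeing page directories, inserting and removing page ranges, and
-+ *mirroring the GTT into MMU space (see psb_mmu_mirror_gtt below).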
-+ */ -+ -+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers, -+ int trap_pagefaults, -+ int invalid_type, -+ struct drm_psb_private *dev_priv); -+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver); -+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver -+ *driver); -+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset, -+ uint32_t gtt_start, uint32_t gtt_pages); -+extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset); -+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver, -+ int trap_pagefaults, -+ int invalid_type); -+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd); -+extern void psb_mmu_flush(struct psb_mmu_driver *driver); -+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, -+ unsigned long address, -+ uint32_t num_pages); -+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, -+ uint32_t start_pfn, -+ unsigned long address, -+ uint32_t num_pages, int type); -+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, -+ unsigned long *pfn); -+ -+/* -+ *Enable / disable MMU for different requestors. -+ */ -+ -+extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, -+ uint32_t mask); -+extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver, -+ uint32_t mask); -+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context); -+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, -+ unsigned long address, uint32_t num_pages, -+ uint32_t desired_tile_stride, -+ uint32_t hw_tile_stride, int type); -+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd, -+ unsigned long address, uint32_t num_pages, -+ uint32_t desired_tile_stride, -+ uint32_t hw_tile_stride); -+/* -+ *psb_sgx.c -+ */ -+ -+extern int psb_blit_sequence(struct drm_psb_private *dev_priv, -+ uint32_t sequence); -+extern void psb_init_2d(struct drm_psb_private *dev_priv); -+extern int psb_idle_2d(struct drm_device *dev); -+extern int psb_idle_3d(struct drm_device *dev); -+extern int psb_emit_2d_copy_blit(struct drm_device *dev, -+ uint32_t src_offset, -+ uint32_t dst_offset, uint32_t pages, -+ int direction); -+extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_reg_submit(struct drm_psb_private *dev_priv, -+ uint32_t *regs, unsigned int cmds); -+extern int psb_submit_copy_cmdbuf(struct drm_device *dev, -+ struct ttm_buffer_object *cmd_buffer, -+ unsigned long cmd_offset, -+ unsigned long cmd_size, int engine, -+ uint32_t *copy_buffer); -+ -+extern void psb_init_disallowed(void); -+extern void psb_fence_or_sync(struct drm_file *file_priv, -+ uint32_t engine, -+ uint32_t fence_types, -+ uint32_t fence_flags, -+ struct list_head *list, -+ struct psb_ttm_fence_rep *fence_arg, -+ struct ttm_fence_object **fence_p); -+extern int psb_validate_kernel_buffer(struct psb_context *context, -+ struct ttm_buffer_object *bo, -+ uint32_t fence_class, -+ uint64_t set_flags, -+ uint64_t clr_flags); -+extern void psb_init_ospm(struct drm_psb_private *dev_priv); -+extern void psb_check_power_state(struct drm_device *dev, int devices); -+extern void psb_down_island_power(struct drm_device *dev, int islands); -+extern void psb_up_island_power(struct drm_device *dev, int islands); -+extern int psb_try_power_down_sgx(struct drm_device *dev); -+ -+/* -+ *psb_irq.c -+ */ -+ -+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS); -+extern void psb_irq_preinstall(struct 
drm_device *dev); -+extern int psb_irq_postinstall(struct drm_device *dev); -+extern void psb_irq_uninstall(struct drm_device *dev); -+extern int psb_vblank_wait2(struct drm_device *dev, -+ unsigned int *sequence); -+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence); -+ -+/* -+ *psb_fence.c -+ */ -+ -+extern void psb_fence_handler(struct drm_device *dev, uint32_t class); -+extern void psb_2D_irq_off(struct drm_psb_private *dev_priv); -+extern void psb_2D_irq_on(struct drm_psb_private *dev_priv); -+extern uint32_t psb_fence_advance_sequence(struct drm_device *dev, -+ uint32_t class); -+extern int psb_fence_emit_sequence(struct ttm_fence_device *fdev, -+ uint32_t fence_class, -+ uint32_t flags, uint32_t *sequence, -+ unsigned long *timeout_jiffies); -+extern void psb_fence_error(struct drm_device *dev, -+ uint32_t class, -+ uint32_t sequence, uint32_t type, int error); -+extern int psb_ttm_fence_device_init(struct ttm_fence_device *fdev); -+ -+/*MSVDX stuff*/ -+extern void psb_msvdx_irq_off(struct drm_psb_private *dev_priv); -+extern void psb_msvdx_irq_on(struct drm_psb_private *dev_priv); -+ -+/* -+ *psb_gtt.c -+ */ -+extern int psb_gtt_init(struct psb_gtt *pg, int resume); -+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages, -+ unsigned offset_pages, unsigned num_pages, -+ unsigned desired_tile_stride, -+ unsigned hw_tile_stride, int type); -+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages, -+ unsigned num_pages, -+ unsigned desired_tile_stride, -+ unsigned hw_tile_stride); -+ -+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev); -+extern void psb_gtt_takedown(struct psb_gtt *pg, int free); -+ -+/* -+ *psb_fb.c -+ */ -+extern int psbfb_probed(struct drm_device *dev); -+extern int psbfb_remove(struct drm_device *dev, -+ struct drm_framebuffer *fb); -+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern void psbfb_suspend(struct drm_device *dev); -+extern void psbfb_resume(struct drm_device *dev); -+ -+/* -+ *psb_reset.c -+ */ -+ -+extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d); -+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv); -+extern void psb_watchdog_init(struct drm_psb_private *dev_priv); -+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv); -+extern void psb_print_pagefault(struct drm_psb_private *dev_priv); -+ -+/* -+ *psb_xhw.c -+ */ -+ -+extern int psb_xhw_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv); -+extern int psb_xhw_init(struct drm_device *dev); -+extern void psb_xhw_takedown(struct drm_psb_private *dev_priv); -+extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv, -+ struct drm_file *file_priv, int closing); -+extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, -+ uint32_t fire_flags, -+ uint32_t hw_context, -+ uint32_t *cookie, -+ uint32_t *oom_cmds, -+ uint32_t num_oom_cmds, -+ uint32_t offset, -+ uint32_t engine, uint32_t flags); -+extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, -+ uint32_t fire_flags); -+extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, uint32_t w, -+ uint32_t h, uint32_t *hw_cookie, -+ 
uint32_t *bo_size, uint32_t *clear_p_start, -+ uint32_t *clear_num_pages); -+ -+extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf); -+extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, uint32_t *value); -+extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, -+ uint32_t pages, -+ uint32_t * hw_cookie, -+ uint32_t * size, -+ uint32_t * ta_min_size); -+extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, uint32_t *cookie); -+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, -+ uint32_t *cookie, -+ uint32_t *bca, -+ uint32_t *rca, uint32_t *flags); -+extern int psb_xhw_vistest(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf); -+extern int psb_xhw_handler(struct drm_psb_private *dev_priv); -+extern int psb_xhw_resume(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf); -+extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, uint32_t *cookie); -+extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, -+ uint32_t flags, -+ uint32_t param_offset, -+ uint32_t pt_offset, uint32_t *hw_cookie); -+extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf); -+ -+/* -+ *psb_schedule.c: HW bug fixing. -+ */ -+ -+#ifdef FIX_TG_16 -+ -+extern void psb_2d_unlock(struct drm_psb_private *dev_priv); -+extern void psb_2d_lock(struct drm_psb_private *dev_priv); -+extern int psb_2d_trylock(struct drm_psb_private *dev_priv); -+extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv); -+extern int psb_2d_trylock(struct drm_psb_private *dev_priv); -+extern void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv); -+#else -+ -+#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d) -+#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d) -+ -+#endif -+ -+/* modesetting */ -+extern void psb_modeset_init(struct drm_device *dev); -+extern void psb_modeset_cleanup(struct drm_device *dev); -+ -+ -+/* -+ *Utilities -+ */ -+#define DRM_DRIVER_PRIVATE_T struct drm_psb_private -+ -+static inline u32 MSG_READ32(uint port, uint offset) -+{ -+ int mcr = (0xD0<<24) | (port << 16) | (offset << 8); -+ outl(0x800000D0, 0xCF8); -+ outl(mcr, 0xCFC); -+ outl(0x800000D4, 0xCF8); -+ return inl(0xcfc); -+} -+static inline void MSG_WRITE32(uint port, uint offset, u32 value) -+{ -+ int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0; -+ outl(0x800000D4, 0xCF8); -+ outl(value, 0xcfc); -+ outl(0x800000D0, 0xCF8); -+ outl(mcr, 0xCFC); -+} -+ -+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ -+ return ioread32(dev_priv->vdc_reg + (reg)); -+} -+ -+#define REG_READ(reg) REGISTER_READ(dev, (reg)) -+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg, -+ uint32_t val) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ -+ iowrite32((val), dev_priv->vdc_reg + (reg)); -+} -+ -+#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val)) -+ -+static inline void REGISTER_WRITE16(struct drm_device *dev, -+ uint32_t reg, uint32_t val) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ -+ iowrite16((val), dev_priv->vdc_reg + (reg)); -+} -+ -+#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val)) -+ -+static inline void REGISTER_WRITE8(struct 
drm_device *dev, -+ uint32_t reg, uint32_t val) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ -+ iowrite8((val), dev_priv->vdc_reg + (reg)); -+} -+ -+#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val)) -+ -+#define PSB_ALIGN_TO(_val, _align) \ -+ (((_val) + ((_align) - 1)) & ~((_align) - 1)) -+#define PSB_WVDC32(_val, _offs) \ -+ iowrite32(_val, dev_priv->vdc_reg + (_offs)) -+#define PSB_RVDC32(_offs) \ -+ ioread32(dev_priv->vdc_reg + (_offs)) -+#define PSB_WSGX32(_val, _offs) \ -+ iowrite32(_val, dev_priv->sgx_reg + (_offs)) -+#define PSB_RSGX32(_offs) \ -+ ioread32(dev_priv->sgx_reg + (_offs)) -+#define PSB_WMSVDX32(_val, _offs) \ -+ iowrite32(_val, dev_priv->msvdx_reg + (_offs)) -+#define PSB_RMSVDX32(_offs) \ -+ ioread32(dev_priv->msvdx_reg + (_offs)) -+ -+#define PSB_ALPL(_val, _base) \ -+ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) -+#define PSB_ALPLM(_val, _base) \ -+ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK)) -+ -+#define PSB_D_RENDER (1 << 16) -+ -+#define PSB_D_GENERAL (1 << 0) -+#define PSB_D_INIT (1 << 1) -+#define PSB_D_IRQ (1 << 2) -+#define PSB_D_FW (1 << 3) -+#define PSB_D_PERF (1 << 4) -+#define PSB_D_TMP (1 << 5) -+#define PSB_D_PM (1 << 6) -+ -+extern int drm_psb_debug; -+extern int drm_psb_no_fb; -+extern int drm_psb_disable_vsync; -+extern int drm_idle_check_interval; -+extern int drm_psb_ospm; -+ -+#define PSB_DEBUG_FW(_fmt, _arg...) \ -+ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg) -+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \ -+ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg) -+#define PSB_DEBUG_INIT(_fmt, _arg...) \ -+ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg) -+#define PSB_DEBUG_IRQ(_fmt, _arg...) \ -+ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg) -+#define PSB_DEBUG_RENDER(_fmt, _arg...) \ -+ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg) -+#define PSB_DEBUG_PERF(_fmt, _arg...) \ -+ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg) -+#define PSB_DEBUG_TMP(_fmt, _arg...) \ -+ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg) -+#define PSB_DEBUG_PM(_fmt, _arg...) \ -+ PSB_DEBUG(PSB_D_PM, _fmt, ##_arg) -+ -+#if DRM_DEBUG_CODE -+#define PSB_DEBUG(_flag, _fmt, _arg...) \ -+ do { \ -+ if (unlikely((_flag) & drm_psb_debug)) \ -+ printk(KERN_DEBUG \ -+ "[psb:0x%02x:%s] " _fmt , _flag, \ -+ __func__ , ##_arg); \ -+ } while (0) -+#else -+#define PSB_DEBUG(_fmt, _arg...) do { } while (0) -+#endif -+ -+#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \ -+ ((dev)->pci_device == 0x8109)) -+ -+#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100) -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/psb_fb.c b/drivers/gpu/drm/psb/psb_fb.c ---- a/drivers/gpu/drm/psb/psb_fb.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_fb.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,1687 @@ -+/************************************************************************** -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. 
-+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. -+ * -+ **************************************************************************/ -+ -+#include <linux/module.h> -+#include <linux/kernel.h> -+#include <linux/errno.h> -+#include <linux/string.h> -+#include <linux/mm.h> -+#include <linux/tty.h> -+#include <linux/slab.h> -+#include <linux/delay.h> -+#include <linux/fb.h> -+#include <linux/init.h> -+#include <linux/console.h> -+ -+#include <drm/drmP.h> -+#include <drm/drm.h> -+#include <drm/drm_crtc.h> -+ -+#include "psb_drv.h" -+#include "psb_intel_reg.h" -+#include "psb_intel_drv.h" -+#include "ttm/ttm_userobj_api.h" -+#include "psb_fb.h" -+#include "psb_sgx.h" -+ -+static int fill_fb_bitfield(struct fb_var_screeninfo *var, int depth) -+{ -+ switch (depth) { -+ case 8: -+ var->red.offset = 0; -+ var->green.offset = 0; -+ var->blue.offset = 0; -+ var->red.length = 8; -+ var->green.length = 8; -+ var->blue.length = 8; -+ var->transp.length = 0; -+ var->transp.offset = 0; -+ break; -+ case 15: -+ var->red.offset = 10; -+ var->green.offset = 5; -+ var->blue.offset = 0; -+ var->red.length = 5; -+ var->green.length = 5; -+ var->blue.length = 5; -+ var->transp.length = 1; -+ var->transp.offset = 15; -+ break; -+ case 16: -+ var->red.offset = 11; -+ var->green.offset = 5; -+ var->blue.offset = 0; -+ var->red.length = 5; -+ var->green.length = 6; -+ var->blue.length = 5; -+ var->transp.length = 0; -+ var->transp.offset = 0; -+ break; -+ case 24: -+ var->red.offset = 16; -+ var->green.offset = 8; -+ var->blue.offset = 0; -+ var->red.length = 8; -+ var->green.length = 8; -+ var->blue.length = 8; -+ var->transp.length = 0; -+ var->transp.offset = 0; -+ break; -+ case 32: -+ var->red.offset = 16; -+ var->green.offset = 8; -+ var->blue.offset = 0; -+ var->red.length = 8; -+ var->green.length = 8; -+ var->blue.length = 8; -+ var->transp.length = 8; -+ var->transp.offset = 24; -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb); -+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb, -+ struct drm_file *file_priv, -+ unsigned int *handle); -+ -+static const struct drm_framebuffer_funcs psb_fb_funcs = { -+ .destroy = psb_user_framebuffer_destroy, -+ .create_handle = psb_user_framebuffer_create_handle, -+}; -+ -+struct psbfb_par { -+ struct drm_device *dev; -+ struct psb_framebuffer *psbfb; -+ -+ int dpms_state; -+ -+ int crtc_count; -+ /* crtc currently bound to this */ -+ uint32_t crtc_ids[2]; -+}; -+ -+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16) -+ -+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green, -+ unsigned blue, unsigned transp, -+ struct fb_info *info) -+{ -+ struct psbfb_par *par = info->par; -+ struct drm_framebuffer *fb = &par->psbfb->base; -+ uint32_t v; -+ -+ if (!fb) -+ return -ENOMEM; -+ -+ if (regno > 255) -+ return 1; -+ -+#if 0 /* JB: not drop, check that this works */ -+ if (fb->bits_per_pixel == 8) { -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, -+ head) { -+ for (i = 0; i < par->crtc_count; i++) -+ if (crtc->base.id == par->crtc_ids[i]) -+ break; -+ -+ if (i == par->crtc_count) -+ continue; -+ -+ if (crtc->funcs->gamma_set) -+ 
crtc->funcs->gamma_set(crtc, red, green, -+ blue, regno); -+ } -+ return 0; -+ } -+#endif -+ -+ red = CMAP_TOHW(red, info->var.red.length); -+ blue = CMAP_TOHW(blue, info->var.blue.length); -+ green = CMAP_TOHW(green, info->var.green.length); -+ transp = CMAP_TOHW(transp, info->var.transp.length); -+ -+ v = (red << info->var.red.offset) | -+ (green << info->var.green.offset) | -+ (blue << info->var.blue.offset) | -+ (transp << info->var.transp.offset); -+ -+ if (regno < 16) { -+ switch (fb->bits_per_pixel) { -+ case 16: -+ ((uint32_t *) info->pseudo_palette)[regno] = v; -+ break; -+ case 24: -+ case 32: -+ ((uint32_t *) info->pseudo_palette)[regno] = v; -+ break; -+ } -+ } -+ -+ return 0; -+} -+ -+static struct drm_display_mode *psbfb_find_first_mode(struct -+ fb_var_screeninfo -+ *var, -+ struct fb_info *info, -+ struct drm_crtc -+ *crtc) -+{ -+ struct psbfb_par *par = info->par; -+ struct drm_device *dev = par->dev; -+ struct drm_display_mode *drm_mode; -+ struct drm_display_mode *last_mode = NULL; -+ struct drm_connector *connector; -+ int found; -+ -+ found = 0; -+ list_for_each_entry(connector, &dev->mode_config.connector_list, -+ head) { -+ if (connector->encoder && connector->encoder->crtc == crtc) { -+ found = 1; -+ break; -+ } -+ } -+ -+ /* found no connector, bail */ -+ if (!found) -+ return NULL; -+ -+ found = 0; -+ list_for_each_entry(drm_mode, &connector->modes, head) { -+ if (drm_mode->hdisplay == var->xres && -+ drm_mode->vdisplay == var->yres -+ && drm_mode->clock != 0) { -+ found = 1; -+ last_mode = drm_mode; -+ } -+ } -+ -+ /* No mode matching mode found */ -+ if (!found) -+ return NULL; -+ -+ return last_mode; -+} -+ -+static int psbfb_check_var(struct fb_var_screeninfo *var, -+ struct fb_info *info) -+{ -+ struct psbfb_par *par = info->par; -+ struct psb_framebuffer *psbfb = par->psbfb; -+ struct drm_device *dev = par->dev; -+ int ret; -+ int depth; -+ int pitch; -+ int bpp = var->bits_per_pixel; -+ -+ if (!psbfb) -+ return -ENOMEM; -+ -+ if (!var->pixclock) -+ return -EINVAL; -+ -+ /* don't support virtuals for now */ -+ if (var->xres_virtual > var->xres) -+ return -EINVAL; -+ -+ if (var->yres_virtual > var->yres) -+ return -EINVAL; -+ -+ switch (bpp) { -+#if 0 /* JB: for now only support true color */ -+ case 8: -+ depth = 8; -+ break; -+#endif -+ case 16: -+ depth = (var->green.length == 6) ? 16 : 15; -+ break; -+ case 24: /* assume this is 32bpp / depth 24 */ -+ bpp = 32; -+ /* fallthrough */ -+ case 32: -+ depth = (var->transp.length > 0) ? 32 : 24; -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f; -+ -+ /* Check that we can resize */ -+ if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) { -+#if 1 -+ /* Need to resize the fb object. -+ * But the generic fbdev code doesn't really understand -+ * that we can do this. So disable for now. -+ */ -+ DRM_INFO("Can't support requested size, too big!\n"); -+ return -EINVAL; -+#else -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ struct ttm_bo_device *bdev = &dev_priv->bdev; -+ struct ttm_buffer_object *fbo = NULL; -+ struct ttm_bo_kmap_obj tmp_kmap; -+ -+ /* a temporary BO to check if we could resize in setpar. -+ * Therefore no need to set NO_EVICT. 
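-+		 * (This probe path is compiled out; the #if 1 branch
-+		 * above simply rejects oversized modes with -EINVAL.)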
-+ */ -+ ret = ttm_buffer_object_create(bdev, -+ pitch * var->yres, -+ ttm_bo_type_kernel, -+ TTM_PL_FLAG_TT | -+ TTM_PL_FLAG_VRAM | -+ TTM_PL_FLAG_NO_EVICT, -+ 0, 0, &fbo); -+ if (ret || !fbo) -+ return -ENOMEM; -+ -+ ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap); -+ if (ret) { -+ ttm_bo_usage_deref_unlocked(&fbo); -+ return -EINVAL; -+ } -+ -+ ttm_bo_kunmap(&tmp_kmap); -+ /* destroy our current fbo! */ -+ ttm_bo_usage_deref_unlocked(&fbo); -+#endif -+ } -+ -+ ret = fill_fb_bitfield(var, depth); -+ if (ret) -+ return ret; -+ -+#if 1 -+ /* Here we walk the output mode list and look for modes. If we haven't -+ * got it, then bail. Not very nice, so this is disabled. -+ * In the set_par code, we create our mode based on the incoming -+ * parameters. Nicer, but may not be desired by some. -+ */ -+ { -+ struct drm_crtc *crtc; -+ int i; -+ -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, -+ head) { -+ struct psb_intel_crtc *psb_intel_crtc = -+ to_psb_intel_crtc(crtc); -+ -+ for (i = 0; i < par->crtc_count; i++) -+ if (crtc->base.id == par->crtc_ids[i]) -+ break; -+ -+ if (i == par->crtc_count) -+ continue; -+ -+ if (psb_intel_crtc->mode_set.num_connectors == 0) -+ continue; -+ -+ if (!psbfb_find_first_mode(&info->var, info, crtc)) -+ return -EINVAL; -+ } -+ } -+#else -+ (void) i; -+ (void) dev; /* silence warnings */ -+ (void) crtc; -+ (void) drm_mode; -+ (void) connector; -+#endif -+ -+ return 0; -+} -+ -+/* this will let fbcon do the mode init */ -+static int psbfb_set_par(struct fb_info *info) -+{ -+ struct psbfb_par *par = info->par; -+ struct psb_framebuffer *psbfb = par->psbfb; -+ struct drm_framebuffer *fb = &psbfb->base; -+ struct drm_device *dev = par->dev; -+ struct fb_var_screeninfo *var = &info->var; -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ struct drm_display_mode *drm_mode; -+ int pitch; -+ int depth; -+ int bpp = var->bits_per_pixel; -+ -+ if (!fb) -+ return -ENOMEM; -+ -+ switch (bpp) { -+ case 8: -+ depth = 8; -+ break; -+ case 16: -+ depth = (var->green.length == 6) ? 16 : 15; -+ break; -+ case 24: /* assume this is 32bpp / depth 24 */ -+ bpp = 32; -+ /* fallthrough */ -+ case 32: -+ depth = (var->transp.length > 0) ? 32 : 24; -+ break; -+ default: -+ DRM_ERROR("Illegal BPP\n"); -+ return -EINVAL; -+ } -+ -+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f; -+ -+ if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) { -+#if 1 -+ /* Need to resize the fb object. -+ * But the generic fbdev code doesn't really understand -+ * that we can do this. So disable for now. 
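-+		 * Oversized requests are therefore rejected with -EINVAL
-+		 * below, matching the check in psbfb_check_var().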
-+ */ -+ DRM_INFO("Can't support requested size, too big!\n"); -+ return -EINVAL; -+#else -+ int ret; -+ struct ttm_buffer_object *fbo = NULL, *tfbo; -+ struct ttm_bo_kmap_obj tmp_kmap, tkmap; -+ -+ ret = ttm_buffer_object_create(bdev, -+ pitch * var->yres, -+ ttm_bo_type_kernel, -+ TTM_PL_FLAG_MEM_TT | -+ TTM_PL_FLAG_MEM_VRAM | -+ TTM_PL_FLAG_NO_EVICT, -+ 0, 0, &fbo); -+ if (ret || !fbo) { -+ DRM_ERROR -+ ("failed to allocate new resized framebuffer\n"); -+ return -ENOMEM; -+ } -+ -+ ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap); -+ if (ret) { -+ DRM_ERROR("failed to kmap framebuffer.\n"); -+ ttm_bo_usage_deref_unlocked(&fbo); -+ return -EINVAL; -+ } -+ -+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", -+ fb->width, fb->height, fb->offset, fbo); -+ -+ /* set new screen base */ -+ info->screen_base = tmp_kmap.virtual; -+ -+ tkmap = fb->kmap; -+ fb->kmap = tmp_kmap; -+ ttm_bo_kunmap(&tkmap); -+ -+ tfbo = fb->bo; -+ fb->bo = fbo; -+ ttm_bo_usage_deref_unlocked(&tfbo); -+#endif -+ } -+ -+ psbfb->offset = psbfb->bo->offset - dev_priv->pg->gatt_start; -+ fb->width = var->xres; -+ fb->height = var->yres; -+ fb->bits_per_pixel = bpp; -+ fb->pitch = pitch; -+ fb->depth = depth; -+ -+ info->fix.line_length = psbfb->base.pitch; -+ info->fix.visual = -+ (psbfb->base.depth == -+ 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; -+ -+ /* some fbdev's apps don't want these to change */ -+ info->fix.smem_start = dev->mode_config.fb_base + psbfb->offset; -+ -+#if 0 -+ /* relates to resize - disable */ -+ info->fix.smem_len = info->fix.line_length * var->yres; -+ info->screen_size = info->fix.smem_len; /* ??? */ -+#endif -+ -+ /* Should we walk the output's modelist or just create our own ??? -+ * For now, we create and destroy a mode based on the incoming -+ * parameters. But there's commented out code below which scans -+ * the output list too. -+ */ -+#if 1 -+ /* This code is now in the for loop futher down. */ -+#endif -+ -+ { -+ struct drm_crtc *crtc; -+ int ret; -+ int i; -+ -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, -+ head) { -+ struct psb_intel_crtc *psb_intel_crtc = -+ to_psb_intel_crtc(crtc); -+ -+ for (i = 0; i < par->crtc_count; i++) -+ if (crtc->base.id == par->crtc_ids[i]) -+ break; -+ -+ if (i == par->crtc_count) -+ continue; -+ -+ if (psb_intel_crtc->mode_set.num_connectors == 0) -+ continue; -+ -+#if 1 -+ drm_mode = -+ psbfb_find_first_mode(&info->var, info, crtc); -+ if (!drm_mode) -+ DRM_ERROR("No matching mode found\n"); -+ psb_intel_crtc->mode_set.mode = drm_mode; -+#endif -+ -+#if 0 /* FIXME: TH */ -+ if (crtc->fb == psb_intel_crtc->mode_set.fb) { -+#endif -+ DRM_DEBUG -+ ("setting mode on crtc %p with id %u\n", -+ crtc, crtc->base.id); -+ ret = -+ crtc->funcs-> -+ set_config(&psb_intel_crtc->mode_set); -+ if (ret) { -+ DRM_ERROR("Failed setting mode\n"); -+ return ret; -+ } -+#if 0 -+ } -+#endif -+ } -+ DRM_DEBUG("Set par returned OK.\n"); -+ return 0; -+ } -+ -+ return 0; -+} -+ -+static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf, -+ unsigned size) -+{ -+ int ret = 0; -+ int i; -+ unsigned submit_size; -+ -+ while (size > 0) { -+ submit_size = (size < 0x60) ? 
size : 0x60; -+ size -= submit_size; -+ ret = psb_2d_wait_available(dev_priv, submit_size); -+ if (ret) -+ return ret; -+ -+ submit_size <<= 2; -+ for (i = 0; i < submit_size; i += 4) { -+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i); -+ } -+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4); -+ } -+ return 0; -+} -+ -+static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv, -+ uint32_t dst_offset, uint32_t dst_stride, -+ uint32_t dst_format, uint16_t dst_x, -+ uint16_t dst_y, uint16_t size_x, -+ uint16_t size_y, uint32_t fill) -+{ -+ uint32_t buffer[10]; -+ uint32_t *buf; -+ -+ buf = buffer; -+ -+ *buf++ = PSB_2D_FENCE_BH; -+ -+ *buf++ = -+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride << -+ PSB_2D_DST_STRIDE_SHIFT); -+ *buf++ = dst_offset; -+ -+ *buf++ = -+ PSB_2D_BLIT_BH | -+ PSB_2D_ROT_NONE | -+ PSB_2D_COPYORDER_TL2BR | -+ PSB_2D_DSTCK_DISABLE | -+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY; -+ -+ *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT; -+ *buf++ = -+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y << -+ PSB_2D_DST_YSTART_SHIFT); -+ *buf++ = -+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y << -+ PSB_2D_DST_YSIZE_SHIFT); -+ *buf++ = PSB_2D_FLUSH_BH; -+ -+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer); -+} -+ -+static void psbfb_fillrect_accel(struct fb_info *info, -+ const struct fb_fillrect *r) -+{ -+ struct psbfb_par *par = info->par; -+ struct psb_framebuffer *psbfb = par->psbfb; -+ struct drm_framebuffer *fb = &psbfb->base; -+ struct drm_psb_private *dev_priv = par->dev->dev_private; -+ uint32_t offset; -+ uint32_t stride; -+ uint32_t format; -+ -+ if (!fb) -+ return; -+ -+ offset = psbfb->offset; -+ stride = fb->pitch; -+ -+ switch (fb->depth) { -+ case 8: -+ format = PSB_2D_DST_332RGB; -+ break; -+ case 15: -+ format = PSB_2D_DST_555RGB; -+ break; -+ case 16: -+ format = PSB_2D_DST_565RGB; -+ break; -+ case 24: -+ case 32: -+ /* this is wrong but since we don't do blending its okay */ -+ format = PSB_2D_DST_8888ARGB; -+ break; -+ default: -+ /* software fallback */ -+ cfb_fillrect(info, r); -+ return; -+ } -+ -+ psb_accel_2d_fillrect(dev_priv, -+ offset, stride, format, -+ r->dx, r->dy, r->width, r->height, r->color); -+} -+ -+static void psbfb_fillrect(struct fb_info *info, -+ const struct fb_fillrect *rect) -+{ -+ struct psbfb_par *par = info->par; -+ struct drm_device *dev = par->dev; -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ -+ if (unlikely(info->state != FBINFO_STATE_RUNNING)) -+ return; -+ -+ if (info->flags & FBINFO_HWACCEL_DISABLED) -+ return cfb_fillrect(info, rect); -+ -+ if (psb_2d_trylock(dev_priv)) { -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ psbfb_fillrect_accel(info, rect); -+ psb_2d_unlock(dev_priv); -+ if (drm_psb_ospm && IS_MRST(dev)) -+ schedule_delayed_work(&dev_priv->scheduler.wq, 1); -+ } else -+ cfb_fillrect(info, rect); -+} -+ -+uint32_t psb_accel_2d_copy_direction(int xdir, int ydir) -+{ -+ if (xdir < 0) -+ return (ydir < -+ 0) ? PSB_2D_COPYORDER_BR2TL : -+ PSB_2D_COPYORDER_TR2BL; -+ else -+ return (ydir < -+ 0) ? 
PSB_2D_COPYORDER_BL2TR : -+ PSB_2D_COPYORDER_TL2BR; -+} -+ -+/* -+ * @srcOffset in bytes -+ * @srcStride in bytes -+ * @srcFormat psb 2D format defines -+ * @dstOffset in bytes -+ * @dstStride in bytes -+ * @dstFormat psb 2D format defines -+ * @srcX offset in pixels -+ * @srcY offset in pixels -+ * @dstX offset in pixels -+ * @dstY offset in pixels -+ * @sizeX of the copied area -+ * @sizeY of the copied area -+ */ -+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv, -+ uint32_t src_offset, uint32_t src_stride, -+ uint32_t src_format, uint32_t dst_offset, -+ uint32_t dst_stride, uint32_t dst_format, -+ uint16_t src_x, uint16_t src_y, -+ uint16_t dst_x, uint16_t dst_y, -+ uint16_t size_x, uint16_t size_y) -+{ -+ uint32_t blit_cmd; -+ uint32_t buffer[10]; -+ uint32_t *buf; -+ uint32_t direction; -+ -+ buf = buffer; -+ -+ direction = -+ psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y); -+ -+ if (direction == PSB_2D_COPYORDER_BR2TL || -+ direction == PSB_2D_COPYORDER_TR2BL) { -+ src_x += size_x - 1; -+ dst_x += size_x - 1; -+ } -+ if (direction == PSB_2D_COPYORDER_BR2TL || -+ direction == PSB_2D_COPYORDER_BL2TR) { -+ src_y += size_y - 1; -+ dst_y += size_y - 1; -+ } -+ -+ blit_cmd = -+ PSB_2D_BLIT_BH | -+ PSB_2D_ROT_NONE | -+ PSB_2D_DSTCK_DISABLE | -+ PSB_2D_SRCCK_DISABLE | -+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction; -+ -+ *buf++ = PSB_2D_FENCE_BH; -+ *buf++ = -+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride << -+ PSB_2D_DST_STRIDE_SHIFT); -+ *buf++ = dst_offset; -+ *buf++ = -+ PSB_2D_SRC_SURF_BH | src_format | (src_stride << -+ PSB_2D_SRC_STRIDE_SHIFT); -+ *buf++ = src_offset; -+ *buf++ = -+ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) | -+ (src_y << PSB_2D_SRCOFF_YSTART_SHIFT); -+ *buf++ = blit_cmd; -+ *buf++ = -+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y << -+ PSB_2D_DST_YSTART_SHIFT); -+ *buf++ = -+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y << -+ PSB_2D_DST_YSIZE_SHIFT); -+ *buf++ = PSB_2D_FLUSH_BH; -+ -+ return psbfb_2d_submit(dev_priv, buffer, buf - buffer); -+} -+ -+static void psbfb_copyarea_accel(struct fb_info *info, -+ const struct fb_copyarea *a) -+{ -+ struct psbfb_par *par = info->par; -+ struct psb_framebuffer *psbfb = par->psbfb; -+ struct drm_framebuffer *fb = &psbfb->base; -+ struct drm_psb_private *dev_priv = par->dev->dev_private; -+ uint32_t offset; -+ uint32_t stride; -+ uint32_t src_format; -+ uint32_t dst_format; -+ -+ if (!fb) -+ return; -+ -+ offset = psbfb->offset; -+ stride = fb->pitch; -+ -+ switch (fb->depth) { -+ case 8: -+ src_format = PSB_2D_SRC_332RGB; -+ dst_format = PSB_2D_DST_332RGB; -+ break; -+ case 15: -+ src_format = PSB_2D_SRC_555RGB; -+ dst_format = PSB_2D_DST_555RGB; -+ break; -+ case 16: -+ src_format = PSB_2D_SRC_565RGB; -+ dst_format = PSB_2D_DST_565RGB; -+ break; -+ case 24: -+ case 32: -+ /* this is wrong but since we don't do blending its okay */ -+ src_format = PSB_2D_SRC_8888ARGB; -+ dst_format = PSB_2D_DST_8888ARGB; -+ break; -+ default: -+ /* software fallback */ -+ cfb_copyarea(info, a); -+ return; -+ } -+ -+ psb_accel_2d_copy(dev_priv, -+ offset, stride, src_format, -+ offset, stride, dst_format, -+ a->sx, a->sy, a->dx, a->dy, a->width, a->height); -+} -+ -+static void psbfb_copyarea(struct fb_info *info, -+ const struct fb_copyarea *region) -+{ -+ struct psbfb_par *par = info->par; -+ struct drm_device *dev = par->dev; -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ -+ if (unlikely(info->state != FBINFO_STATE_RUNNING)) -+ return; -+ -+ if (info->flags & FBINFO_HWACCEL_DISABLED) 
-+ return cfb_copyarea(info, region); -+ -+ if (psb_2d_trylock(dev_priv)) { -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ psbfb_copyarea_accel(info, region); -+ psb_2d_unlock(dev_priv); -+ if (drm_psb_ospm && IS_MRST(dev)) -+ schedule_delayed_work(&dev_priv->scheduler.wq, 1); -+ } else -+ cfb_copyarea(info, region); -+} -+ -+void psbfb_imageblit(struct fb_info *info, const struct fb_image *image) -+{ -+ if (unlikely(info->state != FBINFO_STATE_RUNNING)) -+ return; -+ -+ cfb_imageblit(info, image); -+} -+ -+static void psbfb_onoff(struct fb_info *info, int dpms_mode) -+{ -+ struct psbfb_par *par = info->par; -+ struct drm_device *dev = par->dev; -+ struct drm_crtc *crtc; -+ struct drm_encoder *encoder; -+ int i; -+ -+ /* -+ * For each CRTC in this fb, find all associated encoders -+ * and turn them off, then turn off the CRTC. -+ */ -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ struct drm_crtc_helper_funcs *crtc_funcs = -+ crtc->helper_private; -+ -+ for (i = 0; i < par->crtc_count; i++) -+ if (crtc->base.id == par->crtc_ids[i]) -+ break; -+ -+ if (i == par->crtc_count) -+ continue; -+ -+ if (dpms_mode == DRM_MODE_DPMS_ON) -+ crtc_funcs->dpms(crtc, dpms_mode); -+ -+ /* Found a CRTC on this fb, now find encoders */ -+ list_for_each_entry(encoder, -+ &dev->mode_config.encoder_list, head) { -+ if (encoder->crtc == crtc) { -+ struct drm_encoder_helper_funcs -+ *encoder_funcs; -+ encoder_funcs = encoder->helper_private; -+ encoder_funcs->dpms(encoder, dpms_mode); -+ } -+ } -+ -+ if (dpms_mode == DRM_MODE_DPMS_OFF) -+ crtc_funcs->dpms(crtc, dpms_mode); -+ } -+} -+ -+static int psbfb_blank(int blank_mode, struct fb_info *info) -+{ -+ struct psbfb_par *par = info->par; -+ -+ par->dpms_state = blank_mode; -+ PSB_DEBUG_PM("psbfb_blank \n"); -+ switch (blank_mode) { -+ case FB_BLANK_UNBLANK: -+ psbfb_onoff(info, DRM_MODE_DPMS_ON); -+ break; -+ case FB_BLANK_NORMAL: -+ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY); -+ break; -+ case FB_BLANK_HSYNC_SUSPEND: -+ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY); -+ break; -+ case FB_BLANK_VSYNC_SUSPEND: -+ psbfb_onoff(info, DRM_MODE_DPMS_SUSPEND); -+ break; -+ case FB_BLANK_POWERDOWN: -+ psbfb_onoff(info, DRM_MODE_DPMS_OFF); -+ break; -+ } -+ -+ return 0; -+} -+ -+ -+static int psbfb_kms_off(struct drm_device *dev, int suspend) -+{ -+ struct drm_framebuffer *fb = 0; -+ DRM_DEBUG("psbfb_kms_off_ioctl\n"); -+ -+ mutex_lock(&dev->mode_config.mutex); -+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) { -+ struct fb_info *info = fb->fbdev; -+ -+ if (suspend) -+ fb_set_suspend(info, 1); -+ } -+ mutex_unlock(&dev->mode_config.mutex); -+ -+ return 0; -+} -+ -+int psbfb_kms_off_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ int ret; -+ -+ if (drm_psb_no_fb) -+ return 0; -+ acquire_console_sem(); -+ ret = psbfb_kms_off(dev, 0); -+ release_console_sem(); -+ -+ return ret; -+} -+ -+static int psbfb_kms_on(struct drm_device *dev, int resume) -+{ -+ struct drm_framebuffer *fb = 0; -+ -+ DRM_DEBUG("psbfb_kms_on_ioctl\n"); -+ -+ mutex_lock(&dev->mode_config.mutex); -+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) { -+ struct fb_info *info = fb->fbdev; -+ -+ if (resume) -+ fb_set_suspend(info, 0); -+ -+ } -+ mutex_unlock(&dev->mode_config.mutex); -+ -+ return 0; -+} -+ -+int psbfb_kms_on_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ int ret; -+ -+ if (drm_psb_no_fb) -+ return 0; -+ acquire_console_sem(); -+ ret = psbfb_kms_on(dev, 0); -+ release_console_sem(); -+ 
drm_helper_disable_unused_functions(dev); -+ return ret; -+} -+ -+void psbfb_suspend(struct drm_device *dev) -+{ -+ acquire_console_sem(); -+ psbfb_kms_off(dev, 1); -+ release_console_sem(); -+} -+ -+void psbfb_resume(struct drm_device *dev) -+{ -+ acquire_console_sem(); -+ psbfb_kms_on(dev, 1); -+ release_console_sem(); -+ drm_helper_disable_unused_functions(dev); -+} -+ -+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma) -+{ -+ struct psbfb_par *par = info->par; -+ struct psb_framebuffer *psbfb = par->psbfb; -+ struct ttm_buffer_object *bo = psbfb->bo; -+ unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; -+ unsigned long offset = vma->vm_pgoff; -+ -+ if (vma->vm_pgoff != 0) -+ return -EINVAL; -+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) -+ return -EINVAL; -+ if (offset + size > bo->num_pages) -+ return -EINVAL; -+ -+ mutex_lock(&bo->mutex); -+ if (!psbfb->addr_space) -+ psbfb->addr_space = vma->vm_file->f_mapping; -+ mutex_unlock(&bo->mutex); -+ -+ return ttm_fbdev_mmap(vma, bo); -+} -+ -+int psbfb_sync(struct fb_info *info) -+{ -+ struct psbfb_par *par = info->par; -+ struct drm_psb_private *dev_priv = par->dev->dev_private; -+ -+ if (psb_2d_trylock(dev_priv)) { -+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i0) -+ psb_idle_2d(par->dev); -+ psb_2d_unlock(dev_priv); -+ } else -+ udelay(5); -+ -+ return 0; -+} -+ -+static struct fb_ops psbfb_ops = { -+ .owner = THIS_MODULE, -+ .fb_check_var = psbfb_check_var, -+ .fb_set_par = psbfb_set_par, -+ .fb_setcolreg = psbfb_setcolreg, -+ .fb_fillrect = psbfb_fillrect, -+ .fb_copyarea = psbfb_copyarea, -+ .fb_imageblit = psbfb_imageblit, -+ .fb_mmap = psbfb_mmap, -+ .fb_sync = psbfb_sync, -+ .fb_blank = psbfb_blank, -+}; -+ -+static struct drm_mode_set panic_mode; -+ -+int psbfb_panic(struct notifier_block *n, unsigned long ununsed, -+ void *panic_str) -+{ -+ DRM_ERROR("panic occurred, switching back to text console\n"); -+ drm_crtc_helper_set_config(&panic_mode); -+ -+ return 0; -+} -+EXPORT_SYMBOL(psbfb_panic); -+ -+static struct notifier_block paniced = { -+ .notifier_call = psbfb_panic, -+}; -+ -+ -+static struct drm_framebuffer *psb_framebuffer_create -+ (struct drm_device *dev, struct drm_mode_fb_cmd *r, -+ void *mm_private) -+{ -+ struct psb_framebuffer *fb; -+ int ret; -+ -+ fb = kzalloc(sizeof(*fb), GFP_KERNEL); -+ if (!fb) -+ return NULL; -+ -+ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs); -+ -+ if (ret) -+ goto err; -+ -+ drm_helper_mode_fill_fb_struct(&fb->base, r); -+ -+ fb->bo = mm_private; -+ -+ return &fb->base; -+ -+err: -+ kfree(fb); -+ return NULL; -+} -+ -+static struct drm_framebuffer *psb_user_framebuffer_create -+ (struct drm_device *dev, struct drm_file *filp, -+ struct drm_mode_fb_cmd *r) -+{ -+ struct ttm_buffer_object *bo = NULL; -+ uint64_t size; -+ -+ bo = ttm_buffer_object_lookup(psb_fpriv(filp)->tfile, r->handle); -+ if (!bo) -+ return NULL; -+ -+ /* JB: TODO not drop, make smarter */ -+ size = ((uint64_t) bo->num_pages) << PAGE_SHIFT; -+ if (size < r->width * r->height * 4) -+ return NULL; -+ -+ /* JB: TODO not drop, refcount buffer */ -+ return psb_framebuffer_create(dev, r, bo); -+} -+ -+int psbfb_create(struct drm_device *dev, uint32_t fb_width, -+ uint32_t fb_height, uint32_t surface_width, -+ uint32_t surface_height, struct psb_framebuffer **psbfb_p) -+{ -+ struct fb_info *info; -+ struct psbfb_par *par; -+ struct drm_framebuffer *fb; -+ struct psb_framebuffer *psbfb; -+ struct ttm_bo_kmap_obj tmp_kmap; -+ struct drm_mode_fb_cmd mode_cmd; -+ struct device 
*device = &dev->pdev->dev; -+ struct ttm_bo_device *bdev = &psb_priv(dev)->bdev; -+ int size, aligned_size, ret; -+ struct ttm_buffer_object *fbo = NULL; -+ bool is_iomem; -+ -+ mode_cmd.width = surface_width; /* crtc->desired_mode->hdisplay; */ -+ mode_cmd.height = surface_height; /* crtc->desired_mode->vdisplay; */ -+ -+ mode_cmd.bpp = 32; -+ mode_cmd.pitch = mode_cmd.width * ((mode_cmd.bpp + 1) / 8); -+ mode_cmd.depth = 24; -+ -+ size = mode_cmd.pitch * mode_cmd.height; -+ aligned_size = ALIGN(size, PAGE_SIZE); -+ ret = ttm_buffer_object_create(bdev, -+ aligned_size, -+ ttm_bo_type_kernel, -+ TTM_PL_FLAG_TT | -+ TTM_PL_FLAG_VRAM | -+ TTM_PL_FLAG_NO_EVICT, -+ 0, 0, 0, NULL, &fbo); -+ -+ if (unlikely(ret != 0)) { -+ DRM_ERROR("failed to allocate framebuffer.\n"); -+ return -ENOMEM; -+ } -+ -+ mutex_lock(&dev->struct_mutex); -+ fb = psb_framebuffer_create(dev, &mode_cmd, fbo); -+ if (!fb) { -+ DRM_ERROR("failed to allocate fb.\n"); -+ ret = -ENOMEM; -+ goto out_err0; -+ } -+ psbfb = to_psb_fb(fb); -+ psbfb->bo = fbo; -+ -+ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); -+ info = framebuffer_alloc(sizeof(struct psbfb_par), device); -+ if (!info) { -+ ret = -ENOMEM; -+ goto out_err1; -+ } -+ -+ par = info->par; -+ par->psbfb = psbfb; -+ -+ strcpy(info->fix.id, "psbfb"); -+ info->fix.type = FB_TYPE_PACKED_PIXELS; -+ info->fix.visual = FB_VISUAL_TRUECOLOR; -+ info->fix.type_aux = 0; -+ info->fix.xpanstep = 1; /* doing it in hw */ -+ info->fix.ypanstep = 1; /* doing it in hw */ -+ info->fix.ywrapstep = 0; -+ info->fix.accel = FB_ACCEL_I830; -+ info->fix.type_aux = 0; -+ -+ info->flags = FBINFO_DEFAULT; -+ -+ info->fbops = &psbfb_ops; -+ -+ info->fix.line_length = fb->pitch; -+ info->fix.smem_start = -+ dev->mode_config.fb_base + psbfb->bo->offset; -+ info->fix.smem_len = size; -+ -+ info->flags = FBINFO_DEFAULT; -+ -+ ret = ttm_bo_kmap(psbfb->bo, 0, psbfb->bo->num_pages, &tmp_kmap); -+ if (ret) { -+ DRM_ERROR("error mapping fb: %d\n", ret); -+ goto out_err2; -+ } -+ -+ -+ info->screen_base = ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem); -+ info->screen_size = size; -+ -+ if (is_iomem) -+ memset_io(info->screen_base, 0, size); -+ else -+ memset(info->screen_base, 0, size); -+ -+ info->pseudo_palette = fb->pseudo_palette; -+ info->var.xres_virtual = fb->width; -+ info->var.yres_virtual = fb->height; -+ info->var.bits_per_pixel = fb->bits_per_pixel; -+ info->var.xoffset = 0; -+ info->var.yoffset = 0; -+ info->var.activate = FB_ACTIVATE_NOW; -+ info->var.height = -1; -+ info->var.width = -1; -+ -+ info->var.xres = fb_width; -+ info->var.yres = fb_height; -+ -+ info->fix.mmio_start = pci_resource_start(dev->pdev, 0); -+ info->fix.mmio_len = pci_resource_len(dev->pdev, 0); -+ -+ info->pixmap.size = 64 * 1024; -+ info->pixmap.buf_align = 8; -+ info->pixmap.access_align = 32; -+ info->pixmap.flags = FB_PIXMAP_SYSTEM; -+ info->pixmap.scan_align = 1; -+ -+ DRM_DEBUG("fb depth is %d\n", fb->depth); -+ DRM_DEBUG(" pitch is %d\n", fb->pitch); -+ fill_fb_bitfield(&info->var, fb->depth); -+ -+ fb->fbdev = info; -+ -+ par->dev = dev; -+ -+ /* To allow resizing without swapping buffers */ -+ printk(KERN_INFO"allocated %dx%d fb: 0x%08lx, bo %p\n", -+ psbfb->base.width, -+ psbfb->base.height, psbfb->bo->offset, psbfb->bo); -+ -+ if (psbfb_p) -+ *psbfb_p = psbfb; -+ -+ mutex_unlock(&dev->struct_mutex); -+ -+ return 0; -+out_err2: -+ unregister_framebuffer(info); -+out_err1: -+ fb->funcs->destroy(fb); -+out_err0: -+ mutex_unlock(&dev->struct_mutex); -+ ttm_bo_unref(&fbo); -+ return ret; -+} -+ -+static 
int psbfb_multi_fb_probe_crtc(struct drm_device *dev, -+ struct drm_crtc *crtc) -+{ -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ struct drm_framebuffer *fb = crtc->fb; -+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); -+ struct drm_connector *connector; -+ struct fb_info *info; -+ struct psbfb_par *par; -+ struct drm_mode_set *modeset; -+ unsigned int width, height; -+ int new_fb = 0; -+ int ret, i, conn_count; -+ -+ if (!drm_helper_crtc_in_use(crtc)) -+ return 0; -+ -+ if (!crtc->desired_mode) -+ return 0; -+ -+ width = crtc->desired_mode->hdisplay; -+ height = crtc->desired_mode->vdisplay; -+ -+ /* is there an fb bound to this crtc already */ -+ if (!psb_intel_crtc->mode_set.fb) { -+ ret = -+ psbfb_create(dev, width, height, width, height, -+ &psbfb); -+ if (ret) -+ return -EINVAL; -+ new_fb = 1; -+ } else { -+ fb = psb_intel_crtc->mode_set.fb; -+ if ((fb->width < width) || (fb->height < height)) -+ return -EINVAL; -+ } -+ -+ info = fb->fbdev; -+ par = info->par; -+ -+ modeset = &psb_intel_crtc->mode_set; -+ modeset->fb = fb; -+ conn_count = 0; -+ list_for_each_entry(connector, &dev->mode_config.connector_list, -+ head) { -+ if (connector->encoder) -+ if (connector->encoder->crtc == modeset->crtc) { -+ modeset->connectors[conn_count] = -+ connector; -+ conn_count++; -+ if (conn_count > INTELFB_CONN_LIMIT) -+ BUG(); -+ } -+ } -+ -+ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++) -+ modeset->connectors[i] = NULL; -+ -+ par->crtc_ids[0] = crtc->base.id; -+ -+ modeset->num_connectors = conn_count; -+ if (modeset->mode != modeset->crtc->desired_mode) -+ modeset->mode = modeset->crtc->desired_mode; -+ -+ par->crtc_count = 1; -+ -+ if (new_fb) { -+ info->var.pixclock = -1; -+ if (register_framebuffer(info) < 0) -+ return -EINVAL; -+ } else -+ psbfb_set_par(info); -+ -+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, -+ info->fix.id); -+ -+ /* Switch back to kernel console on panic */ -+ panic_mode = *modeset; -+ atomic_notifier_chain_register(&panic_notifier_list, &paniced); -+ printk(KERN_INFO "registered panic notifier\n"); -+ -+ return 0; -+} -+ -+static int psbfb_multi_fb_probe(struct drm_device *dev) -+{ -+ -+ struct drm_crtc *crtc; -+ int ret = 0; -+ -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ ret = psbfb_multi_fb_probe_crtc(dev, crtc); -+ if (ret) -+ return ret; -+ } -+ return ret; -+} -+ -+static int psbfb_single_fb_probe(struct drm_device *dev) -+{ -+ struct drm_crtc *crtc; -+ struct drm_connector *connector; -+ unsigned int fb_width = (unsigned) -1, fb_height = (unsigned) -1; -+ unsigned int surface_width = 0, surface_height = 0; -+ int new_fb = 0; -+ int crtc_count = 0; -+ int ret, i, conn_count = 0; -+ struct fb_info *info; -+ struct psbfb_par *par; -+ struct drm_mode_set *modeset = NULL; -+ struct drm_framebuffer *fb = NULL; -+ struct psb_framebuffer *psbfb = NULL; -+ -+ /* first up get a count of crtcs now in use and -+ * new min/maxes width/heights */ -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ if (drm_helper_crtc_in_use(crtc)) { -+ if (crtc->desired_mode) { -+ fb = crtc->fb; -+ if (crtc->desired_mode->hdisplay < -+ fb_width) -+ fb_width = -+ crtc->desired_mode->hdisplay; -+ -+ if (crtc->desired_mode->vdisplay < -+ fb_height) -+ fb_height = -+ crtc->desired_mode->vdisplay; -+ -+ if (crtc->desired_mode->hdisplay > -+ surface_width) -+ surface_width = -+ crtc->desired_mode->hdisplay; -+ -+ if (crtc->desired_mode->vdisplay > -+ surface_height) -+ surface_height = -+ 
crtc->desired_mode->vdisplay; -+ -+ } -+ crtc_count++; -+ } -+ } -+ -+ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { -+ /* hmm everyone went away - assume VGA cable just fell out -+ and will come back later. */ -+ return 0; -+ } -+ -+ /* do we have an fb already? */ -+ if (list_empty(&dev->mode_config.fb_kernel_list)) { -+ /* create an fb if we don't have one */ -+ ret = -+ psbfb_create(dev, fb_width, fb_height, surface_width, -+ surface_height, &psbfb); -+ if (ret) -+ return -EINVAL; -+ new_fb = 1; -+ fb = &psbfb->base; -+ } else { -+ fb = list_first_entry(&dev->mode_config.fb_kernel_list, -+ struct drm_framebuffer, filp_head); -+ -+ /* if someone hotplugs something bigger than we have already -+ * allocated, we are pwned. As really we can't resize an -+ * fbdev that is in the wild currently due to fbdev not really -+ * being designed for the lower layers moving stuff around -+ * under it. - so in the grand style of things - punt. */ -+ if ((fb->width < surface_width) -+ || (fb->height < surface_height)) { -+ DRM_ERROR -+ ("Framebuffer not large enough to scale" -+ " console onto.\n"); -+ return -EINVAL; -+ } -+ } -+ -+ info = fb->fbdev; -+ par = info->par; -+ -+ crtc_count = 0; -+ /* okay we need to setup new connector sets in the crtcs */ -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ modeset = &psb_intel_crtc->mode_set; -+ modeset->fb = fb; -+ conn_count = 0; -+ list_for_each_entry(connector, -+ &dev->mode_config.connector_list, -+ head) { -+ if (connector->encoder) -+ if (connector->encoder->crtc == -+ modeset->crtc) { -+ modeset->connectors[conn_count] = -+ connector; -+ conn_count++; -+ if (conn_count > -+ INTELFB_CONN_LIMIT) -+ BUG(); -+ } -+ } -+ -+ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++) -+ modeset->connectors[i] = NULL; -+ -+ par->crtc_ids[crtc_count++] = crtc->base.id; -+ -+ modeset->num_connectors = conn_count; -+ if (modeset->mode != modeset->crtc->desired_mode) -+ modeset->mode = modeset->crtc->desired_mode; -+ } -+ par->crtc_count = crtc_count; -+ -+ if (new_fb) { -+ info->var.pixclock = -1; -+ if (register_framebuffer(info) < 0) -+ return -EINVAL; -+ } else -+ psbfb_set_par(info); -+ -+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, -+ info->fix.id); -+ -+ /* Switch back to kernel console on panic */ -+ panic_mode = *modeset; -+ atomic_notifier_chain_register(&panic_notifier_list, &paniced); -+ printk(KERN_INFO "registered panic notifier\n"); -+ -+ return 0; -+} -+ -+int psbfb_probe(struct drm_device *dev) -+{ -+ int ret = 0; -+ -+ DRM_DEBUG("\n"); -+ -+ /* something has changed in the lower levels of hell - deal with it -+ here */ -+ -+ /* two modes : a) 1 fb to rule all crtcs. -+ b) one fb per crtc. -+ two actions 1) new connected device -+ 2) device removed. -+ case a/1 : if the fb surface isn't big enough - -+ resize the surface fb. -+ if the fb size isn't big enough - resize fb into surface. -+ if everything big enough configure the new crtc/etc. -+ case a/2 : undo the configuration -+ possibly resize down the fb to fit the new configuration. -+ case b/1 : see if it is on a new crtc - setup a new fb and add it. -+ case b/2 : teardown the new fb. 
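-+ (Note: mode a is the only one taken in this driver today; the
-+ i915_fbpercrtc test below is hardwired to 0, so
-+ psbfb_single_fb_probe() ends up handling both actions.)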
-+ */ -+ -+ /* mode a first */ -+ /* search for an fb */ -+ if (0 /*i915_fbpercrtc == 1 */) -+ ret = psbfb_multi_fb_probe(dev); -+ else -+ ret = psbfb_single_fb_probe(dev); -+ -+ return ret; -+} -+EXPORT_SYMBOL(psbfb_probe); -+ -+int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) -+{ -+ struct fb_info *info; -+ struct psb_framebuffer *psbfb = to_psb_fb(fb); -+ -+ if (drm_psb_no_fb) -+ return 0; -+ -+ info = fb->fbdev; -+ -+ if (info) { -+ unregister_framebuffer(info); -+ ttm_bo_kunmap(&psbfb->kmap); -+ ttm_bo_unref(&psbfb->bo); -+ framebuffer_release(info); -+ } -+ -+ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); -+ memset(&panic_mode, 0, sizeof(struct drm_mode_set)); -+ return 0; -+} -+EXPORT_SYMBOL(psbfb_remove); -+ -+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb, -+ struct drm_file *file_priv, -+ unsigned int *handle) -+{ -+ /* JB: TODO currently we can't go from a bo to a handle with ttm */ -+ (void) file_priv; -+ *handle = 0; -+ return 0; -+} -+ -+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb) -+{ -+ struct drm_device *dev = fb->dev; -+ if (fb->fbdev) -+ psbfb_remove(dev, fb); -+ -+ /* JB: TODO not drop, refcount buffer */ -+ drm_framebuffer_cleanup(fb); -+ -+ kfree(fb); -+} -+ -+static const struct drm_mode_config_funcs psb_mode_funcs = { -+ .fb_create = psb_user_framebuffer_create, -+ .fb_changed = psbfb_probe, -+}; -+ -+static void psb_setup_outputs(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ struct drm_connector *connector; -+ -+ if (IS_MRST(dev)) { -+ if (dev_priv->iLVDS_enable) -+ /* Set up integrated LVDS for MRST */ -+ mrst_lvds_init(dev, &dev_priv->mode_dev); -+ else { -+ /* Set up integrated MIPI for MRST */ -+ mrst_dsi_init(dev, &dev_priv->mode_dev); -+ } -+ } else { -+ psb_intel_lvds_init(dev, &dev_priv->mode_dev); -+ /* psb_intel_sdvo_init(dev, SDVOB); */ -+ } -+ -+ list_for_each_entry(connector, &dev->mode_config.connector_list, -+ head) { -+ struct psb_intel_output *psb_intel_output = -+ to_psb_intel_output(connector); -+ struct drm_encoder *encoder = &psb_intel_output->enc; -+ int crtc_mask = 0, clone_mask = 0; -+ -+ /* valid crtcs */ -+ switch (psb_intel_output->type) { -+ case INTEL_OUTPUT_SDVO: -+ crtc_mask = ((1 << 0) | (1 << 1)); -+ clone_mask = (1 << INTEL_OUTPUT_SDVO); -+ break; -+ case INTEL_OUTPUT_LVDS: -+ if (IS_MRST(dev)) -+ crtc_mask = (1 << 0); -+ else -+ crtc_mask = (1 << 1); -+ -+ clone_mask = (1 << INTEL_OUTPUT_LVDS); -+ break; -+ case INTEL_OUTPUT_MIPI: -+ crtc_mask = (1 << 0); -+ clone_mask = (1 << INTEL_OUTPUT_MIPI); -+ break; -+ } -+ encoder->possible_crtcs = crtc_mask; -+ encoder->possible_clones = -+ psb_intel_connector_clones(dev, clone_mask); -+ } -+} -+ -+static void *psb_bo_from_handle(struct drm_device *dev, -+ struct drm_file *file_priv, -+ unsigned int handle) -+{ -+ return ttm_buffer_object_lookup(psb_fpriv(file_priv)->tfile, -+ handle); -+} -+ -+static size_t psb_bo_size(struct drm_device *dev, void *bof) -+{ -+ struct ttm_buffer_object *bo = bof; -+ return bo->num_pages << PAGE_SHIFT; -+} -+ -+static size_t psb_bo_offset(struct drm_device *dev, void *bof) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ struct ttm_buffer_object *bo = bof; -+ -+ size_t offset = bo->offset - dev_priv->pg->gatt_start; -+ DRM_DEBUG("Offset %u\n", offset); -+ return offset; -+} -+ -+static int psb_bo_pin_for_scanout(struct drm_device *dev, void *bo) -+{ -+#if 
0 /* JB: Not used for the drop */
-+ struct ttm_buffer_object *bo = bof;
-+ /* We should do things like check if
-+ the buffer is in a scanout-able place,
-+ and make sure that it's pinned. */
-+#endif
-+ return 0;
-+ }
-+
-+ static int psb_bo_unpin_for_scanout(struct drm_device *dev,
-+ void *bo) {
-+#if 0 /* JB: Not used for the drop */
-+ struct ttm_buffer_object *bo = bof;
-+#endif
-+ return 0;
-+ }
-+
-+ void psb_modeset_init(struct drm_device *dev)
-+ {
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *) dev->dev_private;
-+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
-+ int i;
-+ int num_pipe;
-+
-+ /* Init mm functions */
-+ mode_dev->bo_from_handle = psb_bo_from_handle;
-+ mode_dev->bo_size = psb_bo_size;
-+ mode_dev->bo_offset = psb_bo_offset;
-+ mode_dev->bo_pin_for_scanout = psb_bo_pin_for_scanout;
-+ mode_dev->bo_unpin_for_scanout = psb_bo_unpin_for_scanout;
-+
-+ drm_mode_config_init(dev);
-+
-+ dev->mode_config.min_width = 0;
-+ dev->mode_config.min_height = 0;
-+
-+ dev->mode_config.funcs = (void *) &psb_mode_funcs;
-+
-+ dev->mode_config.max_width = 2048;
-+ dev->mode_config.max_height = 2048;
-+
-+ /* set memory base */
-+ dev->mode_config.fb_base =
-+ pci_resource_start(dev->pdev, 0);
-+
-+ if (IS_MRST(dev))
-+ num_pipe = 1;
-+ else
-+ num_pipe = 2;
-+
-+
-+ for (i = 0; i < num_pipe; i++)
-+ psb_intel_crtc_init(dev, i, mode_dev);
-+
-+ psb_setup_outputs(dev);
-+
-+ /* setup fbs */
-+ /* drm_initial_config(dev, false); */
-+ }
-+
-+ void psb_modeset_cleanup(struct drm_device *dev)
-+ {
-+ drm_mode_config_cleanup(dev);
-+ }
-diff -uNr a/drivers/gpu/drm/psb/psb_fb.h b/drivers/gpu/drm/psb/psb_fb.h
---- a/drivers/gpu/drm/psb/psb_fb.h 1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/psb_fb.h 2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,47 @@
-+/*
-+ * Copyright (c) 2008, Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ * -+ * Authors: -+ * Eric Anholt <eric@anholt.net> -+ * -+ **/ -+ -+#ifndef _PSB_FB_H_ -+#define _PSB_FB_H_ -+ -+struct psb_framebuffer { -+ struct drm_framebuffer base; -+ struct address_space *addr_space; -+ struct ttm_buffer_object *bo; -+ struct ttm_bo_kmap_obj kmap; -+ uint64_t offset; -+}; -+ -+#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base) -+ -+ -+extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask); -+ -+extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t); -+ -+#endif -+ -diff -uNr a/drivers/gpu/drm/psb/psb_fence.c b/drivers/gpu/drm/psb/psb_fence.c ---- a/drivers/gpu/drm/psb/psb_fence.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_fence.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,343 @@ -+/************************************************************************** -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#include <drm/drmP.h> -+#include "psb_drv.h" -+ -+static void psb_print_ta_fence_status(struct ttm_fence_device *fdev) -+{ -+ struct drm_psb_private *dev_priv = -+ container_of(fdev, struct drm_psb_private, fdev); -+ struct psb_scheduler_seq *seq = dev_priv->scheduler.seq; -+ int i; -+ -+ for (i=0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) { -+ DRM_INFO("Type 0x%02x, sequence %lu, reported %d\n", -+ (1 << i), -+ (unsigned long) seq->sequence, -+ seq->reported); -+ seq++; -+ } -+} -+ -+static void psb_poll_ta(struct ttm_fence_device *fdev, -+ uint32_t waiting_types) -+{ -+ struct drm_psb_private *dev_priv = -+ container_of(fdev, struct drm_psb_private, fdev); -+ uint32_t cur_flag = 1; -+ uint32_t flags = 0; -+ uint32_t sequence = 0; -+ uint32_t remaining = 0xFFFFFFFF; -+ uint32_t diff; -+ -+ struct psb_scheduler *scheduler; -+ struct psb_scheduler_seq *seq; -+ struct ttm_fence_class_manager *fc = -+ &fdev->fence_class[PSB_ENGINE_TA]; -+ -+ scheduler = &dev_priv->scheduler; -+ seq = scheduler->seq; -+ -+ while (likely(waiting_types & remaining)) { -+ if (!(waiting_types & cur_flag)) -+ goto skip; -+ if (seq->reported) -+ goto skip; -+ if (flags == 0) -+ sequence = seq->sequence; -+ else if (sequence != seq->sequence) { -+ ttm_fence_handler(fdev, PSB_ENGINE_TA, -+ sequence, flags, 0); -+ sequence = seq->sequence; -+ flags = 0; -+ } -+ flags |= cur_flag; -+ -+ /* -+ * Sequence may not have ended up on the ring yet. -+ * In that case, report it but don't mark it as -+ * reported. A subsequent poll will report it again. 
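-+ * The masked difference below is a wrap-safe comparison: it is
-+ * below fc->wrap_diff only once the sequence has actually been
-+ * queued on the ring.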
-+ */ -+ -+ diff = (fc->latest_queued_sequence - sequence) & -+ fc->sequence_mask; -+ if (diff < fc->wrap_diff) -+ seq->reported = 1; -+ -+skip: -+ cur_flag <<= 1; -+ remaining <<= 1; -+ seq++; -+ } -+ -+ if (flags) -+ ttm_fence_handler(fdev, PSB_ENGINE_TA, sequence, flags, 0); -+ -+} -+ -+static void psb_poll_other(struct ttm_fence_device *fdev, -+ uint32_t fence_class, uint32_t waiting_types) -+{ -+ struct drm_psb_private *dev_priv = -+ container_of(fdev, struct drm_psb_private, fdev); -+ struct ttm_fence_class_manager *fc = -+ &fdev->fence_class[fence_class]; -+ uint32_t sequence; -+ -+ if (unlikely(!dev_priv)) -+ return; -+ -+ if (waiting_types) { -+ switch (fence_class) { -+ case PSB_ENGINE_VIDEO: -+ sequence = dev_priv->msvdx_current_sequence; -+ break; -+ case LNC_ENGINE_ENCODE: -+ sequence = dev_priv->topaz_current_sequence; -+ break; -+ default: -+ sequence = dev_priv->comm[fence_class << 4]; -+ break; -+ } -+ -+ ttm_fence_handler(fdev, fence_class, sequence, -+ _PSB_FENCE_TYPE_EXE, 0); -+ -+ switch (fence_class) { -+ case PSB_ENGINE_2D: -+ if (dev_priv->fence0_irq_on && !fc->waiting_types) { -+ psb_2D_irq_off(dev_priv); -+ dev_priv->fence0_irq_on = 0; -+ } else if (!dev_priv->fence0_irq_on -+ && fc->waiting_types) { -+ psb_2D_irq_on(dev_priv); -+ dev_priv->fence0_irq_on = 1; -+ } -+ break; -+#if 0 -+ /* -+ * FIXME: MSVDX irq switching -+ */ -+ -+ case PSB_ENGINE_VIDEO: -+ if (dev_priv->fence2_irq_on && !fc->waiting_types) { -+ psb_msvdx_irq_off(dev_priv); -+ dev_priv->fence2_irq_on = 0; -+ } else if (!dev_priv->fence2_irq_on -+ && fc->pending_exe_flush) { -+ psb_msvdx_irq_on(dev_priv); -+ dev_priv->fence2_irq_on = 1; -+ } -+ break; -+#endif -+ default: -+ return; -+ } -+ } -+} -+ -+static void psb_fence_poll(struct ttm_fence_device *fdev, -+ uint32_t fence_class, uint32_t waiting_types) -+{ -+ if (unlikely((PSB_D_PM & drm_psb_debug) && (fence_class == 0))) -+ PSB_DEBUG_PM("psb_fence_poll: %d\n", fence_class); -+ switch (fence_class) { -+ case PSB_ENGINE_TA: -+ psb_poll_ta(fdev, waiting_types); -+ break; -+ default: -+ psb_poll_other(fdev, fence_class, waiting_types); -+ break; -+ } -+} -+ -+void psb_fence_error(struct drm_device *dev, -+ uint32_t fence_class, -+ uint32_t sequence, uint32_t type, int error) -+{ -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ struct ttm_fence_device *fdev = &dev_priv->fdev; -+ unsigned long irq_flags; -+ struct ttm_fence_class_manager *fc = -+ &fdev->fence_class[fence_class]; -+ -+ BUG_ON(fence_class >= PSB_NUM_ENGINES); -+ write_lock_irqsave(&fc->lock, irq_flags); -+ ttm_fence_handler(fdev, fence_class, sequence, type, error); -+ write_unlock_irqrestore(&fc->lock, irq_flags); -+} -+ -+int psb_fence_emit_sequence(struct ttm_fence_device *fdev, -+ uint32_t fence_class, -+ uint32_t flags, uint32_t *sequence, -+ unsigned long *timeout_jiffies) -+{ -+ struct drm_psb_private *dev_priv = -+ container_of(fdev, struct drm_psb_private, fdev); -+ uint32_t seq = 0; -+ int ret; -+ -+ if (!dev_priv) -+ return -EINVAL; -+ -+ if (fence_class >= PSB_NUM_ENGINES) -+ return -EINVAL; -+ -+ switch (fence_class) { -+ case PSB_ENGINE_2D: -+ spin_lock(&dev_priv->sequence_lock); -+ seq = ++dev_priv->sequence[fence_class]; -+ spin_unlock(&dev_priv->sequence_lock); -+ ret = psb_blit_sequence(dev_priv, seq); -+ if (ret) -+ return ret; -+ break; -+ case PSB_ENGINE_VIDEO: -+ spin_lock(&dev_priv->sequence_lock); -+ seq = dev_priv->sequence[fence_class]++; -+ spin_unlock(&dev_priv->sequence_lock); -+ break; -+ case LNC_ENGINE_ENCODE: -+ 
spin_lock(&dev_priv->sequence_lock); -+ seq = dev_priv->sequence[fence_class]++; -+ spin_unlock(&dev_priv->sequence_lock); -+ break; -+ default: -+ spin_lock(&dev_priv->sequence_lock); -+ seq = dev_priv->sequence[fence_class]; -+ spin_unlock(&dev_priv->sequence_lock); -+ } -+ -+ *sequence = seq; -+ -+ if (fence_class == PSB_ENGINE_TA) -+ *timeout_jiffies = jiffies + DRM_HZ / 2; -+ else -+ *timeout_jiffies = jiffies + DRM_HZ * 3; -+ -+ return 0; -+} -+ -+uint32_t psb_fence_advance_sequence(struct drm_device *dev, -+ uint32_t fence_class) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ uint32_t sequence; -+ -+ spin_lock(&dev_priv->sequence_lock); -+ sequence = ++dev_priv->sequence[fence_class]; -+ spin_unlock(&dev_priv->sequence_lock); -+ -+ return sequence; -+} -+ -+static void psb_fence_lockup(struct ttm_fence_object *fence, -+ uint32_t fence_types) -+{ -+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); -+ -+ if (fence->fence_class == PSB_ENGINE_TA) { -+ -+ /* -+ * The 3D engine has its own lockup detection. -+ * Just extend the fence expiry time. -+ */ -+ -+ DRM_INFO("Extending 3D fence timeout.\n"); -+ write_lock(&fc->lock); -+ -+ DRM_INFO("Sequence %lu, types 0x%08x signaled 0x%08x\n", -+ (unsigned long) fence->sequence, fence_types, -+ fence->info.signaled_types); -+ -+ if (time_after_eq(jiffies, fence->timeout_jiffies)) -+ fence->timeout_jiffies = jiffies + DRM_HZ / 2; -+ -+ psb_print_ta_fence_status(fence->fdev); -+ write_unlock(&fc->lock); -+ } else { -+ DRM_ERROR -+ ("GPU timeout (probable lockup) detected on engine %u " -+ "fence type 0x%08x\n", -+ (unsigned int) fence->fence_class, -+ (unsigned int) fence_types); -+ write_lock(&fc->lock); -+ ttm_fence_handler(fence->fdev, fence->fence_class, -+ fence->sequence, fence_types, -EBUSY); -+ write_unlock(&fc->lock); -+ } -+} -+ -+void psb_fence_handler(struct drm_device *dev, uint32_t fence_class) -+{ -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ struct ttm_fence_device *fdev = &dev_priv->fdev; -+ struct ttm_fence_class_manager *fc = -+ &fdev->fence_class[fence_class]; -+ unsigned long irq_flags; -+ -+#ifdef FIX_TG_16 -+ if (fence_class == PSB_ENGINE_2D) { -+ -+ if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) && -+ (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) && -+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & -+ _PSB_C2B_STATUS_BUSY) == 0)) -+ psb_resume_ta_2d_idle(dev_priv); -+ } -+#endif -+ write_lock_irqsave(&fc->lock, irq_flags); -+ psb_fence_poll(fdev, fence_class, fc->waiting_types); -+ write_unlock_irqrestore(&fc->lock, irq_flags); -+} -+ -+ -+static struct ttm_fence_driver psb_ttm_fence_driver = { -+ .has_irq = NULL, -+ .emit = psb_fence_emit_sequence, -+ .flush = NULL, -+ .poll = psb_fence_poll, -+ .needed_flush = NULL, -+ .wait = NULL, -+ .signaled = NULL, -+ .lockup = psb_fence_lockup, -+}; -+ -+int psb_ttm_fence_device_init(struct ttm_fence_device *fdev) -+{ -+ struct drm_psb_private *dev_priv = -+ container_of(fdev, struct drm_psb_private, fdev); -+ struct ttm_fence_class_init fci = {.wrap_diff = (1 << 30), -+ .flush_diff = (1 << 29), -+ .sequence_mask = 0xFFFFFFFF -+ }; -+ -+ return ttm_fence_device_init(PSB_NUM_ENGINES, -+ dev_priv->mem_global_ref.object, -+ fdev, &fci, 1, -+ &psb_ttm_fence_driver); -+} -diff -uNr a/drivers/gpu/drm/psb/psb_gtt.c b/drivers/gpu/drm/psb/psb_gtt.c ---- a/drivers/gpu/drm/psb/psb_gtt.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_gtt.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,257 @@ 
-+/************************************************************************** -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com> -+ */ -+#include <drm/drmP.h> -+#include "psb_drv.h" -+ -+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type) -+{ -+ uint32_t mask = PSB_PTE_VALID; -+ -+ if (type & PSB_MMU_CACHED_MEMORY) -+ mask |= PSB_PTE_CACHED; -+ if (type & PSB_MMU_RO_MEMORY) -+ mask |= PSB_PTE_RO; -+ if (type & PSB_MMU_WO_MEMORY) -+ mask |= PSB_PTE_WO; -+ -+ return (pfn << PAGE_SHIFT) | mask; -+} -+ -+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev) -+{ -+ struct psb_gtt *tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_DRIVER); -+ -+ if (!tmp) -+ return NULL; -+ -+ init_rwsem(&tmp->sem); -+ tmp->dev = dev; -+ -+ return tmp; -+} -+ -+void psb_gtt_takedown(struct psb_gtt *pg, int free) -+{ -+ struct drm_psb_private *dev_priv = pg->dev->dev_private; -+ -+ if (!pg) -+ return; -+ -+ if (pg->gtt_map) { -+ iounmap(pg->gtt_map); -+ pg->gtt_map = NULL; -+ } -+ if (pg->initialized) { -+ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL, -+ pg->gmch_ctrl); -+ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL); -+ (void) PSB_RVDC32(PSB_PGETBL_CTL); -+ } -+ if (free) -+ drm_free(pg, sizeof(*pg), DRM_MEM_DRIVER); -+} -+ -+int psb_gtt_init(struct psb_gtt *pg, int resume) -+{ -+ struct drm_device *dev = pg->dev; -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ unsigned gtt_pages; -+ unsigned long stolen_size, vram_stolen_size, ci_stolen_size; -+ unsigned i, num_pages; -+ unsigned pfn_base; -+ -+ int ret = 0; -+ uint32_t pte; -+ -+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl); -+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL, -+ pg->gmch_ctrl | _PSB_GMCH_ENABLED); -+ -+ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL); -+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL); -+ (void) PSB_RVDC32(PSB_PGETBL_CTL); -+ -+ pg->initialized = 1; -+ -+ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK; -+ -+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE); -+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE); -+ gtt_pages = -+ pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT; -+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE) -+ >> PAGE_SHIFT; -+ -+ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base); -+ vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE; -+ -+ ci_stolen_size = dev_priv->ci_region_size; -+ /* add CI & RAR share buffer space to stolen_size */ -+ /* stolen_size = vram_stolen_size + ci_stolen_size; */ -+ stolen_size = vram_stolen_size; -+ -+ 
PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start); -+ PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start); -+ PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start); -+ PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages); -+ PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024); -+ -+ if (resume && (gtt_pages != pg->gtt_pages) && -+ (stolen_size != pg->stolen_size)) { -+ DRM_ERROR("GTT resume error.\n"); -+ ret = -EINVAL; -+ goto out_err; -+ } -+ -+ pg->gtt_pages = gtt_pages; -+ pg->stolen_size = stolen_size; -+ pg->vram_stolen_size = vram_stolen_size; -+ pg->ci_stolen_size = ci_stolen_size; -+ pg->gtt_map = -+ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT); -+ if (!pg->gtt_map) { -+ DRM_ERROR("Failure to map gtt.\n"); -+ ret = -ENOMEM; -+ goto out_err; -+ } -+ -+ /* -+ * insert vram stolen pages. -+ */ -+ -+ pfn_base = pg->stolen_base >> PAGE_SHIFT; -+ num_pages = vram_stolen_size >> PAGE_SHIFT; -+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n", -+ num_pages, pfn_base); -+ for (i = 0; i < num_pages; ++i) { -+ pte = psb_gtt_mask_pte(pfn_base + i, 0); -+ iowrite32(pte, pg->gtt_map + i); -+ } -+#if 0 -+ /* -+ * insert CI stolen pages -+ */ -+ -+ pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT; -+ num_pages = ci_stolen_size >> PAGE_SHIFT; -+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n", -+ num_pages, pfn_base); -+ for (; i < num_pages; ++i) { -+ pte = psb_gtt_mask_pte(pfn_base + i, 0); -+ iowrite32(pte, pg->gtt_map + i); -+ } -+#endif -+ /* -+ * Init rest of gtt. -+ */ -+ -+ pfn_base = page_to_pfn(dev_priv->scratch_page); -+ pte = psb_gtt_mask_pte(pfn_base, 0); -+ PSB_DEBUG_INIT("Initializing the rest of a total " -+ "of %d gtt pages.\n", pg->gatt_pages); -+ -+ for (; i < pg->gatt_pages; ++i) -+ iowrite32(pte, pg->gtt_map + i); -+ (void) ioread32(pg->gtt_map + i - 1); -+ -+ return 0; -+ -+out_err: -+ psb_gtt_takedown(pg, 0); -+ return ret; -+} -+ -+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages, -+ unsigned offset_pages, unsigned num_pages, -+ unsigned desired_tile_stride, -+ unsigned hw_tile_stride, int type) -+{ -+ unsigned rows = 1; -+ unsigned add; -+ unsigned row_add; -+ unsigned i; -+ unsigned j; -+ uint32_t *cur_page = NULL; -+ uint32_t pte; -+ -+ if (hw_tile_stride) -+ rows = num_pages / desired_tile_stride; -+ else -+ desired_tile_stride = num_pages; -+ -+ add = desired_tile_stride; -+ row_add = hw_tile_stride; -+ -+ down_read(&pg->sem); -+ for (i = 0; i < rows; ++i) { -+ cur_page = pg->gtt_map + offset_pages; -+ for (j = 0; j < desired_tile_stride; ++j) { -+ pte = -+ psb_gtt_mask_pte(page_to_pfn(*pages++), type); -+ iowrite32(pte, cur_page++); -+ } -+ offset_pages += add; -+ } -+ (void) ioread32(cur_page - 1); -+ up_read(&pg->sem); -+ -+ return 0; -+} -+ -+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages, -+ unsigned num_pages, unsigned desired_tile_stride, -+ unsigned hw_tile_stride) -+{ -+ struct drm_psb_private *dev_priv = pg->dev->dev_private; -+ unsigned rows = 1; -+ unsigned add; -+ unsigned row_add; -+ unsigned i; -+ unsigned j; -+ uint32_t *cur_page = NULL; -+ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page); -+ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0); -+ -+ if (hw_tile_stride) -+ rows = num_pages / desired_tile_stride; -+ else -+ desired_tile_stride = num_pages; -+ -+ add = desired_tile_stride; -+ row_add = hw_tile_stride; -+ -+ down_read(&pg->sem); -+ for (i = 0; i < rows; ++i) { -+ cur_page = pg->gtt_map + offset_pages; -+ for (j = 0; j < desired_tile_stride; 
++j) -+ iowrite32(pte, cur_page++); -+ -+ offset_pages += add; -+ } -+ (void) ioread32(cur_page - 1); -+ up_read(&pg->sem); -+ -+ return 0; -+} -diff -uNr a/drivers/gpu/drm/psb/psb_intel_display.c b/drivers/gpu/drm/psb/psb_intel_display.c ---- a/drivers/gpu/drm/psb/psb_intel_display.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_intel_display.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,2435 @@ -+/* -+ * Copyright © 2006-2007 Intel Corporation -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Authors: -+ * Eric Anholt <eric@anholt.net> -+ */ -+ -+#include <linux/i2c.h> -+ -+#include <drm/drm_crtc_helper.h> -+#include "psb_fb.h" -+#include "psb_intel_display.h" -+ -+ -+struct psb_intel_clock_t { -+ /* given values */ -+ int n; -+ int m1, m2; -+ int p1, p2; -+ /* derived values */ -+ int dot; -+ int vco; -+ int m; -+ int p; -+}; -+ -+struct psb_intel_range_t { -+ int min, max; -+}; -+ -+struct psb_intel_p2_t { -+ int dot_limit; -+ int p2_slow, p2_fast; -+}; -+ -+#define INTEL_P2_NUM 2 -+ -+struct psb_intel_limit_t { -+ struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1; -+ struct psb_intel_p2_t p2; -+}; -+ -+#define I8XX_DOT_MIN 25000 -+#define I8XX_DOT_MAX 350000 -+#define I8XX_VCO_MIN 930000 -+#define I8XX_VCO_MAX 1400000 -+#define I8XX_N_MIN 3 -+#define I8XX_N_MAX 16 -+#define I8XX_M_MIN 96 -+#define I8XX_M_MAX 140 -+#define I8XX_M1_MIN 18 -+#define I8XX_M1_MAX 26 -+#define I8XX_M2_MIN 6 -+#define I8XX_M2_MAX 16 -+#define I8XX_P_MIN 4 -+#define I8XX_P_MAX 128 -+#define I8XX_P1_MIN 2 -+#define I8XX_P1_MAX 33 -+#define I8XX_P1_LVDS_MIN 1 -+#define I8XX_P1_LVDS_MAX 6 -+#define I8XX_P2_SLOW 4 -+#define I8XX_P2_FAST 2 -+#define I8XX_P2_LVDS_SLOW 14 -+#define I8XX_P2_LVDS_FAST 14 /* No fast option */ -+#define I8XX_P2_SLOW_LIMIT 165000 -+ -+#define I9XX_DOT_MIN 20000 -+#define I9XX_DOT_MAX 400000 -+#define I9XX_VCO_MIN 1400000 -+#define I9XX_VCO_MAX 2800000 -+#define I9XX_N_MIN 3 -+#define I9XX_N_MAX 8 -+#define I9XX_M_MIN 70 -+#define I9XX_M_MAX 120 -+#define I9XX_M1_MIN 10 -+#define I9XX_M1_MAX 20 -+#define I9XX_M2_MIN 5 -+#define I9XX_M2_MAX 9 -+#define I9XX_P_SDVO_DAC_MIN 5 -+#define I9XX_P_SDVO_DAC_MAX 80 -+#define I9XX_P_LVDS_MIN 7 -+#define I9XX_P_LVDS_MAX 98 -+#define I9XX_P1_MIN 1 -+#define I9XX_P1_MAX 8 -+#define I9XX_P2_SDVO_DAC_SLOW 10 -+#define I9XX_P2_SDVO_DAC_FAST 5 -+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000 -+#define I9XX_P2_LVDS_SLOW 14 -+#define 
I9XX_P2_LVDS_FAST 7 -+#define I9XX_P2_LVDS_SLOW_LIMIT 112000 -+ -+#define INTEL_LIMIT_I8XX_DVO_DAC 0 -+#define INTEL_LIMIT_I8XX_LVDS 1 -+#define INTEL_LIMIT_I9XX_SDVO_DAC 2 -+#define INTEL_LIMIT_I9XX_LVDS 3 -+ -+static const struct psb_intel_limit_t psb_intel_limits[] = { -+ { /* INTEL_LIMIT_I8XX_DVO_DAC */ -+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX}, -+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX}, -+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX}, -+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX}, -+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX}, -+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX}, -+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX}, -+ .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX}, -+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT, -+ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST}, -+ }, -+ { /* INTEL_LIMIT_I8XX_LVDS */ -+ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX}, -+ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX}, -+ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX}, -+ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX}, -+ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX}, -+ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX}, -+ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX}, -+ .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX}, -+ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT, -+ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST}, -+ }, -+ { /* INTEL_LIMIT_I9XX_SDVO_DAC */ -+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, -+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX}, -+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX}, -+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX}, -+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX}, -+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX}, -+ .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX}, -+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX}, -+ .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, -+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = -+ I9XX_P2_SDVO_DAC_FAST}, -+ }, -+ { /* INTEL_LIMIT_I9XX_LVDS */ -+ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, -+ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX}, -+ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX}, -+ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX}, -+ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX}, -+ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX}, -+ .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX}, -+ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX}, -+ /* The single-channel range is 25-112Mhz, and dual-channel -+ * is 80-224Mhz. Prefer single channel as much as possible. -+ */ -+ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, -+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST}, -+ }, -+}; -+ -+static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc) -+{ -+ struct drm_device *dev = crtc->dev; -+ const struct psb_intel_limit_t *limit; -+ -+ if (IS_I9XX(dev)) { -+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) -+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS]; -+ else -+ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; -+ } else { -+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) -+ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_LVDS]; -+ else -+ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_DVO_DAC]; -+ } -+ return limit; -+} -+ -+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. 
*/ -+ -+static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock) -+{ -+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); -+ clock->p = clock->p1 * clock->p2; -+ clock->vco = refclk * clock->m / (clock->n + 2); -+ clock->dot = clock->vco / clock->p; -+} -+ -+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */ -+ -+static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock) -+{ -+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); -+ clock->p = clock->p1 * clock->p2; -+ clock->vco = refclk * clock->m / (clock->n + 2); -+ clock->dot = clock->vco / clock->p; -+} -+ -+static void psb_intel_clock(struct drm_device *dev, int refclk, -+ struct psb_intel_clock_t *clock) -+{ -+ if (IS_I9XX(dev)) -+ return i9xx_clock(refclk, clock); -+ else -+ return i8xx_clock(refclk, clock); -+} -+ -+/** -+ * Returns whether any output on the specified pipe is of the specified type -+ */ -+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct drm_mode_config *mode_config = &dev->mode_config; -+ struct drm_connector *l_entry; -+ -+ list_for_each_entry(l_entry, &mode_config->connector_list, head) { -+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) { -+ struct psb_intel_output *psb_intel_output = -+ to_psb_intel_output(l_entry); -+ if (psb_intel_output->type == type) -+ return true; -+ } -+ } -+ return false; -+} -+ -+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } -+/** -+ * Returns whether the given set of divisors are valid for a given refclk with -+ * the given connectors. -+ */ -+ -+static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc, -+ struct psb_intel_clock_t *clock) -+{ -+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc); -+ -+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) -+ INTELPllInvalid("p1 out of range\n"); -+ if (clock->p < limit->p.min || limit->p.max < clock->p) -+ INTELPllInvalid("p out of range\n"); -+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) -+ INTELPllInvalid("m2 out of range\n"); -+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) -+ INTELPllInvalid("m1 out of range\n"); -+ if (clock->m1 <= clock->m2) -+ INTELPllInvalid("m1 <= m2\n"); -+ if (clock->m < limit->m.min || limit->m.max < clock->m) -+ INTELPllInvalid("m out of range\n"); -+ if (clock->n < limit->n.min || limit->n.max < clock->n) -+ INTELPllInvalid("n out of range\n"); -+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) -+ INTELPllInvalid("vco out of range\n"); -+ /* XXX: We may need to be checking "Dot clock" -+ * depending on the multiplier, connector, etc., -+ * rather than just a single range. -+ */ -+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) -+ INTELPllInvalid("dot out of range\n"); -+ -+ return true; -+} -+ -+/** -+ * Returns a set of divisors for the desired target clock with the given -+ * refclk, or FALSE. The returned values represent the clock equation: -+ * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 
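-+ *
-+ * Worked example, with illustrative values rather than ones taken
-+ * from a datasheet: refclk = 96000 kHz, m1 = 14, m2 = 9, n = 3,
-+ * p1 = 1, p2 = 10 gives m = 5 * 16 + 11 = 91,
-+ * vco = 96000 * 91 / 5 = 1747200 kHz and
-+ * dot = 1747200 / (1 * 10) = 174720 kHz, within the I9XX limits.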
-+ */ -+static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target, -+ int refclk, -+ struct psb_intel_clock_t *best_clock) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct psb_intel_clock_t clock; -+ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc); -+ int err = target; -+ -+ if (IS_I9XX(dev) && psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && -+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) { -+ /* -+ * For LVDS, if the panel is on, just rely on its current -+ * settings for dual-channel. We haven't figured out how to -+ * reliably set up different single/dual channel state, if we -+ * even can. -+ */ -+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) == -+ LVDS_CLKB_POWER_UP) -+ clock.p2 = limit->p2.p2_fast; -+ else -+ clock.p2 = limit->p2.p2_slow; -+ } else { -+ if (target < limit->p2.dot_limit) -+ clock.p2 = limit->p2.p2_slow; -+ else -+ clock.p2 = limit->p2.p2_fast; -+ } -+ -+ memset(best_clock, 0, sizeof(*best_clock)); -+ -+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; -+ clock.m1++) { -+ for (clock.m2 = limit->m2.min; -+ clock.m2 < clock.m1 && clock.m2 <= limit->m2.max; -+ clock.m2++) { -+ for (clock.n = limit->n.min; -+ clock.n <= limit->n.max; clock.n++) { -+ for (clock.p1 = limit->p1.min; -+ clock.p1 <= limit->p1.max; -+ clock.p1++) { -+ int this_err; -+ -+ psb_intel_clock(dev, refclk, &clock); -+ -+ if (!psb_intel_PLL_is_valid -+ (crtc, &clock)) -+ continue; -+ -+ this_err = abs(clock.dot - target); -+ if (this_err < err) { -+ *best_clock = clock; -+ err = this_err; -+ } -+ } -+ } -+ } -+ } -+ -+ return err != target; -+} -+ -+void psb_intel_wait_for_vblank(struct drm_device *dev) -+{ -+ /* Wait for 20ms, i.e. one cycle at 50hz. */ -+ udelay(20000); -+} -+ -+int psb_intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) -+{ -+ struct drm_device *dev = crtc->dev; -+ /* struct drm_i915_master_private *master_priv; */ -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); -+ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev; -+ int pipe = psb_intel_crtc->pipe; -+ unsigned long Start, Offset; -+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE); -+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); -+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; -+ int dspcntr_reg = (pipe == 0) ? 
DSPACNTR : DSPBCNTR; -+ u32 dspcntr; -+ -+ /* no fb bound */ -+ if (!crtc->fb) { -+ DRM_DEBUG("No FB bound\n"); -+ return 0; -+ } -+ -+ if (IS_MRST(dev) && (pipe == 0)) -+ dspbase = MRST_DSPABASE; -+ -+ Start = mode_dev->bo_offset(dev, psbfb->bo); -+ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); -+ -+ REG_WRITE(dspstride, crtc->fb->pitch); -+ -+ dspcntr = REG_READ(dspcntr_reg); -+ switch (crtc->fb->bits_per_pixel) { -+ case 8: -+ dspcntr |= DISPPLANE_8BPP; -+ break; -+ case 16: -+ if (crtc->fb->depth == 15) -+ dspcntr |= DISPPLANE_15_16BPP; -+ else -+ dspcntr |= DISPPLANE_16BPP; -+ break; -+ case 24: -+ case 32: -+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA; -+ break; -+ default: -+ DRM_ERROR("Unknown color depth\n"); -+ return -EINVAL; -+ } -+ REG_WRITE(dspcntr_reg, dspcntr); -+ -+ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); -+ if (IS_I965G(dev) || IS_MRST(dev)) { -+ REG_WRITE(dspbase, Offset); -+ REG_READ(dspbase); -+ REG_WRITE(dspsurf, Start); -+ REG_READ(dspsurf); -+ } else { -+ REG_WRITE(dspbase, Start + Offset); -+ REG_READ(dspbase); -+ } -+ -+ if (!dev->primary->master) -+ return 0; -+ -+#if 0 /* JB: Enable sarea later */ -+ master_priv = dev->primary->master->driver_priv; -+ if (!master_priv->sarea_priv) -+ return 0; -+ -+ switch (pipe) { -+ case 0: -+ master_priv->sarea_priv->planeA_x = x; -+ master_priv->sarea_priv->planeA_y = y; -+ break; -+ case 1: -+ master_priv->sarea_priv->planeB_x = x; -+ master_priv->sarea_priv->planeB_y = y; -+ break; -+ default: -+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); -+ break; -+ } -+#endif -+} -+ -+ -+ -+/** -+ * Sets the power management mode of the pipe and plane. -+ * -+ * This code should probably grow support for turning the cursor off and back -+ * on appropriately at the same time as we're turning the pipe off/on. -+ */ -+static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode) -+{ -+ struct drm_device *dev = crtc->dev; -+ /* struct drm_i915_master_private *master_priv; */ -+ /* struct drm_i915_private *dev_priv = dev->dev_private; */ -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ int pipe = psb_intel_crtc->pipe; -+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; -+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; -+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE; -+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; -+ u32 temp; -+ bool enabled; -+ -+ /* XXX: When our outputs are all unaware of DPMS modes other than off -+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. -+ */ -+ switch (mode) { -+ case DRM_MODE_DPMS_ON: -+ case DRM_MODE_DPMS_STANDBY: -+ case DRM_MODE_DPMS_SUSPEND: -+ /* Enable the DPLL */ -+ temp = REG_READ(dpll_reg); -+ if ((temp & DPLL_VCO_ENABLE) == 0) { -+ REG_WRITE(dpll_reg, temp); -+ REG_READ(dpll_reg); -+ /* Wait for the clocks to stabilize. */ -+ udelay(150); -+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); -+ REG_READ(dpll_reg); -+ /* Wait for the clocks to stabilize. */ -+ udelay(150); -+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); -+ REG_READ(dpll_reg); -+ /* Wait for the clocks to stabilize. 
*/ -+ udelay(150); -+ } -+ -+ /* Enable the pipe */ -+ temp = REG_READ(pipeconf_reg); -+ if ((temp & PIPEACONF_ENABLE) == 0) -+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); -+ -+ /* Enable the plane */ -+ temp = REG_READ(dspcntr_reg); -+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) { -+ REG_WRITE(dspcntr_reg, -+ temp | DISPLAY_PLANE_ENABLE); -+ /* Flush the plane changes */ -+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); -+ } -+ -+ psb_intel_crtc_load_lut(crtc); -+ -+ /* Give the overlay scaler a chance to enable -+ * if it's on this pipe */ -+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */ -+ break; -+ case DRM_MODE_DPMS_OFF: -+ /* Give the overlay scaler a chance to disable -+ * if it's on this pipe */ -+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ -+ -+ /* Disable the VGA plane that we never use */ -+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); -+ -+ /* Disable display plane */ -+ temp = REG_READ(dspcntr_reg); -+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) { -+ REG_WRITE(dspcntr_reg, -+ temp & ~DISPLAY_PLANE_ENABLE); -+ /* Flush the plane changes */ -+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); -+ REG_READ(dspbase_reg); -+ } -+ -+ if (!IS_I9XX(dev)) { -+ /* Wait for vblank for the disable to take effect */ -+ psb_intel_wait_for_vblank(dev); -+ } -+ -+ /* Next, disable display pipes */ -+ temp = REG_READ(pipeconf_reg); -+ if ((temp & PIPEACONF_ENABLE) != 0) { -+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); -+ REG_READ(pipeconf_reg); -+ } -+ -+ /* Wait for vblank for the disable to take effect. */ -+ psb_intel_wait_for_vblank(dev); -+ -+ temp = REG_READ(dpll_reg); -+ if ((temp & DPLL_VCO_ENABLE) != 0) { -+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); -+ REG_READ(dpll_reg); -+ } -+ -+ /* Wait for the clocks to turn off. */ -+ udelay(150); -+ break; -+ } -+ -+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; -+ -+#if 0 /* JB: Add vblank support later */ -+ if (enabled) -+ dev_priv->vblank_pipe |= (1 << pipe); -+ else -+ dev_priv->vblank_pipe &= ~(1 << pipe); -+#endif -+ -+ psb_intel_crtc->dpms_mode = mode; -+ -+#if 0 /* JB: Add sarea support later */ -+ if (!dev->primary->master) -+ return 0; -+ -+ master_priv = dev->primary->master->driver_priv; -+ if (!master_priv->sarea_priv) -+ return 0; -+ -+ switch (pipe) { -+ case 0: -+ master_priv->sarea_priv->planeA_w = -+ enabled ? crtc->mode.hdisplay : 0; -+ master_priv->sarea_priv->planeA_h = -+ enabled ? crtc->mode.vdisplay : 0; -+ break; -+ case 1: -+ master_priv->sarea_priv->planeB_w = -+ enabled ? crtc->mode.hdisplay : 0; -+ master_priv->sarea_priv->planeB_h = -+ enabled ? 
crtc->mode.vdisplay : 0; -+ break; -+ default: -+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); -+ break; -+ } -+#endif -+} -+ -+static void psb_intel_crtc_prepare(struct drm_crtc *crtc) -+{ -+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; -+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); -+} -+ -+static void psb_intel_crtc_commit(struct drm_crtc *crtc) -+{ -+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; -+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); -+} -+ -+void psb_intel_encoder_prepare(struct drm_encoder *encoder) -+{ -+ struct drm_encoder_helper_funcs *encoder_funcs = -+ encoder->helper_private; -+ /* lvds has its own version of prepare see psb_intel_lvds_prepare */ -+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); -+} -+ -+void psb_intel_encoder_commit(struct drm_encoder *encoder) -+{ -+ struct drm_encoder_helper_funcs *encoder_funcs = -+ encoder->helper_private; -+ /* lvds has its own version of commit see psb_intel_lvds_commit */ -+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); -+} -+ -+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc, -+ struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode) -+{ -+ return true; -+} -+ -+ -+/** Returns the core display clock speed for i830 - i945 */ -+static int psb_intel_get_core_clock_speed(struct drm_device *dev) -+{ -+#if 0 /* JB: Look into this more */ -+ /* Core clock values taken from the published datasheets. -+ * The 830 may go up to 166 Mhz, which we should check. -+ */ -+ if (IS_I945G(dev)) -+ return 400000; -+ else if (IS_I915G(dev)) -+ return 333000; -+ else if (IS_I945GM(dev) || IS_845G(dev)) -+ return 200000; -+ else if (IS_I915GM(dev)) { -+ u16 gcfgc = 0; -+ -+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc); -+ -+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE) -+ return 133000; -+ else { -+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { -+ case GC_DISPLAY_CLOCK_333_MHZ: -+ return 333000; -+ default: -+ case GC_DISPLAY_CLOCK_190_200_MHZ: -+ return 190000; -+ } -+ } -+ } else if (IS_I865G(dev)) -+ return 266000; -+ else if (IS_I855(dev)) { -+#if 0 -+ PCITAG bridge = pciTag(0, 0, 0); -+ /* This is always the host bridge */ -+ u16 hpllcc = pciReadWord(bridge, HPLLCC); -+ -+#endif -+ u16 hpllcc = 0; -+ /* Assume that the hardware is in the high speed state. This -+ * should be the default. 
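-+		 *
-+		 * Each GC_CLOCK_* code in the decode below names its
-+		 * low/high-speed pair in MHz, and under the high-speed
-+		 * assumption the second figure is reported: e.g. an hpllcc
-+		 * reading of GC_CLOCK_133_200 means 133 MHz low / 200 MHz
-+		 * high, so the function returns 200000 (like every clock in
-+		 * this file, the value is in kHz).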
-+ */ -+ switch (hpllcc & GC_CLOCK_CONTROL_MASK) { -+ case GC_CLOCK_133_200: -+ case GC_CLOCK_100_200: -+ return 200000; -+ case GC_CLOCK_166_250: -+ return 250000; -+ case GC_CLOCK_100_133: -+ return 133000; -+ } -+ } else /* 852, 830 */ -+ return 133000; -+#endif -+ return 0; /* Silence gcc warning */ -+} -+ -+ -+/** -+ * Return the pipe currently connected to the panel fitter, -+ * or -1 if the panel fitter is not present or not in use -+ */ -+static int psb_intel_panel_fitter_pipe(struct drm_device *dev) -+{ -+ u32 pfit_control; -+ -+ /* i830 doesn't have a panel fitter */ -+ if (IS_I830(dev)) -+ return -1; -+ -+ pfit_control = REG_READ(PFIT_CONTROL); -+ -+ /* See if the panel fitter is in use */ -+ if ((pfit_control & PFIT_ENABLE) == 0) -+ return -1; -+ -+ /* 965 can place panel fitter on either pipe */ -+ if (IS_I965G(dev) || IS_MRST(dev)) -+ return (pfit_control >> 29) & 0x3; -+ -+ /* older chips can only use pipe 1 */ -+ return 1; -+} -+ -+static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, -+ struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode, -+ int x, int y, -+ struct drm_framebuffer *old_fb) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ int pipe = psb_intel_crtc->pipe; -+ int fp_reg = (pipe == 0) ? FPA0 : FPB0; -+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; -+ int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; -+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; -+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; -+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; -+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; -+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; -+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; -+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; -+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; -+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; -+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; -+ int pipesrc_reg = (pipe == 0) ? 
PIPEASRC : PIPEBSRC; -+ int refclk; -+ struct psb_intel_clock_t clock; -+ u32 dpll = 0, fp = 0, dspcntr, pipeconf; -+ bool ok, is_sdvo = false, is_dvo = false; -+ bool is_crt = false, is_lvds = false, is_tv = false; -+ struct drm_mode_config *mode_config = &dev->mode_config; -+ struct drm_connector *connector; -+ -+ list_for_each_entry(connector, &mode_config->connector_list, head) { -+ struct psb_intel_output *psb_intel_output = -+ to_psb_intel_output(connector); -+ -+ if (!connector->encoder -+ || connector->encoder->crtc != crtc) -+ continue; -+ -+ switch (psb_intel_output->type) { -+ case INTEL_OUTPUT_LVDS: -+ is_lvds = true; -+ break; -+ case INTEL_OUTPUT_SDVO: -+ is_sdvo = true; -+ break; -+ case INTEL_OUTPUT_DVO: -+ is_dvo = true; -+ break; -+ case INTEL_OUTPUT_TVOUT: -+ is_tv = true; -+ break; -+ case INTEL_OUTPUT_ANALOG: -+ is_crt = true; -+ break; -+ } -+ } -+ -+ if (IS_I9XX(dev)) -+ refclk = 96000; -+ else -+ refclk = 48000; -+ -+ ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, -+ &clock); -+ if (!ok) { -+ DRM_ERROR("Couldn't find PLL settings for mode!\n"); -+ return 0; -+ } -+ -+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2; -+ -+ dpll = DPLL_VGA_MODE_DIS; -+ if (IS_I9XX(dev)) { -+ if (is_lvds) { -+ dpll |= DPLLB_MODE_LVDS; -+ if (IS_POULSBO(dev)) -+ dpll |= DPLL_DVO_HIGH_SPEED; -+ } else -+ dpll |= DPLLB_MODE_DAC_SERIAL; -+ if (is_sdvo) { -+ dpll |= DPLL_DVO_HIGH_SPEED; -+ if (IS_I945G(dev) || IS_I945GM(dev)) { -+ int sdvo_pixel_multiply = -+ adjusted_mode->clock / mode->clock; -+ dpll |= -+ (sdvo_pixel_multiply - -+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES; -+ } -+ } -+ -+ /* compute bitmask from p1 value */ -+ dpll |= (1 << (clock.p1 - 1)) << 16; -+ switch (clock.p2) { -+ case 5: -+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; -+ break; -+ case 7: -+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; -+ break; -+ case 10: -+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; -+ break; -+ case 14: -+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; -+ break; -+ } -+ if (IS_I965G(dev)) -+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); -+ } else { -+ if (is_lvds) { -+ dpll |= -+ (1 << (clock.p1 - 1)) << -+ DPLL_FPA01_P1_POST_DIV_SHIFT; -+ } else { -+ if (clock.p1 == 2) -+ dpll |= PLL_P1_DIVIDE_BY_TWO; -+ else -+ dpll |= -+ (clock.p1 - -+ 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; -+ if (clock.p2 == 4) -+ dpll |= PLL_P2_DIVIDE_BY_4; -+ } -+ } -+ -+ if (is_tv) { -+ /* XXX: just matching BIOS for now */ -+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */ -+ dpll |= 3; -+ } -+#if 0 -+ else if (is_lvds) -+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; -+#endif -+ else -+ dpll |= PLL_REF_INPUT_DREFCLK; -+ -+ /* setup pipeconf */ -+ pipeconf = REG_READ(pipeconf_reg); -+ -+ /* Set up the display plane register */ -+ dspcntr = DISPPLANE_GAMMA_ENABLE; -+ -+ if (pipe == 0) -+ dspcntr |= DISPPLANE_SEL_PIPE_A; -+ else -+ dspcntr |= DISPPLANE_SEL_PIPE_B; -+ -+ if (pipe == 0 && !IS_I965G(dev)) { -+ /* Enable pixel doubling when the dot clock is > 90% -+ * of the (display) core speed. -+ * -+ * XXX: No double-wide on 915GM pipe B. -+ * Is that the only reason for the -+ * pipe == 0 check? -+ */ -+ if (mode->clock > psb_intel_get_core_clock_speed(dev) * 9 / 10) -+ pipeconf |= PIPEACONF_DOUBLE_WIDE; -+ else -+ pipeconf &= ~PIPEACONF_DOUBLE_WIDE; -+ } -+ -+ dspcntr |= DISPLAY_PLANE_ENABLE; -+ pipeconf |= PIPEACONF_ENABLE; -+ dpll |= DPLL_VCO_ENABLE; -+ -+ -+ /* Disable the panel fitter if it was on our pipe */ -+ if (psb_intel_panel_fitter_pipe(dev) == pipe) -+ REG_WRITE(PFIT_CONTROL, 0); -+ -+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 
'A' : 'B'); -+ drm_mode_debug_printmodeline(mode); -+ -+#if 0 -+ if (!xf86ModesEqual(mode, adjusted_mode)) { -+ xf86DrvMsg(pScrn->scrnIndex, X_INFO, -+ "Adjusted mode for pipe %c:\n", -+ pipe == 0 ? 'A' : 'B'); -+ xf86PrintModeline(pScrn->scrnIndex, mode); -+ } -+ i830PrintPll("chosen", &clock); -+#endif -+ -+ if (dpll & DPLL_VCO_ENABLE) { -+ REG_WRITE(fp_reg, fp); -+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); -+ REG_READ(dpll_reg); -+ udelay(150); -+ } -+ -+ /* The LVDS pin pair needs to be on before the DPLLs are enabled. -+ * This is an exception to the general rule that mode_set doesn't turn -+ * things on. -+ */ -+ if (is_lvds) { -+ u32 lvds = REG_READ(LVDS); -+ -+ lvds |= -+ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | -+ LVDS_PIPEB_SELECT; -+ /* Set the B0-B3 data pairs corresponding to -+ * whether we're going to -+ * set the DPLLs for dual-channel mode or not. -+ */ -+ if (clock.p2 == 7) -+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; -+ else -+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); -+ -+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) -+ * appropriately here, but we need to look more -+ * thoroughly into how panels behave in the two modes. -+ */ -+ -+ REG_WRITE(LVDS, lvds); -+ REG_READ(LVDS); -+ } -+ -+ REG_WRITE(fp_reg, fp); -+ REG_WRITE(dpll_reg, dpll); -+ REG_READ(dpll_reg); -+ /* Wait for the clocks to stabilize. */ -+ udelay(150); -+ -+ if (IS_I965G(dev)) { -+ int sdvo_pixel_multiply = -+ adjusted_mode->clock / mode->clock; -+ REG_WRITE(dpll_md_reg, -+ (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | -+ ((sdvo_pixel_multiply - -+ 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); -+ } else { -+ /* write it again -- the BIOS does, after all */ -+ REG_WRITE(dpll_reg, dpll); -+ } -+ REG_READ(dpll_reg); -+ /* Wait for the clocks to stabilize. */ -+ udelay(150); -+ -+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | -+ ((adjusted_mode->crtc_htotal - 1) << 16)); -+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | -+ ((adjusted_mode->crtc_hblank_end - 1) << 16)); -+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | -+ ((adjusted_mode->crtc_hsync_end - 1) << 16)); -+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | -+ ((adjusted_mode->crtc_vtotal - 1) << 16)); -+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | -+ ((adjusted_mode->crtc_vblank_end - 1) << 16)); -+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | -+ ((adjusted_mode->crtc_vsync_end - 1) << 16)); -+ /* pipesrc and dspsize control the size that is scaled from, -+ * which should always be the user's requested size. -+ */ -+ REG_WRITE(dspsize_reg, -+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); -+ REG_WRITE(dsppos_reg, 0); -+ REG_WRITE(pipesrc_reg, -+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); -+ REG_WRITE(pipeconf_reg, pipeconf); -+ REG_READ(pipeconf_reg); -+ -+ psb_intel_wait_for_vblank(dev); -+ -+ REG_WRITE(dspcntr_reg, dspcntr); -+ -+ /* Flush the plane changes */ -+ { -+ struct drm_crtc_helper_funcs *crtc_funcs = -+ crtc->helper_private; -+ crtc_funcs->mode_set_base(crtc, x, y, old_fb); -+ } -+ -+ psb_intel_wait_for_vblank(dev); -+ -+ return 0; -+} -+ -+/** Loads the palette/gamma unit for the CRTC with the prepared values */ -+void psb_intel_crtc_load_lut(struct drm_crtc *crtc) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ int palreg = (psb_intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B; -+ int i; -+ -+ /* The clocks have to be on to load the palette. 
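-+	 *
-+	 * Each of the 256 entries packs the 8-bit channels into a single
-+	 * 32-bit word, (R << 16) | (G << 8) | B, written at
-+	 * PALETTE_A/B + 4 * i.  A mid-grey entry with
-+	 * lut_r = lut_g = lut_b = 0x80, for example, is programmed as
-+	 * 0x00808080.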
*/
-+	if (!crtc->enabled)
-+		return;
-+
-+	for (i = 0; i < 256; i++) {
-+		REG_WRITE(palreg + 4 * i,
-+			  (psb_intel_crtc->lut_r[i] << 16) |
-+			  (psb_intel_crtc->lut_g[i] << 8) |
-+			  psb_intel_crtc->lut_b[i]);
-+	}
-+}
-+
-+static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
-+				 struct drm_file *file_priv,
-+				 uint32_t handle,
-+				 uint32_t width, uint32_t height)
-+{
-+	struct drm_device *dev = crtc->dev;
-+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
-+	struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
-+	int pipe = psb_intel_crtc->pipe;
-+	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
-+	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
-+	uint32_t temp;
-+	size_t addr = 0;
-+	size_t size;
-+	void *bo;
-+	int ret;
-+
-+	DRM_DEBUG("\n");
-+
-+	/* if we want to turn off the cursor ignore width and height */
-+	if (!handle) {
-+		DRM_DEBUG("cursor off\n");
-+		/* turn off the cursor */
-+		temp = 0;
-+		temp |= CURSOR_MODE_DISABLE;
-+
-+		REG_WRITE(control, temp);
-+		REG_WRITE(base, 0);
-+
-+		/* unpin the old bo */
-+		if (psb_intel_crtc->cursor_bo) {
-+			mode_dev->bo_unpin_for_scanout(dev,
-+						       psb_intel_crtc->
-+						       cursor_bo);
-+			psb_intel_crtc->cursor_bo = NULL;
-+		}
-+
-+		return 0;
-+	}
-+
-+	/* Currently we only support 64x64 cursors */
-+	if (width != 64 || height != 64) {
-+		DRM_ERROR("we currently only support 64x64 cursors\n");
-+		return -EINVAL;
-+	}
-+
-+	bo = mode_dev->bo_from_handle(dev, file_priv, handle);
-+	if (!bo)
-+		return -ENOENT;
-+
-+	ret = mode_dev->bo_pin_for_scanout(dev, bo);
-+	if (ret)
-+		return ret;
-+
-+	size = mode_dev->bo_size(dev, bo);
-+	if (size < width * height * 4) {
-+		DRM_ERROR("buffer is too small\n");
-+		return -ENOMEM;
-+	}
-+
-+	addr = mode_dev->bo_offset(dev, bo);
-+	if (mode_dev->cursor_needs_physical)
-+		addr = dev->agp->base + addr;
-+
-+	psb_intel_crtc->cursor_addr = addr;
-+	temp = 0;
-+	/* set the pipe for the cursor */
-+	temp |= (pipe << 28);
-+	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
-+
-+	REG_WRITE(control, temp);
-+	REG_WRITE(base, addr);
-+
-+	/* unpin the old bo and remember the new one */
-+	if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo)
-+		mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
-+	psb_intel_crtc->cursor_bo = bo;
-+
-+	return 0;
-+}
-+
-+static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-+{
-+	struct drm_device *dev = crtc->dev;
-+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
-+	int pipe = psb_intel_crtc->pipe;
-+	uint32_t temp = 0;
-+	uint32_t adder;
-+
-+	if (x < 0) {
-+		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
-+		x = -x;
-+	}
-+	if (y < 0) {
-+		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
-+		y = -y;
-+	}
-+
-+	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
-+	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
-+
-+	adder = psb_intel_crtc->cursor_addr;
-+	REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
-+	REG_WRITE((pipe == 0) ?
CURABASE : CURBBASE, adder); -+ -+ return 0; -+} -+ -+/** Sets the color ramps on behalf of RandR */ -+void psb_intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, -+ u16 blue, int regno) -+{ -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ -+ psb_intel_crtc->lut_r[regno] = red >> 8; -+ psb_intel_crtc->lut_g[regno] = green >> 8; -+ psb_intel_crtc->lut_b[regno] = blue >> 8; -+} -+ -+static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, -+ u16 *green, u16 *blue, uint32_t size) -+{ -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ int i; -+ -+ if (size != 256) -+ return; -+ -+ for (i = 0; i < 256; i++) { -+ psb_intel_crtc->lut_r[i] = red[i] >> 8; -+ psb_intel_crtc->lut_g[i] = green[i] >> 8; -+ psb_intel_crtc->lut_b[i] = blue[i] >> 8; -+ } -+ -+ psb_intel_crtc_load_lut(crtc); -+} -+ -+/** -+ * Get a pipe with a simple mode set on it for doing load-based monitor -+ * detection. -+ * -+ * It will be up to the load-detect code to adjust the pipe as appropriate for -+ * its requirements. The pipe will be connected to no other outputs. -+ * -+ * Currently this code will only succeed if there is a pipe with no outputs -+ * configured for it. In the future, it could choose to temporarily disable -+ * some outputs to free up a pipe for its use. -+ * -+ * \return crtc, or NULL if no pipes are available. -+ */ -+ -+/* VESA 640x480x72Hz mode to set on the pipe */ -+static struct drm_display_mode load_detect_mode = { -+ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, -+ 704, 832, 0, 480, 489, 491, 520, 0, -+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), -+}; -+ -+struct drm_crtc *psb_intel_get_load_detect_pipe(struct psb_intel_output -+ *psb_intel_output, -+ struct drm_display_mode *mode, -+ int *dpms_mode) -+{ -+ struct psb_intel_crtc *psb_intel_crtc; -+ struct drm_crtc *possible_crtc; -+ struct drm_crtc *supported_crtc = NULL; -+ struct drm_encoder *encoder = &psb_intel_output->enc; -+ struct drm_crtc *crtc = NULL; -+ struct drm_device *dev = encoder->dev; -+ struct drm_encoder_helper_funcs *encoder_funcs = -+ encoder->helper_private; -+ struct drm_crtc_helper_funcs *crtc_funcs; -+ int i = -1; -+ -+ /* -+ * Algorithm gets a little messy: -+ * - if the connector already has an assigned crtc, use it (but make -+ * sure it's on first) -+ * - try to find the first unused crtc that can drive this connector, -+ * and use that if we find one -+ * - if there are no unused crtcs available, try to use the first -+ * one we found that supports the connector -+ */ -+ -+ /* See if we already have a CRTC for this connector */ -+ if (encoder->crtc) { -+ crtc = encoder->crtc; -+ /* Make sure the crtc and connector are running */ -+ psb_intel_crtc = to_psb_intel_crtc(crtc); -+ *dpms_mode = psb_intel_crtc->dpms_mode; -+ if (psb_intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { -+ crtc_funcs = crtc->helper_private; -+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); -+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); -+ } -+ return crtc; -+ } -+ -+ /* Find an unused one (if possible) */ -+ list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, -+ head) { -+ i++; -+ if (!(encoder->possible_crtcs & (1 << i))) -+ continue; -+ if (!possible_crtc->enabled) { -+ crtc = possible_crtc; -+ break; -+ } -+ if (!supported_crtc) -+ supported_crtc = possible_crtc; -+ } -+ -+ /* -+ * If we didn't find an unused CRTC, don't use any. 
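-+	 *
-+	 * The "find an unused one" loop above treats
-+	 * encoder->possible_crtcs as a bitmask over pipe indices: a value
-+	 * of 0x3, for instance, has bits 0 and 1 set, so the
-+	 * (possible_crtcs & (1 << i)) test admits either pipe, while 0x2
-+	 * would restrict the encoder to pipe 1.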
-+ */ -+ if (!crtc) -+ return NULL; -+ -+ encoder->crtc = crtc; -+ psb_intel_output->load_detect_temp = true; -+ -+ psb_intel_crtc = to_psb_intel_crtc(crtc); -+ *dpms_mode = psb_intel_crtc->dpms_mode; -+ -+ if (!crtc->enabled) { -+ if (!mode) -+ mode = &load_detect_mode; -+ drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb); -+ } else { -+ if (psb_intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { -+ crtc_funcs = crtc->helper_private; -+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); -+ } -+ -+ /* Add this connector to the crtc */ -+ encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode); -+ encoder_funcs->commit(encoder); -+ } -+ /* let the connector get through one full cycle before testing */ -+ psb_intel_wait_for_vblank(dev); -+ -+ return crtc; -+} -+ -+void psb_intel_release_load_detect_pipe(struct psb_intel_output *psb_intel_output, -+ int dpms_mode) -+{ -+ struct drm_encoder *encoder = &psb_intel_output->enc; -+ struct drm_device *dev = encoder->dev; -+ struct drm_crtc *crtc = encoder->crtc; -+ struct drm_encoder_helper_funcs *encoder_funcs = -+ encoder->helper_private; -+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; -+ -+ if (psb_intel_output->load_detect_temp) { -+ encoder->crtc = NULL; -+ psb_intel_output->load_detect_temp = false; -+ crtc->enabled = drm_helper_crtc_in_use(crtc); -+ drm_helper_disable_unused_functions(dev); -+ } -+ -+ /* Switch crtc and output back off if necessary */ -+ if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { -+ if (encoder->crtc == crtc) -+ encoder_funcs->dpms(encoder, dpms_mode); -+ crtc_funcs->dpms(crtc, dpms_mode); -+ } -+} -+ -+/* Returns the clock of the currently programmed mode of the given pipe. */ -+static int psb_intel_crtc_clock_get(struct drm_device *dev, -+ struct drm_crtc *crtc) -+{ -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ int pipe = psb_intel_crtc->pipe; -+ u32 dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B); -+ u32 fp; -+ struct psb_intel_clock_t clock; -+ -+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) -+ fp = REG_READ((pipe == 0) ? FPA0 : FPB0); -+ else -+ fp = REG_READ((pipe == 0) ? FPA1 : FPB1); -+ -+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; -+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; -+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; -+ if (IS_I9XX(dev)) { -+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> -+ DPLL_FPA01_P1_POST_DIV_SHIFT); -+ -+ switch (dpll & DPLL_MODE_MASK) { -+ case DPLLB_MODE_DAC_SERIAL: -+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? -+ 5 : 10; -+ break; -+ case DPLLB_MODE_LVDS: -+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 
-+ 7 : 14; -+ break; -+ default: -+ DRM_DEBUG("Unknown DPLL mode %08x in programmed " -+ "mode\n", (int) (dpll & DPLL_MODE_MASK)); -+ return 0; -+ } -+ -+ /* XXX: Handle the 100Mhz refclk */ -+ i9xx_clock(96000, &clock); -+ } else { -+ bool is_lvds = (pipe == 1) -+ && (REG_READ(LVDS) & LVDS_PORT_EN); -+ -+ if (is_lvds) { -+ clock.p1 = -+ ffs((dpll & -+ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> -+ DPLL_FPA01_P1_POST_DIV_SHIFT); -+ clock.p2 = 14; -+ -+ if ((dpll & PLL_REF_INPUT_MASK) == -+ PLLB_REF_INPUT_SPREADSPECTRUMIN) { -+ /* XXX: might not be 66MHz */ -+ i8xx_clock(66000, &clock); -+ } else -+ i8xx_clock(48000, &clock); -+ } else { -+ if (dpll & PLL_P1_DIVIDE_BY_TWO) -+ clock.p1 = 2; -+ else { -+ clock.p1 = -+ ((dpll & -+ DPLL_FPA01_P1_POST_DIV_MASK_I830) >> -+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; -+ } -+ if (dpll & PLL_P2_DIVIDE_BY_4) -+ clock.p2 = 4; -+ else -+ clock.p2 = 2; -+ -+ i8xx_clock(48000, &clock); -+ } -+ } -+ -+ /* XXX: It would be nice to validate the clocks, but we can't reuse -+ * i830PllIsValid() because it relies on the xf86_config connector -+ * configuration being accurate, which it isn't necessarily. -+ */ -+ -+ return clock.dot; -+} -+ -+/** Returns the currently programmed mode of the given pipe. */ -+struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, -+ struct drm_crtc *crtc) -+{ -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ int pipe = psb_intel_crtc->pipe; -+ struct drm_display_mode *mode; -+ int htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); -+ int hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B); -+ int vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); -+ int vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B); -+ -+ mode = kzalloc(sizeof(*mode), GFP_KERNEL); -+ if (!mode) -+ return NULL; -+ -+ mode->clock = psb_intel_crtc_clock_get(dev, crtc); -+ mode->hdisplay = (htot & 0xffff) + 1; -+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1; -+ mode->hsync_start = (hsync & 0xffff) + 1; -+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; -+ mode->vdisplay = (vtot & 0xffff) + 1; -+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; -+ mode->vsync_start = (vsync & 0xffff) + 1; -+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; -+ -+ drm_mode_set_name(mode); -+ drm_mode_set_crtcinfo(mode, 0); -+ -+ return mode; -+} -+ -+static void psb_intel_crtc_destroy(struct drm_crtc *crtc) -+{ -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ -+ drm_crtc_cleanup(crtc); -+ kfree(psb_intel_crtc); -+} -+ -+static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { -+ .dpms = psb_intel_crtc_dpms, -+ .mode_fixup = psb_intel_crtc_mode_fixup, -+ .mode_set = psb_intel_crtc_mode_set, -+ .mode_set_base = psb_intel_pipe_set_base, -+ .prepare = psb_intel_crtc_prepare, -+ .commit = psb_intel_crtc_commit, -+}; -+ -+static const struct drm_crtc_helper_funcs mrst_helper_funcs; -+ -+const struct drm_crtc_funcs psb_intel_crtc_funcs = { -+ .cursor_set = psb_intel_crtc_cursor_set, -+ .cursor_move = psb_intel_crtc_cursor_move, -+ .gamma_set = psb_intel_crtc_gamma_set, -+ .set_config = drm_crtc_helper_set_config, -+ .destroy = psb_intel_crtc_destroy, -+}; -+ -+ -+void psb_intel_crtc_init(struct drm_device *dev, int pipe, -+ struct psb_intel_mode_device *mode_dev) -+{ -+ struct psb_intel_crtc *psb_intel_crtc; -+ int i; -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter psb_intel_crtc_init \n"); -+#endif /* PRINT_JLIU7 */ -+ -+ /* We allocate a extra array of drm_connector pointers -+ * for fbdev after the crtc */ -+ 
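-+	/*
-+	 * A single kzalloc() below covers both the crtc and the trailing
-+	 * array of INTELFB_CONN_LIMIT connector pointers, so the
-+	 * (psb_intel_crtc + 1) used further down points just past the
-+	 * struct, at the first pointer slot:
-+	 *
-+	 *	[ struct psb_intel_crtc | connectors[0..3] ]
-+	 *	  ^ psb_intel_crtc        ^ psb_intel_crtc + 1
-+	 */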
psb_intel_crtc = -+ kzalloc(sizeof(struct psb_intel_crtc) + -+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), -+ GFP_KERNEL); -+ if (psb_intel_crtc == NULL) -+ return; -+ -+ drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs); -+ -+ drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256); -+ psb_intel_crtc->pipe = pipe; -+ for (i = 0; i < 256; i++) { -+ psb_intel_crtc->lut_r[i] = i; -+ psb_intel_crtc->lut_g[i] = i; -+ psb_intel_crtc->lut_b[i] = i; -+ } -+ -+ psb_intel_crtc->mode_dev = mode_dev; -+ psb_intel_crtc->cursor_addr = 0; -+ psb_intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; -+ -+ if (IS_MRST(dev)) { -+ drm_crtc_helper_add(&psb_intel_crtc->base, &mrst_helper_funcs); -+ } else { -+ drm_crtc_helper_add(&psb_intel_crtc->base, -+ &psb_intel_helper_funcs); -+ } -+ -+ /* Setup the array of drm_connector pointer array */ -+ psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base; -+ psb_intel_crtc->mode_set.connectors = -+ (struct drm_connector **) (psb_intel_crtc + 1); -+ psb_intel_crtc->mode_set.num_connectors = 0; -+ -+#if 0 /* JB: not drop, What should go in here? */ -+ if (i915_fbpercrtc) -+#endif -+} -+ -+struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) -+{ -+ struct drm_crtc *crtc = NULL; -+ -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ if (psb_intel_crtc->pipe == pipe) -+ break; -+ } -+ return crtc; -+} -+ -+int psb_intel_connector_clones(struct drm_device *dev, int type_mask) -+{ -+ int index_mask = 0; -+ struct drm_connector *connector; -+ int entry = 0; -+ -+ list_for_each_entry(connector, &dev->mode_config.connector_list, -+ head) { -+ struct psb_intel_output *psb_intel_output = -+ to_psb_intel_output(connector); -+ if (type_mask & (1 << psb_intel_output->type)) -+ index_mask |= (1 << entry); -+ entry++; -+ } -+ return index_mask; -+} -+ -+#if 0 /* JB: Should be per device */ -+static void psb_intel_setup_outputs(struct drm_device *dev) -+{ -+ struct drm_connector *connector; -+ -+ psb_intel_crt_init(dev); -+ -+ /* Set up integrated LVDS */ -+ if (IS_MOBILE(dev) && !IS_I830(dev)) -+ psb_intel_lvds_init(dev); -+ -+ if (IS_I9XX(dev)) { -+ psb_intel_sdvo_init(dev, SDVOB); -+ psb_intel_sdvo_init(dev, SDVOC); -+ } else -+ psb_intel_dvo_init(dev); -+ -+ if (IS_I9XX(dev) && !IS_I915G(dev)) -+ psb_intel_tv_init(dev); -+ -+ list_for_each_entry(connector, &dev->mode_config.connector_list, -+ head) { -+ struct psb_intel_output *psb_intel_output = -+ to_psb_intel_output(connector); -+ struct drm_encoder *encoder = &psb_intel_output->enc; -+ int crtc_mask = 0, clone_mask = 0; -+ -+ /* valid crtcs */ -+ switch (psb_intel_output->type) { -+ case INTEL_OUTPUT_DVO: -+ case INTEL_OUTPUT_SDVO: -+ crtc_mask = ((1 << 0) | (1 << 1)); -+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) | -+ (1 << INTEL_OUTPUT_DVO) | -+ (1 << INTEL_OUTPUT_SDVO)); -+ break; -+ case INTEL_OUTPUT_ANALOG: -+ crtc_mask = ((1 << 0) | (1 << 1)); -+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) | -+ (1 << INTEL_OUTPUT_DVO) | -+ (1 << INTEL_OUTPUT_SDVO)); -+ break; -+ case INTEL_OUTPUT_LVDS: -+ crtc_mask = (1 << 1); -+ clone_mask = (1 << INTEL_OUTPUT_LVDS); -+ break; -+ case INTEL_OUTPUT_TVOUT: -+ crtc_mask = ((1 << 0) | (1 << 1)); -+ clone_mask = (1 << INTEL_OUTPUT_TVOUT); -+ break; -+ } -+ encoder->possible_crtcs = crtc_mask; -+ encoder->possible_clones = -+ psb_intel_connector_clones(dev, clone_mask); -+ } -+} -+#endif -+ -+#if 0 /* JB: Rework framebuffer code into something none device specific */ 
-+static void psb_intel_user_framebuffer_destroy(struct drm_framebuffer *fb) -+{ -+ struct psb_intel_framebuffer *psb_intel_fb = to_psb_intel_framebuffer(fb); -+ struct drm_device *dev = fb->dev; -+ -+ if (fb->fbdev) -+ intelfb_remove(dev, fb); -+ -+ drm_framebuffer_cleanup(fb); -+ drm_gem_object_unreference(fb->mm_private); -+ -+ kfree(psb_intel_fb); -+} -+ -+static int psb_intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, -+ struct drm_file *file_priv, -+ unsigned int *handle) -+{ -+ struct drm_gem_object *object = fb->mm_private; -+ -+ return drm_gem_handle_create(file_priv, object, handle); -+} -+ -+static const struct drm_framebuffer_funcs psb_intel_fb_funcs = { -+ .destroy = psb_intel_user_framebuffer_destroy, -+ .create_handle = psb_intel_user_framebuffer_create_handle, -+}; -+ -+struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device *dev, -+ struct drm_mode_fb_cmd -+ *mode_cmd, -+ void *mm_private) -+{ -+ struct psb_intel_framebuffer *psb_intel_fb; -+ -+ psb_intel_fb = kzalloc(sizeof(*psb_intel_fb), GFP_KERNEL); -+ if (!psb_intel_fb) -+ return NULL; -+ -+ if (!drm_framebuffer_init(dev, &psb_intel_fb->base, &psb_intel_fb_funcs)) -+ return NULL; -+ -+ drm_helper_mode_fill_fb_struct(&psb_intel_fb->base, mode_cmd); -+ -+ return &psb_intel_fb->base; -+} -+ -+ -+static struct drm_framebuffer *psb_intel_user_framebuffer_create(struct -+ drm_device -+ *dev, -+ struct -+ drm_file -+ *filp, -+ struct -+ drm_mode_fb_cmd -+ *mode_cmd) -+{ -+ struct drm_gem_object *obj; -+ -+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); -+ if (!obj) -+ return NULL; -+ -+ return psb_intel_framebuffer_create(dev, mode_cmd, obj); -+} -+ -+static int psb_intel_insert_new_fb(struct drm_device *dev, -+ struct drm_file *file_priv, -+ struct drm_framebuffer *fb, -+ struct drm_mode_fb_cmd *mode_cmd) -+{ -+ struct psb_intel_framebuffer *psb_intel_fb; -+ struct drm_gem_object *obj; -+ struct drm_crtc *crtc; -+ -+ psb_intel_fb = to_psb_intel_framebuffer(fb); -+ -+ mutex_lock(&dev->struct_mutex); -+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); -+ -+ if (!obj) { -+ mutex_unlock(&dev->struct_mutex); -+ return -EINVAL; -+ } -+ drm_gem_object_unreference(psb_intel_fb->base.mm_private); -+ drm_helper_mode_fill_fb_struct(fb, mode_cmd, obj); -+ mutex_unlock(&dev->struct_mutex); -+ -+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { -+ if (crtc->fb == fb) { -+ struct drm_crtc_helper_funcs *crtc_funcs = -+ crtc->helper_private; -+ crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y); -+ } -+ } -+ return 0; -+} -+ -+static const struct drm_mode_config_funcs psb_intel_mode_funcs = { -+ .resize_fb = psb_intel_insert_new_fb, -+ .fb_create = psb_intel_user_framebuffer_create, -+ .fb_changed = intelfb_probe, -+}; -+#endif -+ -+#if 0 /* Should be per device */ -+void psb_intel_modeset_init(struct drm_device *dev) -+{ -+ int num_pipe; -+ int i; -+ -+ drm_mode_config_init(dev); -+ -+ dev->mode_config.min_width = 0; -+ dev->mode_config.min_height = 0; -+ -+ dev->mode_config.funcs = (void *) &psb_intel_mode_funcs; -+ -+ if (IS_I965G(dev)) { -+ dev->mode_config.max_width = 8192; -+ dev->mode_config.max_height = 8192; -+ } else { -+ dev->mode_config.max_width = 2048; -+ dev->mode_config.max_height = 2048; -+ } -+ -+ /* set memory base */ -+ if (IS_I9XX(dev)) -+ dev->mode_config.fb_base = -+ pci_resource_start(dev->pdev, 2); -+ else -+ dev->mode_config.fb_base = -+ pci_resource_start(dev->pdev, 0); -+ -+ if (IS_MOBILE(dev) || IS_I9XX(dev)) -+ num_pipe = 2; -+ else -+ 
num_pipe = 1; -+ DRM_DEBUG("%d display pipe%s available.\n", -+ num_pipe, num_pipe > 1 ? "s" : ""); -+ -+ for (i = 0; i < num_pipe; i++) -+ psb_intel_crtc_init(dev, i); -+ -+ psb_intel_setup_outputs(dev); -+ -+ /* setup fbs */ -+ /* drm_initial_config(dev, false); */ -+} -+#endif -+ -+void psb_intel_modeset_cleanup(struct drm_device *dev) -+{ -+ drm_mode_config_cleanup(dev); -+} -+ -+ -+/* current intel driver doesn't take advantage of encoders -+ always give back the encoder for the connector -+*/ -+struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector) -+{ -+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); -+ -+ return &psb_intel_output->enc; -+} -+ -+/* MRST_PLATFORM start */ -+ -+#if DUMP_REGISTER -+void dump_dc_registers(struct drm_device *dev) -+{ -+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private; -+ unsigned int i = 0; -+ -+ DRM_INFO("jliu7 dump_dc_registers\n"); -+ -+ -+ if (0x80000000 & REG_READ(0x70008)) { -+ for (i = 0x20a0; i < 0x20af; i += 4) { -+ DRM_INFO("jliu7 interrupt register=0x%x, value=%x\n", i, (unsigned int) REG_READ(i)); -+ } -+ -+ for (i = 0xf014; i < 0xf047; i += 4) { -+ DRM_INFO -+ ("jliu7 pipe A dpll register=0x%x, value=%x\n", -+ i, (unsigned int) REG_READ(i)); -+ } -+ -+ for (i = 0x60000; i < 0x6005f; i += 4) { -+ DRM_INFO -+ ("jliu7 pipe A timing register=0x%x, value=%x\n", -+ i, (unsigned int) REG_READ(i)); -+ } -+ -+ for (i = 0x61140; i < 0x61143; i += 4) { -+ DRM_INFO("jliu7 SDBOB register=0x%x, value=%x\n", -+ i, (unsigned int) REG_READ(i)); -+ } -+ -+ for (i = 0x61180; i < 0x6123F; i += 4) { -+ DRM_INFO -+ ("jliu7 LVDS PORT register=0x%x, value=%x\n", -+ i, (unsigned int) REG_READ(i)); -+ } -+ -+ for (i = 0x61254; i < 0x612AB; i += 4) { -+ DRM_INFO("jliu7 BLC register=0x%x, value=%x\n", -+ i, (unsigned int) REG_READ(i)); -+ } -+ -+ for (i = 0x70000; i < 0x70047; i += 4) { -+ DRM_INFO -+ ("jliu7 PIPE A control register=0x%x, value=%x\n", -+ i, (unsigned int) REG_READ(i)); -+ } -+ -+ for (i = 0x70180; i < 0x7020b; i += 4) { -+ DRM_INFO("jliu7 display A control register=0x%x," -+ "value=%x\n", i, -+ (unsigned int) REG_READ(i)); -+ } -+ -+ for (i = 0x71400; i < 0x71403; i += 4) { -+ DRM_INFO -+ ("jliu7 VGA Display Plane Control register=0x%x," -+ "value=%x\n", i, (unsigned int) REG_READ(i)); -+ } -+ } -+ -+ if (0x80000000 & REG_READ(0x71008)) { -+ for (i = 0x61000; i < 0x6105f; i += 4) { -+ DRM_INFO -+ ("jliu7 pipe B timing register=0x%x, value=%x\n", -+ i, (unsigned int) REG_READ(i)); -+ } -+ -+ for (i = 0x71000; i < 0x71047; i += 4) { -+ DRM_INFO -+ ("jliu7 PIPE B control register=0x%x, value=%x\n", -+ i, (unsigned int) REG_READ(i)); -+ } -+ -+ for (i = 0x71180; i < 0x7120b; i += 4) { -+ DRM_INFO("jliu7 display B control register=0x%x," -+ "value=%x\n", i, -+ (unsigned int) REG_READ(i)); -+ } -+ } -+#if 0 -+ for (i = 0x70080; i < 0x700df; i += 4) { -+ DRM_INFO("jliu7 cursor A & B register=0x%x, value=%x\n", -+ i, (unsigned int) REG_READ(i)); -+ } -+#endif -+ -+} -+ -+void dump_dsi_registers(struct drm_device *dev) -+{ -+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private; -+ unsigned int i = 0; -+ -+ DRM_INFO("jliu7 dump_dsi_registers\n"); -+ -+ for (i = 0xb000; i < 0xb064; i += 4) { -+ DRM_INFO("jliu7 MIPI IP register=0x%x, value=%x\n", i, -+ (unsigned int) REG_READ(i)); -+ } -+ -+ i = 0xb104; -+ DRM_INFO("jliu7 MIPI control register=0x%x, value=%x\n", -+ i, (unsigned int) REG_READ(i)); -+} -+#endif /* DUMP_REGISTER */ -+ -+ -+struct mrst_limit_t { -+ struct psb_intel_range_t dot, m, p1; -+}; -+ -+struct 
mrst_clock_t {
-+	/* derived values */
-+	int dot;
-+	int m;
-+	int p1;
-+};
-+
-+#define MRST_LIMIT_LVDS_100L	0
-+#define MRST_LIMIT_LVDS_83	1
-+#define MRST_LIMIT_LVDS_100	2
-+
-+#define MRST_DOT_MIN		19750
-+#define MRST_DOT_MAX		120000
-+#define MRST_M_MIN_100L		20
-+#define MRST_M_MIN_100		10
-+#define MRST_M_MIN_83		12
-+#define MRST_M_MAX_100L		34
-+#define MRST_M_MAX_100		17
-+#define MRST_M_MAX_83		20
-+#define MRST_P1_MIN		2
-+#define MRST_P1_MAX_0		7
-+#define MRST_P1_MAX_1		8
-+
-+static const struct mrst_limit_t mrst_limits[] = {
-+	{			/* MRST_LIMIT_LVDS_100L */
-+	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
-+	 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
-+	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
-+	 },
-+	{			/* MRST_LIMIT_LVDS_83 */
-+	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
-+	 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
-+	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
-+	 },
-+	{			/* MRST_LIMIT_LVDS_100 */
-+	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
-+	 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
-+	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
-+	 },
-+};
-+
-+#define MRST_M_MIN	    10
-+static const u32 mrst_m_converts[] = {
-+	0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
-+	0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
-+	0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
-+};
-+
-+#define COUNT_MAX 0x10000000
-+void mrstWaitForPipeDisable(struct drm_device *dev)
-+{
-+	int count, temp;
-+
-+	/* FIXME JLIU7_PO */
-+	psb_intel_wait_for_vblank(dev);
-+	return;
-+
-+	/* Wait for the pipe disable to take effect. */
-+	for (count = 0; count < COUNT_MAX; count++) {
-+		temp = REG_READ(PIPEACONF);
-+		if ((temp & PIPEACONF_PIPE_STATE) == 0)
-+			break;
-+	}
-+
-+	if (count == COUNT_MAX) {
-+#if PRINT_JLIU7
-+		DRM_INFO("JLIU7 mrstWaitForPipeDisable timed out.\n");
-+#endif /* PRINT_JLIU7 */
-+	} else {
-+#if PRINT_JLIU7
-+		DRM_INFO("JLIU7 mrstWaitForPipeDisable count = %d.\n",
-+			count);
-+#endif /* PRINT_JLIU7 */
-+	}
-+}
-+
-+void mrstWaitForPipeEnable(struct drm_device *dev)
-+{
-+	int count, temp;
-+
-+	/* FIXME JLIU7_PO */
-+	psb_intel_wait_for_vblank(dev);
-+	return;
-+
-+	/* Wait for the pipe enable to take effect. */
-+	for (count = 0; count < COUNT_MAX; count++) {
-+		temp = REG_READ(PIPEACONF);
-+		if ((temp & PIPEACONF_PIPE_STATE) != 0)
-+			break;
-+	}
-+
-+	if (count == COUNT_MAX) {
-+#if PRINT_JLIU7
-+		DRM_INFO("JLIU7 mrstWaitForPipeEnable timed out.\n");
-+#endif /* PRINT_JLIU7 */
-+	} else {
-+#if PRINT_JLIU7
-+		DRM_INFO("JLIU7 mrstWaitForPipeEnable count = %d.\n",
-+			count);
-+#endif /* PRINT_JLIU7 */
-+	}
-+}
-+
-+static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
-+{
-+	const struct mrst_limit_t *limit = NULL;
-+	struct drm_device *dev = crtc->dev;
-+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+
-+	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
-+	    || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
-+		if (dev_priv->sku_100L)
-+			limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
-+		if (dev_priv->sku_83)
-+			limit = &mrst_limits[MRST_LIMIT_LVDS_83];
-+		if (dev_priv->sku_100)
-+			limit = &mrst_limits[MRST_LIMIT_LVDS_100];
-+	} else {
-+#if PRINT_JLIU7
-+		DRM_INFO("JLIU7 jliu7 mrst_limit Wrong display type.\n");
-+#endif /* PRINT_JLIU7 */
-+	}
-+
-+	return limit;
-+}
-+
-+/** Derive the pixel clock for the given refclk and divisors, for MRST.
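-+ *
-+ * The formula below is dot = (refclk * m) / (14 * p1), with every clock
-+ * in kHz.  On a 100L SKU (refclk = 100000), m = 20 and p1 = 2 give, for
-+ * example:
-+ *
-+ *	dot = 100000 * 20 / (14 * 2) = 71428 kHz
-+ *
-+ * which lands inside the MRST_DOT_MIN..MRST_DOT_MAX window above.
-+ * mrstFindBestPLL() further down simply walks every (m, p1) pair in the
-+ * active limit table and keeps the candidate minimising |dot - target|;
-+ * for the 100L limits that is (34 - 20 + 1) * (8 - 2 + 1) = 105
-+ * evaluations per mode set.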
*/ -+static void mrst_clock(int refclk, struct mrst_clock_t *clock) -+{ -+ clock->dot = (refclk * clock->m) / (14 * clock->p1); -+} -+ -+void mrstPrintPll(char *prefix, struct mrst_clock_t *clock) -+{ -+#if PRINT_JLIU7 -+ DRM_INFO -+ ("JLIU7 mrstPrintPll %s: dotclock = %d, m = %d, p1 = %d. \n", -+ prefix, clock->dot, clock->m, clock->p1); -+#endif /* PRINT_JLIU7 */ -+} -+ -+/** -+ * Returns a set of divisors for the desired target clock with the given refclk, -+ * or FALSE. Divisor values are the actual divisors for -+ */ -+static bool -+mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk, -+ struct mrst_clock_t *best_clock) -+{ -+ struct mrst_clock_t clock; -+ const struct mrst_limit_t *limit = mrst_limit(crtc); -+ int err = target; -+ -+ memset(best_clock, 0, sizeof(*best_clock)); -+ -+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) { -+ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; -+ clock.p1++) { -+ int this_err; -+ -+ mrst_clock(refclk, &clock); -+ -+ this_err = abs(clock.dot - target); -+ if (this_err < err) { -+ *best_clock = clock; -+ err = this_err; -+ } -+ } -+ } -+ DRM_DEBUG("mrstFindBestPLL err = %d.\n", err); -+ -+ return err != target; -+} -+ -+/** -+ * Sets the power management mode of the pipe and plane. -+ * -+ * This code should probably grow support for turning the cursor off and back -+ * on appropriately at the same time as we're turning the pipe off/on. -+ */ -+static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode) -+{ -+ struct drm_device *dev = crtc->dev; -+ /* struct drm_i915_master_private *master_priv; */ -+ /* struct drm_i915_private *dev_priv = dev->dev_private; */ -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ int pipe = psb_intel_crtc->pipe; -+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B; -+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; -+ int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE; -+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; -+ u32 temp; -+ bool enabled; -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter mrst_crtc_dpms, mode = %d, pipe = %d \n", -+ mode, pipe); -+#endif /* PRINT_JLIU7 */ -+ -+ /* XXX: When our outputs are all unaware of DPMS modes other than off -+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. -+ */ -+ switch (mode) { -+ case DRM_MODE_DPMS_ON: -+ case DRM_MODE_DPMS_STANDBY: -+ case DRM_MODE_DPMS_SUSPEND: -+ /* Enable the DPLL */ -+ temp = REG_READ(dpll_reg); -+ if ((temp & DPLL_VCO_ENABLE) == 0) { -+ REG_WRITE(dpll_reg, temp); -+ REG_READ(dpll_reg); -+ /* Wait for the clocks to stabilize. */ -+ udelay(150); -+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); -+ REG_READ(dpll_reg); -+ /* Wait for the clocks to stabilize. */ -+ udelay(150); -+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); -+ REG_READ(dpll_reg); -+ /* Wait for the clocks to stabilize. 
*/ -+ udelay(150); -+ } -+ -+ /* Enable the pipe */ -+ temp = REG_READ(pipeconf_reg); -+ if ((temp & PIPEACONF_ENABLE) == 0) -+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); -+ -+ /* Enable the plane */ -+ temp = REG_READ(dspcntr_reg); -+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) { -+ REG_WRITE(dspcntr_reg, -+ temp | DISPLAY_PLANE_ENABLE); -+ /* Flush the plane changes */ -+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); -+ } -+ -+ psb_intel_crtc_load_lut(crtc); -+ -+ /* Give the overlay scaler a chance to enable -+ if it's on this pipe */ -+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */ -+ break; -+ case DRM_MODE_DPMS_OFF: -+ /* Give the overlay scaler a chance to disable -+ * if it's on this pipe */ -+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ -+ -+ /* Disable the VGA plane that we never use */ -+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); -+ -+ /* Disable display plane */ -+ temp = REG_READ(dspcntr_reg); -+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) { -+ REG_WRITE(dspcntr_reg, -+ temp & ~DISPLAY_PLANE_ENABLE); -+ /* Flush the plane changes */ -+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); -+ REG_READ(dspbase_reg); -+ } -+ -+ if (!IS_I9XX(dev)) { -+ /* Wait for vblank for the disable to take effect */ -+ psb_intel_wait_for_vblank(dev); -+ } -+ -+ /* Next, disable display pipes */ -+ temp = REG_READ(pipeconf_reg); -+ if ((temp & PIPEACONF_ENABLE) != 0) { -+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); -+ REG_READ(pipeconf_reg); -+ } -+ -+ /* Wait for for the pipe disable to take effect. */ -+ mrstWaitForPipeDisable(dev); -+ -+ temp = REG_READ(dpll_reg); -+ if ((temp & DPLL_VCO_ENABLE) != 0) { -+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); -+ REG_READ(dpll_reg); -+ } -+ -+ /* Wait for the clocks to turn off. */ -+ udelay(150); -+ break; -+ } -+ -+#if DUMP_REGISTER -+ dump_dc_registers(dev); -+#endif /* DUMP_REGISTER */ -+ -+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; -+ -+#if 0 /* JB: Add vblank support later */ -+ if (enabled) -+ dev_priv->vblank_pipe |= (1 << pipe); -+ else -+ dev_priv->vblank_pipe &= ~(1 << pipe); -+#endif -+ -+ psb_intel_crtc->dpms_mode = mode; -+ -+#if 0 /* JB: Add sarea support later */ -+ if (!dev->primary->master) -+ return; -+ -+ master_priv = dev->primary->master->driver_priv; -+ if (!master_priv->sarea_priv) -+ return; -+ -+ switch (pipe) { -+ case 0: -+ master_priv->sarea_priv->planeA_w = -+ enabled ? crtc->mode.hdisplay : 0; -+ master_priv->sarea_priv->planeA_h = -+ enabled ? crtc->mode.vdisplay : 0; -+ break; -+ case 1: -+ master_priv->sarea_priv->planeB_w = -+ enabled ? crtc->mode.hdisplay : 0; -+ master_priv->sarea_priv->planeB_h = -+ enabled ? crtc->mode.vdisplay : 0; -+ break; -+ default: -+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); -+ break; -+ } -+#endif -+} -+ -+static int mrst_crtc_mode_set(struct drm_crtc *crtc, -+ struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode, -+ int x, int y, -+ struct drm_framebuffer *old_fb) -+{ -+ struct drm_device *dev = crtc->dev; -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private; -+ int pipe = psb_intel_crtc->pipe; -+ int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0; -+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B; -+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; -+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; -+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; -+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; -+ int hsync_reg = (pipe == 0) ? 
HSYNC_A : HSYNC_B; -+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; -+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; -+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; -+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; -+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; -+ int refclk = 0; -+ struct mrst_clock_t clock; -+ u32 dpll = 0, fp = 0, dspcntr, pipeconf, lvdsport; -+ bool ok, is_sdvo = false; -+ bool is_crt = false, is_lvds = false, is_tv = false; -+ bool is_mipi = false; -+ struct drm_mode_config *mode_config = &dev->mode_config; -+ struct drm_connector *connector; -+ struct psb_intel_output *psb_intel_output; -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter mrst_crtc_mode_set \n"); -+#endif /* PRINT_JLIU7 */ -+ -+ list_for_each_entry(connector, &mode_config->connector_list, head) { -+ psb_intel_output = to_psb_intel_output(connector); -+ -+ if (!connector->encoder -+ || connector->encoder->crtc != crtc) -+ continue; -+ -+ switch (psb_intel_output->type) { -+ case INTEL_OUTPUT_LVDS: -+ is_lvds = true; -+ break; -+ case INTEL_OUTPUT_SDVO: -+ is_sdvo = true; -+ break; -+ case INTEL_OUTPUT_TVOUT: -+ is_tv = true; -+ break; -+ case INTEL_OUTPUT_ANALOG: -+ is_crt = true; -+ break; -+ case INTEL_OUTPUT_MIPI: -+ is_mipi = true; -+ break; -+ } -+ } -+ -+ if (is_lvds | is_mipi) { -+ /*FIXME JLIU7 Get panel power delay parameters from -+ config data */ -+ REG_WRITE(0x61208, 0x25807d0); -+ REG_WRITE(0x6120c, 0x1f407d0); -+ REG_WRITE(0x61210, 0x270f04); -+ } -+ -+ /* Disable the VGA plane that we never use */ -+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); -+ -+ /* Disable the panel fitter if it was on our pipe */ -+ if (psb_intel_panel_fitter_pipe(dev) == pipe) -+ REG_WRITE(PFIT_CONTROL, 0); -+ -+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); -+ drm_mode_debug_printmodeline(mode); -+ -+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | -+ ((adjusted_mode->crtc_htotal - 1) << 16)); -+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | -+ ((adjusted_mode->crtc_hblank_end - 1) << 16)); -+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | -+ ((adjusted_mode->crtc_hsync_end - 1) << 16)); -+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | -+ ((adjusted_mode->crtc_vtotal - 1) << 16)); -+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | -+ ((adjusted_mode->crtc_vblank_end - 1) << 16)); -+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | -+ ((adjusted_mode->crtc_vsync_end - 1) << 16)); -+ /* pipesrc and dspsize control the size that is scaled from, -+ * which should always be the user's requested size. 
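-+	 *
-+	 * Both registers hold (size - 1) fields: DSPSIZE below is written
-+	 * as ((vdisplay - 1) << 16) | (hdisplay - 1), while PIPESRC swaps
-+	 * the two halves.  For a 1024x600 panel, say, that gives
-+	 *
-+	 *	DSPASIZE = (599 << 16) | 1023 = 0x025703ff
-+	 *	PIPEASRC = (1023 << 16) | 599 = 0x03ff0257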
-+ */ -+ REG_WRITE(dspsize_reg, -+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); -+ REG_WRITE(pipesrc_reg, -+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); -+ -+ /* Flush the plane changes */ -+ { -+ struct drm_crtc_helper_funcs *crtc_funcs = -+ crtc->helper_private; -+ crtc_funcs->mode_set_base(crtc, x, y, old_fb); -+ } -+ -+ /* setup pipeconf */ -+ pipeconf = REG_READ(pipeconf_reg); -+ -+ /* Set up the display plane register */ -+ dspcntr = REG_READ(dspcntr_reg); -+ dspcntr |= DISPPLANE_GAMMA_ENABLE; -+ -+ if (pipe == 0) -+ dspcntr |= DISPPLANE_SEL_PIPE_A; -+ else -+ dspcntr |= DISPPLANE_SEL_PIPE_B; -+ -+ dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE; -+ dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE; -+ -+ if (is_mipi) -+ return 0; -+ -+ if (dev_priv->sku_100L) -+ refclk = 100000; -+ else if (dev_priv->sku_83) -+ refclk = 166000; -+ else if (dev_priv->sku_100) -+ refclk = 200000; -+ -+ dpll = 0; /*BIT16 = 0 for 100MHz reference */ -+ -+ ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock); -+ -+ if (!ok) { -+#if 0 /* FIXME JLIU7 */ -+ DRM_ERROR("Couldn't find PLL settings for mode!\n"); -+ return; -+#endif /* FIXME JLIU7 */ -+#if PRINT_JLIU7 -+ DRM_INFO -+ ("JLIU7 mrstFindBestPLL fail in mrst_crtc_mode_set. \n"); -+#endif /* PRINT_JLIU7 */ -+ } else { -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 mrst_crtc_mode_set pixel clock = %d," -+ "m = %x, p1 = %x. \n", clock.dot, clock.m, -+ clock.p1); -+#endif /* PRINT_JLIU7 */ -+ } -+ -+ fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8; -+ -+ dpll |= DPLL_VGA_MODE_DIS; -+ -+ -+ dpll |= DPLL_VCO_ENABLE; -+ -+ if (is_lvds) -+ dpll |= DPLLA_MODE_LVDS; -+ else -+ dpll |= DPLLB_MODE_DAC_SERIAL; -+ -+ if (is_sdvo) { -+ int sdvo_pixel_multiply = -+ adjusted_mode->clock / mode->clock; -+ -+ dpll |= DPLL_DVO_HIGH_SPEED; -+ dpll |= -+ (sdvo_pixel_multiply - -+ 1) << SDVO_MULTIPLIER_SHIFT_HIRES; -+ } -+ -+ -+ /* compute bitmask from p1 value */ -+ dpll |= (1 << (clock.p1 - 2)) << 17; -+ -+ dpll |= DPLL_VCO_ENABLE; -+ -+#if PRINT_JLIU7 -+ mrstPrintPll("chosen", &clock); -+#endif /* PRINT_JLIU7 */ -+ -+#if 0 -+ if (!xf86ModesEqual(mode, adjusted_mode)) { -+ xf86DrvMsg(pScrn->scrnIndex, X_INFO, -+ "Adjusted mode for pipe %c:\n", -+ pipe == 0 ? 'A' : 'B'); -+ xf86PrintModeline(pScrn->scrnIndex, mode); -+ } -+ i830PrintPll("chosen", &clock); -+#endif -+ -+ if (dpll & DPLL_VCO_ENABLE) { -+ REG_WRITE(fp_reg, fp); -+ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); -+ REG_READ(dpll_reg); -+/* FIXME jliu7 check the DPLLA lock bit PIPEACONF[29] */ -+ udelay(150); -+ } -+ -+ /* The LVDS pin pair needs to be on before the DPLLs are enabled. -+ * This is an exception to the general rule that mode_set doesn't turn -+ * things on. -+ */ -+ if (is_lvds) { -+ -+ /* FIXME JLIU7 need to support 24bit panel */ -+#if MRST_24BIT_LVDS -+ lvdsport = -+ (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN -+ | LVDS_A3_POWER_UP | LVDS_A0A2_CLKA_POWER_UP; -+ -+#if MRST_24BIT_DOT_1 -+ lvdsport |= MRST_PANEL_24_DOT_1_FORMAT; -+#endif /* MRST_24BIT_DOT_1 */ -+ -+#else /* MRST_24BIT_LVDS */ -+ lvdsport = -+ (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN; -+#endif /* MRST_24BIT_LVDS */ -+ -+#if MRST_24BIT_WA -+ lvdsport = 0x80300340; -+#else /* MRST_24BIT_DOT_WA */ -+ lvdsport = 0x82300300; -+#endif /* MRST_24BIT_DOT_WA */ -+ -+ REG_WRITE(LVDS, lvdsport); -+ REG_READ(LVDS); -+ } -+ -+ REG_WRITE(fp_reg, fp); -+ REG_WRITE(dpll_reg, dpll); -+ REG_READ(dpll_reg); -+ /* Wait for the clocks to stabilize. 
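-+	 *
-+	 * The fixed udelay(150) stands in for a real lock check; the FIXME
-+	 * above notes that the DPLL A lock status is reported in
-+	 * PIPEACONF[29], so a bounded poll along the lines of
-+	 *
-+	 *	int t;
-+	 *	for (t = 0; t < 1000; t++) {
-+	 *		if (REG_READ(PIPEACONF) & (1 << 29))
-+	 *			break;
-+	 *		udelay(10);
-+	 *	}
-+	 *
-+	 * could replace it (a sketch only: the bit position is taken from
-+	 * that FIXME, not verified against the datasheet).  Note also that
-+	 * the fp word programmed above comes straight from the lookup
-+	 * table, fp = mrst_m_converts[m - MRST_M_MIN] << 8; m = 20 indexes
-+	 * entry 10 (0x36), yielding fp = 0x3600.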
*/ -+ udelay(150); -+ -+ /* write it again -- the BIOS does, after all */ -+ REG_WRITE(dpll_reg, dpll); -+ REG_READ(dpll_reg); -+ /* Wait for the clocks to stabilize. */ -+ udelay(150); -+ -+ REG_WRITE(pipeconf_reg, pipeconf); -+ REG_READ(pipeconf_reg); -+ -+ /* Wait for for the pipe enable to take effect. */ -+ mrstWaitForPipeEnable(dev); -+ -+ REG_WRITE(dspcntr_reg, dspcntr); -+ psb_intel_wait_for_vblank(dev); -+ -+ return 0; -+} -+ -+ -+static const struct drm_crtc_helper_funcs mrst_helper_funcs = { -+ .dpms = mrst_crtc_dpms, -+ .mode_fixup = psb_intel_crtc_mode_fixup, -+ .mode_set = mrst_crtc_mode_set, -+ .mode_set_base = psb_intel_pipe_set_base, -+ .prepare = psb_intel_crtc_prepare, -+ .commit = psb_intel_crtc_commit, -+}; -+ -+/* MRST_PLATFORM end */ -diff -uNr a/drivers/gpu/drm/psb/psb_intel_display.h b/drivers/gpu/drm/psb/psb_intel_display.h ---- a/drivers/gpu/drm/psb/psb_intel_display.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_intel_display.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,31 @@ -+ -+/* copyright (c) 2008, Intel Corporation -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. 
-+ * -+ * Authors: -+ * Eric Anholt <eric@anholt.net> -+ */ -+ -+#ifndef _INTEL_DISPLAY_H_ -+#define _INTEL_DISPLAY_H_ -+ -+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type); -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/psb_intel_drv.h b/drivers/gpu/drm/psb/psb_intel_drv.h ---- a/drivers/gpu/drm/psb/psb_intel_drv.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_intel_drv.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,192 @@ -+/* -+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> -+ * Copyright (c) 2007 Intel Corporation -+ * Jesse Barnes <jesse.barnes@intel.com> -+ */ -+#ifndef __INTEL_DRV_H__ -+#define __INTEL_DRV_H__ -+ -+#include <linux/i2c.h> -+#include <linux/i2c-id.h> -+#include <linux/i2c-algo-bit.h> -+#include <drm/drm_crtc.h> -+ -+#include <drm/drm_crtc_helper.h> -+ -+/* -+ * MOORESTOWN defines -+ */ -+#define MRST_I2C 0 -+ -+#define DUMP_REGISTER 0 -+#define MRST_24BIT_LVDS 1 -+#define MRST_24BIT_DOT_1 0 -+#define MRST_24BIT_WA 0 -+ -+#define PRINT_JLIU7 0 -+#define DELAY_TIME1 80 /* 1000 = 1ms */ -+ -+/* -+ * Display related stuff -+ */ -+ -+/* store information about an Ixxx DVO */ -+/* The i830->i865 use multiple DVOs with multiple i2cs */ -+/* the i915, i945 have a single sDVO i2c bus - which is different */ -+#define MAX_OUTPUTS 6 -+/* maximum connectors per crtcs in the mode set */ -+#define INTELFB_CONN_LIMIT 4 -+ -+#define INTEL_I2C_BUS_DVO 1 -+#define INTEL_I2C_BUS_SDVO 2 -+ -+/* these are outputs from the chip - integrated only -+ * external chips are via DVO or SDVO output */ -+#define INTEL_OUTPUT_UNUSED 0 -+#define INTEL_OUTPUT_ANALOG 1 -+#define INTEL_OUTPUT_DVO 2 -+#define INTEL_OUTPUT_SDVO 3 -+#define INTEL_OUTPUT_LVDS 4 -+#define INTEL_OUTPUT_TVOUT 5 -+#define INTEL_OUTPUT_MIPI 6 -+ -+#define INTEL_DVO_CHIP_NONE 0 -+#define INTEL_DVO_CHIP_LVDS 1 -+#define INTEL_DVO_CHIP_TMDS 2 -+#define INTEL_DVO_CHIP_TVOUT 4 -+ -+/** -+ * Hold information useally put on the device driver privates here, -+ * since it needs to be shared across multiple of devices drivers privates. -+ */ -+struct psb_intel_mode_device { -+ -+ /* -+ * Abstracted memory manager operations -+ */ -+ void *(*bo_from_handle) (struct drm_device *dev, -+ struct drm_file *file_priv, -+ unsigned int handle); -+ size_t(*bo_size) (struct drm_device *dev, void *bo); -+ size_t(*bo_offset) (struct drm_device *dev, void *bo); -+ int (*bo_pin_for_scanout) (struct drm_device *dev, void *bo); -+ int (*bo_unpin_for_scanout) (struct drm_device *dev, void *bo); -+ -+ /* -+ * Cursor -+ */ -+ int cursor_needs_physical; -+ -+ /* -+ * LVDS info -+ */ -+ int backlight_duty_cycle; /* restore backlight to this value */ -+ bool panel_wants_dither; -+ struct drm_display_mode *panel_fixed_mode; -+ struct drm_display_mode *vbt_mode; /* if any */ -+ -+ uint32_t saveBLC_PWM_CTL; -+}; -+ -+struct psb_intel_i2c_chan { -+ /* for getting at dev. private (mmio etc.) 
*/ -+ struct drm_device *drm_dev; -+ u32 reg; /* GPIO reg */ -+ struct i2c_adapter adapter; -+ struct i2c_algo_bit_data algo; -+ u8 slave_addr; -+}; -+ -+struct psb_intel_output { -+ struct drm_connector base; -+ -+ struct drm_encoder enc; -+ int type; -+ struct psb_intel_i2c_chan *i2c_bus; /* for control functions */ -+ struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */ -+ bool load_detect_temp; -+ void *dev_priv; -+ -+ struct psb_intel_mode_device *mode_dev; -+ -+}; -+ -+struct psb_intel_crtc { -+ struct drm_crtc base; -+ int pipe; -+ int plane; -+ uint32_t cursor_addr; -+ u8 lut_r[256], lut_g[256], lut_b[256]; -+ int dpms_mode; -+ struct psb_intel_framebuffer *fbdev_fb; -+ /* a mode_set for fbdev users on this crtc */ -+ struct drm_mode_set mode_set; -+ -+ /* current bo we scanout from */ -+ void *scanout_bo; -+ -+ /* current bo we cursor from */ -+ void *cursor_bo; -+ -+ struct psb_intel_mode_device *mode_dev; -+}; -+ -+#define to_psb_intel_crtc(x) container_of(x, struct psb_intel_crtc, base) -+#define to_psb_intel_output(x) container_of(x, struct psb_intel_output, base) -+#define enc_to_psb_intel_output(x) container_of(x, struct psb_intel_output, enc) -+#define to_psb_intel_framebuffer(x) container_of(x, struct psb_intel_framebuffer, base) -+ -+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev, -+ const u32 reg, const char *name); -+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan); -+int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output); -+extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output); -+ -+extern void psb_intel_crtc_init(struct drm_device *dev, int pipe, -+ struct psb_intel_mode_device *mode_dev); -+extern void psb_intel_crt_init(struct drm_device *dev); -+extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device); -+extern void psb_intel_dvo_init(struct drm_device *dev); -+extern void psb_intel_tv_init(struct drm_device *dev); -+extern void psb_intel_lvds_init(struct drm_device *dev, -+ struct psb_intel_mode_device *mode_dev); -+extern void mrst_lvds_init(struct drm_device *dev, -+ struct psb_intel_mode_device *mode_dev); -+extern void mrst_dsi_init(struct drm_device *dev, -+ struct psb_intel_mode_device *mode_dev); -+ -+extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc); -+extern void psb_intel_encoder_prepare(struct drm_encoder *encoder); -+extern void psb_intel_encoder_commit(struct drm_encoder *encoder); -+ -+extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector -+ *connector); -+ -+extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, -+ struct drm_crtc *crtc); -+extern void psb_intel_wait_for_vblank(struct drm_device *dev); -+extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, -+ int pipe); -+extern struct drm_crtc *psb_intel_get_load_detect_pipe -+ (struct psb_intel_output *psb_intel_output, -+ struct drm_display_mode *mode, int *dpms_mode); -+extern void psb_intel_release_load_detect_pipe(struct psb_intel_output -+ *psb_intel_output, int dpms_mode); -+ -+extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, -+ int sdvoB); -+extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector); -+extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, -+ int enable); -+extern int intelfb_probe(struct drm_device *dev); -+extern int intelfb_remove(struct drm_device *dev, -+ struct drm_framebuffer *fb); -+extern void psb_intel_crtc_fb_gamma_set(struct drm_crtc 
*crtc, u16 red,
-+					u16 green, u16 blue, int regno);
-+
-+extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
-+							    *dev, struct
-+							    drm_mode_fb_cmd
-+							    *mode_cmd,
-+							    void *mm_private);
-+#endif /* __INTEL_DRV_H__ */
-diff -uNr a/drivers/gpu/drm/psb/psb_intel_dsi.c b/drivers/gpu/drm/psb/psb_intel_dsi.c
---- a/drivers/gpu/drm/psb/psb_intel_dsi.c	1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/psb_intel_dsi.c	2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,1644 @@
-+/*
-+ * Copyright © 2006-2007 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ *	jim liu <jim.liu@intel.com>
-+ */
-+
-+#include <linux/backlight.h>
-+#include <drm/drm_crtc.h>
-+#include <drm/drm_edid.h>
-+
-+#define DRM_MODE_ENCODER_MIPI 5
-+#define DRM_MODE_CONNECTOR_MIPI 13
-+
-+#if DUMP_REGISTER
-+extern void dump_dsi_registers(struct drm_device *dev);
-+#endif /* DUMP_REGISTER */
-+
-+int dsi_backlight;	/* restore backlight to this value */
-+
-+/**
-+ * Returns the maximum level of the backlight duty cycle field.
-+ */
-+static u32 mrst_dsi_get_max_backlight(struct drm_device *dev)
-+{
-+#if PRINT_JLIU7
-+	DRM_INFO("JLIU7 enter mrst_dsi_get_max_backlight \n");
-+#endif /* PRINT_JLIU7 */
-+
-+	return BRIGHTNESS_MAX_LEVEL;
-+
-+/* FIXME jliu7 need to revisit */
-+}
-+
-+/**
-+ * Sets the backlight level.
-+ *
-+ * \param level backlight level, from 0 to mrst_dsi_get_max_backlight().
-+ */
-+static void mrst_dsi_set_backlight(struct drm_device *dev, int level)
-+{
-+	u32 blc_pwm_ctl;
-+	u32 max_pwm_blc;
-+
-+#if PRINT_JLIU7
-+	DRM_INFO("JLIU7 enter mrst_dsi_set_backlight \n");
-+#endif /* PRINT_JLIU7 */
-+
-+#if 1 /* FIXME JLIU7 */
-+	return;
-+#endif /* FIXME JLIU7 */
-+
-+	/* Prevent LVDS going to total black */
-+	if (level < 20)
-+		level = 20;
-+
-+	max_pwm_blc = mrst_lvds_get_PWM_ctrl_freq(dev);
-+
-+	if (max_pwm_blc == 0)
-+		return;
-+
-+	blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
-+
-+	if (blc_pol == BLC_POLARITY_INVERSE)
-+		blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
-+
-+	REG_WRITE(BLC_PWM_CTL,
-+		  (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
-+		  blc_pwm_ctl);
-+}
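-+
-+#if 0
-+/*
-+ * Illustrative sketch, not part of the original patch: the duty-cycle
-+ * scaling done above, factored out.  It maps a 0..BRIGHTNESS_MAX_LEVEL
-+ * request onto the PWM duty-cycle field; with inverse polarity the duty
-+ * cycle is mirrored around the modulation frequency value.  E.g. with
-+ * max_pwm_blc == 200 and BRIGHTNESS_MAX_LEVEL == 100, level 25 yields a
-+ * duty value of 50, or 200 - 50 = 150 when the polarity is inverted.
-+ */
-+static u32 mrst_dsi_backlight_duty(u32 level, u32 max_pwm_blc)
-+{
-+	u32 duty = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
-+
-+	if (blc_pol == BLC_POLARITY_INVERSE)
-+		duty = max_pwm_blc - duty;
-+
-+	return duty;
-+}
-+#endif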
-+
-+/**
-+ * Sets the power state for the panel.
-+ */
-+static void mrst_dsi_set_power(struct drm_device *dev,
-+			       struct psb_intel_output *output, bool on)
-+{
-+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+	u32 pp_status;
-+
-+#if PRINT_JLIU7
-+	DRM_INFO("JLIU7 enter mrst_dsi_set_power \n");
-+#endif /* PRINT_JLIU7 */
-+	/*
-+	 * The DSI device must be ready before we can change power state.
-+	 */
-+	if (!dev_priv->dsi_device_ready)
-+		return;
-+
-+	/*
-+	 * We don't support dual DSI yet. May be in POR in the future.
-+	 */
-+	if (dev_priv->dual_display)
-+		return;
-+
-+	if (on) {
-+		if (dev_priv->dpi && !dev_priv->dpi_panel_on) {
-+
-+#if PRINT_JLIU7
-+			DRM_INFO("JLIU7 mrst_dsi_set_power dpi = on \n");
-+#endif /* PRINT_JLIU7 */
-+			REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
-+#if 0 /*FIXME JLIU7 */
-+			REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_ON_DATA);
-+			REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_ON);
-+#endif /*FIXME JLIU7 */
-+
-+			dev_priv->dpi_panel_on = true;
-+
-+			REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
-+				  POWER_TARGET_ON);
-+			do {
-+				pp_status = REG_READ(PP_STATUS);
-+			} while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
-+		} else if (!dev_priv->dpi && !dev_priv->dbi_panel_on) {
-+#if PRINT_JLIU7
-+			DRM_INFO("JLIU7 mrst_dsi_set_power dbi = on \n");
-+#endif /* PRINT_JLIU7 */
-+
-+			dev_priv->DBI_CB_pointer = 0;
-+			/* exit sleep mode */
-+			*(dev_priv->p_DBI_commandBuffer +
-+			  dev_priv->DBI_CB_pointer++) = exit_sleep_mode;
-+
-+#if 0 /*FIXME JLIU7 */
-+			/* Check MIPI Adapter command registers */
-+			while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
-+#endif /*FIXME JLIU7 */
-+
-+			/* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
-+			REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
-+			REG_WRITE(MIPI_COMMAND_ADDRESS_REG,
-+				  (u32)dev_priv->p_DBI_commandBuffer | BIT0);
-+
-+			/* The host processor must wait five milliseconds
-+			   after sending the exit_sleep_mode command before
-+			   sending another command. This delay allows the
-+			   supply voltages and clock circuits to stabilize. */
-+			mdelay(5);
-+
-+			dev_priv->DBI_CB_pointer = 0;
-+
-+			/* set display on */
-+			*(dev_priv->p_DBI_commandBuffer +
-+			  dev_priv->DBI_CB_pointer++) = set_display_on;
-+
-+#if 0 /*FIXME JLIU7 */
-+			/* Check MIPI Adapter command registers */
-+			while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
-+#endif /*FIXME JLIU7 */
-+
-+			/* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
-+			REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
-+			REG_WRITE(MIPI_COMMAND_ADDRESS_REG,
-+				  (u32)dev_priv->p_DBI_commandBuffer | BIT0);
-+
-+			dev_priv->dbi_panel_on = true;
-+		}
-+/*FIXME JLIU7 */
-+/* Need to figure out how to control the MIPI panel power on sequence*/
-+
-+		mrst_dsi_set_backlight(dev, dsi_backlight);
-+	} else {
-+		mrst_dsi_set_backlight(dev, 0);
-+/*FIXME JLIU7 */
-+/* Need to figure out how to control the MIPI panel power down sequence*/
-+		/*
-+		 * Only save the current backlight value if we're going from
-+		 * on to off.
-+		 */
-+		if (dev_priv->dpi && dev_priv->dpi_panel_on) {
-+#if PRINT_JLIU7
-+			DRM_INFO("JLIU7 mrst_dsi_set_power dpi = off \n");
-+#endif /* PRINT_JLIU7 */
-+
-+			REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
-+				  ~POWER_TARGET_ON);
-+			do {
-+				pp_status = REG_READ(PP_STATUS);
-+			} while (pp_status & PP_ON);
-+
-+#if 0 /*FIXME JLIU7 */
-+			REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_OFF_DATA);
-+			REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_OFF);
-+#endif /*FIXME JLIU7 */
-+			REG_WRITE(DPI_CONTROL_REG, DPI_SHUT_DOWN);
-+			dev_priv->dpi_panel_on = false;
-+		} else if (!dev_priv->dpi && dev_priv->dbi_panel_on) {
-+#if PRINT_JLIU7
-+			DRM_INFO("JLIU7 mrst_dsi_set_power dbi = off \n");
-+#endif /* PRINT_JLIU7 */
-+			dev_priv->DBI_CB_pointer = 0;
-+			/* enter sleep mode */
-+			*(dev_priv->p_DBI_commandBuffer +
-+			  dev_priv->DBI_CB_pointer++) = enter_sleep_mode;
-+
-+			/* Check MIPI Adapter command registers */
-+			while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
-+
-+			/* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
-+			REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
-+			REG_WRITE(MIPI_COMMAND_ADDRESS_REG,
-+				  (u32)dev_priv->p_DBI_commandBuffer | BIT0);
-+			dev_priv->dbi_panel_on = false;
-+		}
-+	}
-+}
-+
-+static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
-+{
-+	struct drm_device *dev = encoder->dev;
-+	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
-+
-+#if PRINT_JLIU7
-+	DRM_INFO("JLIU7 enter mrst_dsi_dpms \n");
-+#endif /* PRINT_JLIU7 */
-+
-+	if (mode == DRM_MODE_DPMS_ON)
-+		mrst_dsi_set_power(dev, output, true);
-+	else
-+		mrst_dsi_set_power(dev, output, false);
-+
-+	/* XXX: We never power down the DSI pairs. */
-+}
-+
-+static void mrst_dsi_save(struct drm_connector *connector)
-+{
-+#if 0 /* JB: Disable for drop */
-+	struct drm_device *dev = connector->dev;
-+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+
-+#if PRINT_JLIU7
-+	DRM_INFO("JLIU7 enter mrst_dsi_save \n");
-+#endif /* PRINT_JLIU7 */
-+
-+	dev_priv->savePP_ON = REG_READ(LVDSPP_ON);
-+	dev_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
-+	dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
-+	dev_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
-+	dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
-+	dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
-+					  BACKLIGHT_DUTY_CYCLE_MASK);
-+
-+	/*
-+	 * make backlight to full brightness
-+	 */
-+	dsi_backlight = mrst_dsi_get_max_backlight(dev);
-+#endif
-+}
-+
-+static void mrst_dsi_restore(struct drm_connector *connector)
-+{
-+#if 0 /* JB: Disable for drop */
-+	struct drm_device *dev = connector->dev;
-+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+
-+#if PRINT_JLIU7
-+	DRM_INFO("JLIU7 enter mrst_dsi_restore \n");
-+#endif /* PRINT_JLIU7 */
-+
-+	REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
-+	REG_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
-+	REG_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
-+	REG_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
-+	REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
-+	if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
-+		mrst_dsi_set_power(dev, true);
-+	else
-+		mrst_dsi_set_power(dev, false);
-+#endif
-+}
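-+
-+#if 0
-+/*
-+ * Illustrative sketch, not part of the original patch: the DBI branches
-+ * in mrst_dsi_set_power() above repeat the same three-step dance for
-+ * every DCS opcode.  Factored out it would look like this: stage the
-+ * opcode in the command buffer, program the length, then hand the
-+ * buffer address to the adapter with bit 0 set to kick off the send.
-+ * The helper name is hypothetical.
-+ */
-+static void mrst_dbi_send_dcs_byte(struct drm_device *dev, u8 cmd)
-+{
-+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+
-+	dev_priv->DBI_CB_pointer = 0;
-+	*(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = cmd;
-+	REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
-+	REG_WRITE(MIPI_COMMAND_ADDRESS_REG,
-+		  (u32)dev_priv->p_DBI_commandBuffer | BIT0);
-+}
-+#endif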
-+
-+static void mrst_dsi_prepare(struct drm_encoder *encoder)
-+{
-+	struct drm_device *dev = encoder->dev;
-+	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
-+	struct psb_intel_mode_device *mode_dev = output->mode_dev;
-+
-+#if PRINT_JLIU7
-+	DRM_INFO("JLIU7 enter mrst_dsi_prepare \n");
-+#endif /* PRINT_JLIU7 */
-+
-+	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
-+	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
-+					  BACKLIGHT_DUTY_CYCLE_MASK);
-+
-+	mrst_dsi_set_power(dev, output, false);
-+}
-+
-+static void mrst_dsi_commit(struct drm_encoder *encoder)
-+{
-+	struct drm_device *dev = encoder->dev;
-+	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
-+	struct psb_intel_mode_device *mode_dev = output->mode_dev;
-+
-+#if PRINT_JLIU7
-+	DRM_INFO("JLIU7 enter mrst_dsi_commit \n");
-+#endif /* PRINT_JLIU7 */
-+
-+	if (mode_dev->backlight_duty_cycle == 0)
-+		mode_dev->backlight_duty_cycle =
-+		    mrst_dsi_get_max_backlight(dev);
-+
-+	mrst_dsi_set_power(dev, output, true);
-+
-+#if DUMP_REGISTER
-+	dump_dsi_registers(dev);
-+#endif /* DUMP_REGISTER */
-+}
-+
-+/* ************************************************************************* *\
-+FUNCTION: GetHS_TX_timeoutCount
-+
-+DESCRIPTION: In burst mode, the timeout must be greater than one DPI line
-+	time in byte clocks (txbyteclkhs); programming one more than that
-+	value is recommended.
-+
-+	In non-burst mode, it must be greater than one DPI frame time in
-+	byte clocks (txbyteclkhs); again, one more than that value is
-+	recommended.
-+
-+\* ************************************************************************* */
-+static u32 GetHS_TX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
-+{
-+
-+	u32 timeoutCount = 0, HTOT_count = 0, VTOT_count = 0, HTotalPixel = 0;
-+
-+	/* Total pixels that need to be transferred per line */
-+	HTotalPixel = (dev_priv->HsyncWidth + dev_priv->HbackPorch +
-+		       dev_priv->HfrontPorch) * dev_priv->laneCount +
-+		      dev_priv->HactiveArea;
-+
-+	/* byte count = (pixel count * bits per pixel) / 8 */
-+	HTOT_count = (HTotalPixel * dev_priv->bpp) / 8;
-+
-+	if (dev_priv->videoModeFormat == BURST_MODE) {
-+		timeoutCount = HTOT_count + 1;
-+#if 1 /*FIXME remove it after power-on */
-+		VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch +
-+			     dev_priv->VfrontPorch + dev_priv->VsyncWidth;
-+		/* timeoutCount = (HTOT_count * VTOT_count) + 1; */
-+		timeoutCount = (HTOT_count * VTOT_count) + 1;
-+#endif
-+	} else {
-+		VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch +
-+			     dev_priv->VfrontPorch + dev_priv->VsyncWidth;
-+		/* timeoutCount = (HTOT_count * VTOT_count) + 1; */
-+		timeoutCount = (HTOT_count * VTOT_count) + 1;
-+	}
-+
-+	return timeoutCount & 0xFFFF;
-+}
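-+
-+/*
-+ * Worked example (illustrative, not from the original patch): with the
-+ * hard-coded pre-silicon timings used later in this file (HsyncWidth 10,
-+ * HbackPorch 8, HfrontPorch 3, HactiveArea 800, 2 lanes, 24 bpp), the
-+ * non-burst case gives HTotalPixel = (10 + 8 + 3) * 2 + 800 = 842,
-+ * HTOT_count = 842 * 24 / 8 = 2526, VTOT_count = 480 + 3 + 2 + 2 = 487,
-+ * so timeoutCount = 2526 * 487 + 1 = 1230163.  That overflows the 16-bit
-+ * field and is truncated to 0xC553 -- one reason the power-on code below
-+ * simply programs 0xffff instead.
-+ */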
-+
-+/* ************************************************************************* *\
-+FUNCTION: GetLP_RX_timeoutCount
-+
-+DESCRIPTION: The timeout value is protocol specific. The timeout value is
-+	calculated from txclkesc (50 ns).
-+
-+	Minimum value =
-+		time to send one trigger message
-+		= 4 x txclkesc [escape mode entry sequence]
-+		+ 8-bit trigger message (2 x 8 x txclkesc)
-+		+ 1 txclkesc [stop_state]
-+		= 21 x txclkesc [0x15]
-+
-+	Maximum value =
-+		time to send a long packet with maximum payload data
-+		= 4 x txclkesc [escape mode entry sequence]
-+		+ 8-bit low power data transmission command (2 x 8 x txclkesc)
-+		+ packet header [4 x 8 x 2 x txclkesc]
-+		+ payload [n x 8 x 2 x txclkesc]
-+		+ CRC [2 x 8 x 2 x txclkesc]
-+		+ 1 txclkesc [stop_state]
-+		= 117 txclkesc + n [payload in terms of bytes] x 16 txclkesc
-+
-+\* ************************************************************************* */
-+static u32 GetLP_RX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
-+{
-+
-+	u32 timeoutCount = 0;
-+
-+	if (dev_priv->config_phase) {
-+		/* Assuming 256 byte DDB data.*/
-+		timeoutCount = 117 + 256 * 16;
-+	} else {
-+		/* For DPI video only mode use the minimum value.*/
-+		timeoutCount = 0x15;
-+#if 1 /*FIXME remove it after power-on */
-+		/* Assuming 256 byte DDB data.*/
-+		timeoutCount = 117 + 256 * 16;
-+#endif
-+	}
-+
-+	return timeoutCount;
-+}
-+
-+/* ************************************************************************* *\
-+FUNCTION: GetHSA_Count
-+
-+DESCRIPTION: Shows the horizontal sync value in terms of byte clock
-+	(txbyteclkhs).
-+	The minimum HSA period should be sufficient to transmit an hsync
-+	start short packet (4 bytes).
-+	i) For non-burst mode with sync pulse, min value – 4 in decimal [plus
-+	   an optional 6 bytes for a zero payload blanking packet]. But if
-+	   the value is less than 10 but more than 4, this count will be
-+	   added to the HBP’s count for one lane.
-+	ii) For non-burst sync event & burst mode, there is no HSA, so you
-+	    can program this to zero. If you program this register, these
-+	    byte values will be added to HBP.
-+	iii) For burst mode of operation, the values programmed in terms of
-+	     byte clock are normally based on the principle that the time
-+	     for transferring HSA in burst mode is the same as in non-burst
-+	     mode.
-+\* ************************************************************************* */
-+static u32 GetHSA_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
-+{
-+	u32 HSA_count;
-+	u32 HSA_countX8;
-+
-+	/* byte clock count = (pixel clock count * bits per pixel) / 8 */
-+	HSA_countX8 = dev_priv->HsyncWidth * dev_priv->bpp;
-+
-+	if (dev_priv->videoModeFormat == BURST_MODE)
-+		HSA_countX8 *= dev_priv->DDR_Clock /
-+			       dev_priv->DDR_Clock_Calculated;
-+
-+	HSA_count = HSA_countX8 / 8;
-+
-+	return HSA_count;
-+}
-+
-+/* ************************************************************************* *\
-+FUNCTION: GetHBP_Count
-+
-+DESCRIPTION: Shows the horizontal back porch value in terms of txbyteclkhs.
-+	The minimum HBP period should be sufficient to transmit an “hsync end
-+	short packet (4 bytes) + blanking packet overhead (6 bytes) + RGB
-+	packet header (4 bytes)”.
-+	For burst mode of operation, the values programmed in terms of byte
-+	clock are normally based on the principle that the time for
-+	transferring HBP in burst mode is the same as in non-burst mode.
-+
-+	Min value – 14 in decimal [accounted with zero payload for the
-+	blanking packet] for one lane.
-+	Max value – any value greater than 14 based on DPI resolution
-+\* ************************************************************************* */
-+static u32 GetHBP_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
-+{
-+	u32 HBP_count;
-+	u32 HBP_countX8;
-+
-+	/* byte clock count = (pixel clock count * bits per pixel) / 8 */
-+	HBP_countX8 = dev_priv->HbackPorch * dev_priv->bpp;
-+
-+	if (dev_priv->videoModeFormat == BURST_MODE)
-+		HBP_countX8 *= dev_priv->DDR_Clock /
-+			       dev_priv->DDR_Clock_Calculated;
-+
-+	HBP_count = HBP_countX8 / 8;
-+
-+	return HBP_count;
-+}
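-+
-+/*
-+ * Worked example (illustrative, not from the original patch): the
-+ * Get*_Count() helpers all share the same conversion.  With the
-+ * hard-coded 24 bpp panel timings used elsewhere in this file, a
-+ * 10-pixel sync width costs (10 * 24) / 8 = 30 txbyteclkhs, an 8-pixel
-+ * back porch (8 * 24) / 8 = 24, and a 3-pixel front porch (3 * 24) / 8
-+ * = 9; burst mode first scales the x8 count by
-+ * DDR_Clock / DDR_Clock_Calculated.
-+ */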
-+
-+/* ************************************************************************* *\
-+FUNCTION: GetHFP_Count
-+
-+DESCRIPTION: Shows the horizontal front porch value in terms of txbyteclkhs.
-+	For non-burst mode, the minimum HFP period should be sufficient to
-+	transmit “RGB data packet footer (2 bytes) + blanking packet
-+	overhead (6 bytes)”.
-+
-+	For burst mode, the minimum HFP period should be sufficient to
-+	transmit “blanking packet overhead (6 bytes)”.
-+
-+	For burst mode of operation, the values programmed in terms of byte
-+	clock are normally based on the principle that the time for
-+	transferring HFP in burst mode is the same as in non-burst mode.
-+
-+	Min value – 8 in decimal for non-burst mode [accounted with zero
-+	payload for the blanking packet] for one lane.
-+	Min value – 6 in decimal for burst mode for one lane.
-+
-+	Max value – any value greater than the minimum value based on DPI
-+	resolution
-+\* ************************************************************************* */
-+static u32 GetHFP_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
-+{
-+	u32 HFP_count;
-+	u32 HFP_countX8;
-+
-+	/* byte clock count = (pixel clock count * bits per pixel) / 8 */
-+	HFP_countX8 = dev_priv->HfrontPorch * dev_priv->bpp;
-+
-+	if (dev_priv->videoModeFormat == BURST_MODE)
-+		HFP_countX8 *= dev_priv->DDR_Clock /
-+			       dev_priv->DDR_Clock_Calculated;
-+
-+	HFP_count = HFP_countX8 / 8;
-+
-+	return HFP_count;
-+}
-+
-+/* ************************************************************************* *\
-+FUNCTION: GetHAdr_Count
-+
-+DESCRIPTION: Shows the horizontal active area value in terms of txbyteclkhs.
-+	In non-burst mode, the count equals the RGB word count value.
-+
-+	In burst mode, RGB pixel packets are time-compressed, leaving more
-+	time during a scan line for LP mode (saving power) or for
-+	multiplexing other transmissions onto the DSI link. Hence, the count
-+	equals the time in txbyteclkhs for sending time-compressed RGB
-+	pixels plus the time needed for moving to power save mode or the
-+	time needed for the secondary channel to use the DSI link.
-+
-+	But if the time left for moving to low power mode is less than
-+	8 txbyteclkhs [2 txbyteclkhs for the RGB data packet footer and
-+	6 txbyteclkhs for a blanking packet with zero payload], then
-+	this count will be added to the HFP's count for one lane.
-+
-+	Min value – 8 in decimal for non-burst mode [accounted with zero
-+	payload for the blanking packet] for one lane.
-+	Min value – 6 in decimal for burst mode for one lane.
-+
-+	Max value – any value greater than the minimum value based on DPI
-+	resolution
-+\* ************************************************************************* */
-+static u32 GetHAdr_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
-+{
-+	u32 HAdr_count;
-+	u32 HAdr_countX8;
-+
-+	/* byte clock count = (pixel clock count * bits per pixel) / 8 */
-+	HAdr_countX8 = dev_priv->HactiveArea * dev_priv->bpp;
-+
-+	if (dev_priv->videoModeFormat == BURST_MODE)
-+		HAdr_countX8 *= dev_priv->DDR_Clock /
-+				dev_priv->DDR_Clock_Calculated;
-+
-+	HAdr_count = HAdr_countX8 / 8;
-+
-+	return HAdr_count;
-+}
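-+
-+/*
-+ * Worked example (illustrative, not from the original patch) for
-+ * GetHighLowSwitchCount() below, taking DDR_Clock in MHz as the
-+ * derivation in its comments does: at DDR_Clock = 400,
-+ * HighToLowSwitchCount = (9000 + 200 * 400) / 4000 + 1 = 23 and
-+ * LowToHighSwitchCount = (5000 + 250 * 400) / 4000 + 1 = 27, so the
-+ * worst case, 27, would be returned -- except that the function still
-+ * clamps anything below 0x1f up to 0x1f until the formulae are tuned.
-+ */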
-+
-+/* ************************************************************************* *\
-+FUNCTION: GetHighLowSwitchCount
-+
-+DESCRIPTION: High speed to low power or low power to high speed switching
-+	time in terms of byte clock (txbyteclkhs). This value is based on
-+	the byte clock (txbyteclkhs) and low power clock frequency
-+	(txclkesc).
-+
-+	Typical value - the number of byte clocks required to switch from
-+	low power mode to high speed mode after "txrequesths" is asserted.
-+
-+	The worst count value among the low to high or high to low switching
-+	times in terms of txbyteclkhs has to be programmed in this register.
-+
-+	Useful formulae:
-+		DDR clock period = 2 times UI
-+		txbyteclkhs clock = 8 times UI
-+		Tlpx = 1 / txclkesc
-+	CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE
-+	(from the standard D-PHY spec)
-+		LP01 + LP00 + HS0 = 1 Tlpx + 1 Tlpx + 3 Tlpx [approx]
-+			+ 1 DDR clock [2 UI] + 1 txbyteclkhs clock [8 UI]
-+	CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE
-+	(from the standard D-PHY spec)
-+		Ths-trail = 1 txbyteclkhs clock [8 UI] + 5 DDR clocks [10 UI]
-+			+ 4 Tlpx [approx]
-+\* ************************************************************************* */
-+static u32 GetHighLowSwitchCount(DRM_DRIVER_PRIVATE_T *dev_priv)
-+{
-+	u32 HighLowSwitchCount, HighToLowSwitchCount, LowToHighSwitchCount;
-+
-+/* ************************************************************************* *\
-+	CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE
-+	(from the standard D-PHY spec)
-+	Ths-trail = 1 txbyteclkhs clock [8 UI] + 5 DDR clocks [10 UI]
-+		+ 4 Tlpx [approx]
-+
-+	Tlpx = 50 ns, using max txclkesc (20 MHz)
-+
-+	txbyteclkhs_period = 4000 / dev_priv->DDR_Clock; in ns
-+	UI_period = 500 / dev_priv->DDR_Clock; in ns
-+
-+	HS_to_LP = Ths-trail = 18 * UI_period + 4 * Tlpx
-+		 = 9000 / dev_priv->DDR_Clock + 200;
-+
-+	HighToLowSwitchCount = HS_to_LP / txbyteclkhs_period
-+		= (9000 / dev_priv->DDR_Clock + 200) / (4000 / dev_priv->DDR_Clock)
-+		= (9000 + (200 * dev_priv->DDR_Clock)) / 4000
-+
-+\* ************************************************************************* */
-+	HighToLowSwitchCount = (9000 + (200 * dev_priv->DDR_Clock)) / 4000 + 1;
-+
-+/* ************************************************************************* *\
-+	CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE
-+	(from the standard D-PHY spec)
-+	LP01 + LP00 + HS0 = 1 Tlpx + 1 Tlpx + 3 Tlpx [approx]
-+		+ 1 DDR clock [2 UI] + 1 txbyteclkhs clock [8 UI]
-+
-+	LP_to_HS = 10 * UI_period + 5 * Tlpx
-+		 = 5000 / dev_priv->DDR_Clock + 250;
-+
-+	LowToHighSwitchCount = LP_to_HS / txbyteclkhs_period
-+		= (5000 / dev_priv->DDR_Clock + 250) / (4000 / dev_priv->DDR_Clock)
-+		= (5000 + (250 * dev_priv->DDR_Clock)) / 4000
-+
-+\* ************************************************************************* */
-+	LowToHighSwitchCount = (5000 + (250 * dev_priv->DDR_Clock)) / 4000 + 1;
-+
-+	if (HighToLowSwitchCount > LowToHighSwitchCount)
-+		HighLowSwitchCount = HighToLowSwitchCount;
-+	else
-+		HighLowSwitchCount = LowToHighSwitchCount;
-+
-+	/* FIXME jliu need to fine tune the above formulae and remove the
-+	 * following after power on */
-+	if (HighLowSwitchCount < 0x1f)
-+		HighLowSwitchCount = 0x1f;
-+
-+	return HighLowSwitchCount;
-+}
-+
-+/* ************************************************************************* *\
-+FUNCTION: mrst_gen_long_write
-+
-+DESCRIPTION:
-+
-+\* ************************************************************************* */
-+static void mrst_gen_long_write(struct drm_device *dev, u32 *data, u16 wc, u8 vc)
-+{
-+	u32 gen_data_reg = HS_GEN_DATA_REG;
-+	u32 gen_ctrl_reg = HS_GEN_CTRL_REG;
-+	u32 date_full_bit = HS_DATA_FIFO_FULL;
-+	u32 control_full_bit = HS_CTRL_FIFO_FULL;
-+	u16 wc_saved = wc;
-+
-+#if PRINT_JLIU7
-+	DRM_INFO("JLIU7 enter mrst_gen_long_write \n");
-+#endif /* PRINT_JLIU7 */
-+
-+	/* sanity check: only virtual channels 0-3 exist */
-+	if (vc >= 4) {
-+		DRM_ERROR("MIPI virtual channel can't be greater than 3.
\n"); -+ return; -+ } -+ -+ -+ if (0) /* FIXME JLIU7 check if it is in LP*/ -+ { -+ gen_data_reg = LP_GEN_DATA_REG; -+ gen_ctrl_reg = LP_GEN_CTRL_REG; -+ date_full_bit = LP_DATA_FIFO_FULL; -+ control_full_bit = LP_CTRL_FIFO_FULL; -+ } -+ -+ while (wc >= 4) -+ { -+ /* Check if MIPI IP generic data fifo is not full */ -+ while ((REG_READ(GEN_FIFO_STAT_REG) & date_full_bit) == date_full_bit); -+ -+ /* write to data buffer */ -+ REG_WRITE(gen_data_reg, *data); -+ -+ wc -= 4; -+ data ++; -+ } -+ -+ switch (wc) -+ { -+ case 1: -+ REG_WRITE8(gen_data_reg, *((u8 *)data)); -+ break; -+ case 2: -+ REG_WRITE16(gen_data_reg, *((u16 *)data)); -+ break; -+ case 3: -+ REG_WRITE16(gen_data_reg, *((u16 *)data)); -+ data = (u32*)((u8*) data + 2); -+ REG_WRITE8(gen_data_reg, *((u8 *)data)); -+ break; -+ } -+ -+ /* Check if MIPI IP generic control fifo is not full */ -+ while ((REG_READ(GEN_FIFO_STAT_REG) & control_full_bit) == control_full_bit); -+ /* write to control buffer */ -+ REG_WRITE(gen_ctrl_reg, 0x29 | (wc_saved << 8) | (vc << 6)); -+} -+ -+/* ************************************************************************* *\ -+FUNCTION: mrst_init_HIMAX_MIPI_bridge -+ ` -+DESCRIPTION: -+ -+\* ************************************************************************* */ -+static void mrst_init_HIMAX_MIPI_bridge(struct drm_device *dev) -+{ -+ u32 gen_data[2]; -+ u16 wc = 0; -+ u8 vc =0; -+ u32 gen_data_intel = 0x200105; -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter mrst_init_HIMAX_MIPI_bridge \n"); -+#endif /* PRINT_JLIU7 */ -+ -+ /* exit sleep mode */ -+ wc = 0x5; -+ gen_data[0] = gen_data_intel | (0x11 << 24); -+ gen_data[1] = 0; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* set_pixel_format */ -+ gen_data[0] = gen_data_intel | (0x3A << 24); -+ gen_data[1] = 0x77; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* Set resolution for (800X480) */ -+ wc = 0x8; -+ gen_data[0] = gen_data_intel | (0x2A << 24); -+ gen_data[1] = 0x1F030000; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[0] = gen_data_intel | (0x2B << 24); -+ gen_data[1] = 0xDF010000; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* System control */ -+ wc = 0x6; -+ gen_data[0] = gen_data_intel | (0xEE << 24); -+ gen_data[1] = 0x10FA; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* INPUT TIMING FOR TEST PATTERN(800X480) */ -+ /* H-size */ -+ gen_data[1] = 0x2000; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x0301; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* V-size */ -+ gen_data[1] = 0xE002; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x0103; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* H-total */ -+ gen_data[1] = 0x2004; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x0405; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* V-total */ -+ gen_data[1] = 0x0d06; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x0207; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* H-blank */ -+ gen_data[1] = 0x0308; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x0009; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* H-blank */ -+ gen_data[1] = 0x030A; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x000B; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* H-start */ -+ gen_data[1] = 0xD80C; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x000D; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* V-start */ -+ gen_data[1] = 0x230E; -+ 
mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x000F; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* RGB domain */ -+ gen_data[1] = 0x0027; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* INP_FORM Setting */ -+ /* set_1 */ -+ gen_data[1] = 0x1C10; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* set_2 */ -+ gen_data[1] = 0x0711; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* set_3 */ -+ gen_data[1] = 0x0012; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* set_4 */ -+ gen_data[1] = 0x0013; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* set_5 */ -+ gen_data[1] = 0x2314; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* set_6 */ -+ gen_data[1] = 0x0015; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* set_7 */ -+ gen_data[1] = 0x2316; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* set_8 */ -+ gen_data[1] = 0x0017; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* set_1 */ -+ gen_data[1] = 0x0330; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* FRC Setting */ -+ /* FRC_set_2 */ -+ gen_data[1] = 0x237A; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* FRC_set_3 */ -+ gen_data[1] = 0x4C7B; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* FRC_set_4 */ -+ gen_data[1] = 0x037C; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* FRC_set_5 */ -+ gen_data[1] = 0x3482; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* FRC_set_7 */ -+ gen_data[1] = 0x1785; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+#if 0 -+ /* FRC_set_8 */ -+ gen_data[1] = 0xD08F; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+#endif -+ -+ /* OUTPUT TIMING FOR TEST PATTERN (800X480) */ -+ /* out_htotal */ -+ gen_data[1] = 0x2090; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x0491; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* out_hsync */ -+ gen_data[1] = 0x0392; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x0093; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* out_hstart */ -+ gen_data[1] = 0xD894; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x0095; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* out_hsize */ -+ gen_data[1] = 0x2096; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x0397; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* out_vtotal */ -+ gen_data[1] = 0x0D98; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x0299; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* out_vsync */ -+ gen_data[1] = 0x039A; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x009B; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* out_vstart */ -+ gen_data[1] = 0x239C; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x009D; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* out_vsize */ -+ gen_data[1] = 0xE09E; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ gen_data[1] = 0x019F; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* FRC_set_6 */ -+ gen_data[1] = 0x9084; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* Other setting */ -+ gen_data[1] = 0x0526; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* RBG domain */ -+ gen_data[1] = 0x1177; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* rgbw */ -+ /* set_1 */ -+ gen_data[1] = 0xD28F; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* set_2 */ -+ gen_data[1] = 0x02D0; -+ mrst_gen_long_write(dev, gen_data, wc, vc); -+ -+ /* set_3 */ -+ gen_data[1] = 0x08D1; 
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+
-+	/* set_4 */
-+	gen_data[1] = 0x05D2;
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+
-+	/* set_5 */
-+	gen_data[1] = 0x24D4;
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+
-+	/* set_6 */
-+	gen_data[1] = 0x00D5;
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+	gen_data[1] = 0x02D7;
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+	gen_data[1] = 0x00D8;
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+
-+	gen_data[1] = 0x48F3;
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+	gen_data[1] = 0xD4F2;
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+	gen_data[1] = 0x3D8E;
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+	gen_data[1] = 0x60FD;
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+	gen_data[1] = 0x00B5;
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+	gen_data[1] = 0x48F4;
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+
-+	/* inside pattern */
-+	gen_data[1] = 0x0060;
-+	mrst_gen_long_write(dev, gen_data, wc, vc);
-+}
-+
-+/* ************************************************************************* *\
-+FUNCTION: mrst_init_NSC_MIPI_bridge
-+
-+DESCRIPTION:
-+
-+\* ************************************************************************* */
-+static void mrst_init_NSC_MIPI_bridge(struct drm_device *dev)
-+{
-+
-+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+#if PRINT_JLIU7
-+	DRM_INFO("JLIU7 enter mrst_init_NSC_MIPI_bridge.\n");
-+#endif /* PRINT_JLIU7 */
-+	/* Program MIPI IP to 50MHz DSI, Non-Burst mode with sync event,
-+	   1 or 2 Data Lanes */
-+
-+	udelay(DELAY_TIME1);
-+	/* enable RGB24*/
-+	REG_WRITE(LP_GEN_CTRL_REG, 0x003205e3);
-+
-+	udelay(DELAY_TIME1);
-+	/* enable all error reporting*/
-+	REG_WRITE(LP_GEN_CTRL_REG, 0x000040e3);
-+	udelay(DELAY_TIME1);
-+	REG_WRITE(LP_GEN_CTRL_REG, 0x000041e3);
-+
-+	udelay(DELAY_TIME1);
-+	/* enable 2 data lane; video shaping & error reporting */
-+	REG_WRITE(LP_GEN_CTRL_REG, 0x00a842e3); /* 0x006842e3 for 1 data lane */
-+
-+	udelay(DELAY_TIME1);
-+	/* HS timeout */
-+	REG_WRITE(LP_GEN_CTRL_REG, 0x009243e3);
-+
-+	udelay(DELAY_TIME1);
-+	/* settle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
-+	REG_WRITE(LP_GEN_CTRL_REG, 0x00e645e3);
-+
-+	/* enable all virtual channels */
-+	REG_WRITE(LP_GEN_CTRL_REG, 0x000f46e3);
-+
-+	/* set output strength to low-drive */
-+	REG_WRITE(LP_GEN_CTRL_REG, 0x00007de3);
-+
-+	if (dev_priv->sku_83) {
-+		/* set escape clock to divide by 8 */
-+		REG_WRITE(LP_GEN_CTRL_REG, 0x000044e3);
-+	} else if (dev_priv->sku_100L) {
-+		/* set escape clock to divide by 16 */
-+		REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
-+	} else if (dev_priv->sku_100) {
-+		/* set escape clock to divide by 32 */
-+		REG_WRITE(LP_GEN_CTRL_REG, 0x003044e3);
-+
-+		/* settle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
-+		REG_WRITE(LP_GEN_CTRL_REG, 0x00ec45e3);
-+	}
-+
-+	/* CFG_VALID=1; RGB_CLK_EN=1.
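-+	   Presumably the final handshake step: once CFG_VALID is set the
-+	   bridge treats the configuration programmed above as valid and
-+	   enables its RGB pixel clock (an assumption based on the bit
-+	   names; the NSC bridge documentation is not part of this patch).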
*/ -+ REG_WRITE(LP_GEN_CTRL_REG, 0x00057fe3); -+ -+} -+ -+static void mrst_dsi_mode_set(struct drm_encoder *encoder, -+ struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode) -+{ -+ struct drm_device *dev = encoder->dev; -+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private; -+ u32 pfit_control; -+ u32 dsiFuncPrgValue = 0; -+ u32 SupportedFormat = 0; -+ u32 channelNumber = 0; -+ u32 DBI_dataWidth = 0; -+ u32 resolution = 0; -+ u32 mipiport = 0; -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter mrst_dsi_mode_set \n"); -+#endif /* PRINT_JLIU7 */ -+ -+ switch (dev_priv->bpp) -+ { -+ case 16: -+ SupportedFormat = RGB_565_FMT; -+ break; -+ case 18: -+ SupportedFormat = RGB_666_FMT; -+ break; -+ case 24: -+ SupportedFormat = RGB_888_FMT; -+ break; -+ default: -+ DRM_INFO("mrst_dsi_mode_set, invalid bpp \n"); -+ break; -+ } -+ -+ resolution = dev_priv->HactiveArea | (dev_priv->VactiveArea << RES_V_POS); -+ -+ if (dev_priv->dpi) -+ { -+ /* Enable automatic panel scaling for non-native modes so that they fill -+ * the screen. Should be enabled before the pipe is enabled, according to -+ * register description and PRM. -+ */ -+ /*FIXME JLIU7, enable Auto-scale only */ -+ /* -+ * Enable automatic panel scaling so that non-native modes fill the -+ * screen. Should be enabled before the pipe is enabled, according to -+ * register description and PRM. -+ */ -+#if 0 /*JLIU7_PO */ -+ if (mode->hdisplay != adjusted_mode->hdisplay || -+ mode->vdisplay != adjusted_mode->vdisplay) -+ { -+ pfit_control = PFIT_ENABLE; -+ } -+ else -+#endif /*JLIU7_PO */ -+ { -+ pfit_control = 0; -+ } -+ REG_WRITE(PFIT_CONTROL, pfit_control); -+ -+ /* Enable MIPI Port */ -+ mipiport = MIPI_PORT_EN; -+ REG_WRITE(MIPI, mipiport); -+ -+ /* JLIU7_FIXME set MIPI clock ratio to 1:1 for NSC init */ -+ REG_WRITE(MIPI_CONTROL_REG, 0x00000018); -+ -+ /* Enable all the error interrupt */ -+ REG_WRITE(INTR_EN_REG, 0xffffffff); -+ REG_WRITE(TURN_AROUND_TIMEOUT_REG, 0x0000000F); -+ REG_WRITE(DEVICE_RESET_REG, 0x000000ff); /* old value = 0x00000015 may depends on the DSI RX device*/ -+ REG_WRITE(INIT_COUNT_REG, 0x00000fff); /* Minimum value = 0x000007d0 */ -+ -+ SupportedFormat <<= FMT_DPI_POS; -+ dsiFuncPrgValue = dev_priv->laneCount | SupportedFormat; -+ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue); -+ -+ REG_WRITE(DPI_RESOLUTION_REG, resolution); -+ REG_WRITE(DBI_RESOLUTION_REG, 0x00000000); -+ -+ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, dev_priv->VsyncWidth); -+ REG_WRITE(VERT_BACK_PORCH_COUNT_REG, dev_priv->VbackPorch); -+ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG, dev_priv->VfrontPorch); -+ -+#if 1 /*JLIU7_PO hard coded for NSC PO */ -+ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, 0x1e); -+ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, 0x18); -+ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, 0x8); -+ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, 0x4b0); -+#else /*JLIU7_PO hard coded for NSC PO */ -+ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, GetHSA_Count(dev_priv)); -+ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, GetHBP_Count(dev_priv)); -+ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, GetHFP_Count(dev_priv)); -+ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, GetHAdr_Count(dev_priv)); -+#endif /*JLIU7_PO hard coded for NSC PO */ -+ REG_WRITE(VIDEO_FMT_REG, dev_priv->videoModeFormat); -+ } -+ else -+ { -+ /* JLIU7 FIXME VIRTUAL_CHANNEL_NUMBER_1 or VIRTUAL_CHANNEL_NUMBER_0*/ -+ channelNumber = VIRTUAL_CHANNEL_NUMBER_1 << DBI_CHANNEL_NUMBER_POS; -+ DBI_dataWidth = DBI_DATA_WIDTH_16BIT << DBI_DATA_WIDTH_POS; -+ dsiFuncPrgValue = dev_priv->laneCount | channelNumber | DBI_dataWidth; -+ /* JLIU7 FIXME 
*/ -+ SupportedFormat <<= FMT_DBI_POS; -+ dsiFuncPrgValue |= SupportedFormat; -+ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue); -+ -+ REG_WRITE(DPI_RESOLUTION_REG, 0x00000000); -+ REG_WRITE(DBI_RESOLUTION_REG, resolution); -+ } -+ -+#if 1 /*JLIU7_PO hard code for NSC PO */ -+ REG_WRITE(HS_TX_TIMEOUT_REG, 0xffff); -+ REG_WRITE(LP_RX_TIMEOUT_REG, 0xffff); -+ -+ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, 0x46); -+#else /*JLIU7_PO hard code for NSC PO */ -+ REG_WRITE(HS_TX_TIMEOUT_REG, GetHS_TX_timeoutCount(dev_priv)); -+ REG_WRITE(LP_RX_TIMEOUT_REG, GetLP_RX_timeoutCount(dev_priv)); -+ -+ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, GetHighLowSwitchCount(dev_priv)); -+#endif /*JLIU7_PO hard code for NSC PO */ -+ -+ -+ REG_WRITE(EOT_DISABLE_REG, 0x00000000); -+ -+ /* FIXME JLIU7 for NSC PO */ -+ REG_WRITE(LP_BYTECLK_REG, 0x00000004); -+ -+ REG_WRITE(DEVICE_READY_REG, 0x00000001); -+ REG_WRITE(DPI_CONTROL_REG, 0x00000002); /* Turn On */ -+ -+ dev_priv->dsi_device_ready = true; -+ -+#if 0 /*JLIU7_PO */ -+ mrst_init_HIMAX_MIPI_bridge(dev); -+#endif /*JLIU7_PO */ -+ mrst_init_NSC_MIPI_bridge(dev); -+ -+ if (dev_priv->sku_100L) -+ /* Set DSI link to 100MHz; 2:1 clock ratio */ -+ REG_WRITE(MIPI_CONTROL_REG, 0x00000009); -+ -+ REG_WRITE(PIPEACONF, dev_priv->pipeconf); -+ REG_READ(PIPEACONF); -+ -+ /* Wait for 20ms for the pipe enable to take effect. */ -+ udelay(20000); -+ -+ /* JLIU7_PO hard code for NSC PO Program the display FIFO watermarks */ -+ REG_WRITE(DSPARB, 0x00001d9c); -+ REG_WRITE(DSPFW1, 0xfc0f0f18); -+ REG_WRITE(DSPFW5, 0x04140404); -+ REG_WRITE(DSPFW6, 0x000001f0); -+ -+ REG_WRITE(DSPACNTR, dev_priv->dspcntr); -+ -+ /* Wait for 20ms for the plane enable to take effect. */ -+ udelay(20000); -+} -+ -+/** -+ * Detect the MIPI connection. -+ * -+ * This always returns CONNECTOR_STATUS_CONNECTED. -+ * This connector should only have -+ * been set up if the MIPI was actually connected anyway. -+ */ -+static enum drm_connector_status mrst_dsi_detect(struct drm_connector -+ *connector) -+{ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter mrst_dsi_detect \n"); -+#endif /* PRINT_JLIU7 */ -+ -+ return connector_status_connected; -+} -+ -+/** -+ * Return the list of MIPI DDB modes if available. 
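-+ *
-+ * No DDB read-back is implemented yet, so this advertises wide sync
-+ * ranges and returns the fixed panel mode, if one was configured.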
-+ */ -+static int mrst_dsi_get_modes(struct drm_connector *connector) -+{ -+ struct drm_device *dev = connector->dev; -+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); -+ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev; -+ -+/* FIXME get the MIPI DDB modes */ -+ -+ /* Didn't get an DDB, so -+ * Set wide sync ranges so we get all modes -+ * handed to valid_mode for checking -+ */ -+ connector->display_info.min_vfreq = 0; -+ connector->display_info.max_vfreq = 200; -+ connector->display_info.min_hfreq = 0; -+ connector->display_info.max_hfreq = 200; -+ -+ if (mode_dev->panel_fixed_mode != NULL) { -+ struct drm_display_mode *mode = -+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); -+ drm_mode_probed_add(connector, mode); -+ return 1; -+ } -+ -+ return 0; -+} -+ -+static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = { -+ .dpms = mrst_dsi_dpms, -+ .mode_fixup = psb_intel_lvds_mode_fixup, -+ .prepare = mrst_dsi_prepare, -+ .mode_set = mrst_dsi_mode_set, -+ .commit = mrst_dsi_commit, -+}; -+ -+static const struct drm_connector_helper_funcs -+ mrst_dsi_connector_helper_funcs = { -+ .get_modes = mrst_dsi_get_modes, -+ .mode_valid = psb_intel_lvds_mode_valid, -+ .best_encoder = psb_intel_best_encoder, -+}; -+ -+static const struct drm_connector_funcs mrst_dsi_connector_funcs = { -+ .save = mrst_dsi_save, -+ .restore = mrst_dsi_restore, -+ .detect = mrst_dsi_detect, -+ .fill_modes = drm_helper_probe_single_connector_modes, -+ .destroy = psb_intel_lvds_destroy, -+}; -+ -+/** Returns the panel fixed mode from configuration. */ -+/** FIXME JLIU7 need to revist it. */ -+struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev) -+{ -+ struct drm_display_mode *mode; -+ -+ mode = kzalloc(sizeof(*mode), GFP_KERNEL); -+ if (!mode) -+ return NULL; -+ -+#if 1 /*FIXME jliu7 remove it later */ -+ /* copy from SV - hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */ -+ mode->hdisplay = 800; -+ mode->vdisplay = 480; -+ mode->hsync_start = 808; -+ mode->hsync_end = 848; -+ mode->htotal = 880; -+ mode->vsync_start = 482; -+ mode->vsync_end = 483; -+ mode->vtotal = 486; -+ mode->clock = 33264; -+#endif /*FIXME jliu7 remove it later */ -+ -+#if 0 /*FIXME jliu7 remove it later */ -+ /* hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */ -+ mode->hdisplay = 800; -+ mode->vdisplay = 480; -+ mode->hsync_start = 836; -+ mode->hsync_end = 846; -+ mode->htotal = 1056; -+ mode->vsync_start = 489; -+ mode->vsync_end = 491; -+ mode->vtotal = 525; -+ mode->clock = 33264; -+#endif /*FIXME jliu7 remove it later */ -+ -+#if 0 /*FIXME jliu7 remove it later */ -+ /* hard coded fixed mode for LVDS 800x480 */ -+ mode->hdisplay = 800; -+ mode->vdisplay = 480; -+ mode->hsync_start = 801; -+ mode->hsync_end = 802; -+ mode->htotal = 1024; -+ mode->vsync_start = 481; -+ mode->vsync_end = 482; -+ mode->vtotal = 525; -+ mode->clock = 30994; -+#endif /*FIXME jliu7 remove it later */ -+ -+#if 0 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */ -+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */ -+ mode->hdisplay = 1024; -+ mode->vdisplay = 600; -+ mode->hsync_start = 1072; -+ mode->hsync_end = 1104; -+ mode->htotal = 1184; -+ mode->vsync_start = 603; -+ mode->vsync_end = 604; -+ mode->vtotal = 608; -+ mode->clock = 53990; -+#endif /*FIXME jliu7 remove it later */ -+ -+#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */ -+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */ -+ 
mode->hdisplay = 1024; -+ mode->vdisplay = 600; -+ mode->hsync_start = 1104; -+ mode->hsync_end = 1136; -+ mode->htotal = 1184; -+ mode->vsync_start = 603; -+ mode->vsync_end = 604; -+ mode->vtotal = 608; -+ mode->clock = 53990; -+#endif /*FIXME jliu7 remove it later */ -+ -+#if 0 /*FIXME jliu7 remove it later */ -+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */ -+ mode->hdisplay = 1024; -+ mode->vdisplay = 600; -+ mode->hsync_start = 1124; -+ mode->hsync_end = 1204; -+ mode->htotal = 1312; -+ mode->vsync_start = 607; -+ mode->vsync_end = 610; -+ mode->vtotal = 621; -+ mode->clock = 48885; -+#endif /*FIXME jliu7 remove it later */ -+ -+#if 0 /*FIXME jliu7 remove it later */ -+ /* hard coded fixed mode for LVDS 1024x768 */ -+ mode->hdisplay = 1024; -+ mode->vdisplay = 768; -+ mode->hsync_start = 1048; -+ mode->hsync_end = 1184; -+ mode->htotal = 1344; -+ mode->vsync_start = 771; -+ mode->vsync_end = 777; -+ mode->vtotal = 806; -+ mode->clock = 65000; -+#endif /*FIXME jliu7 remove it later */ -+ -+#if 0 /*FIXME jliu7 remove it later */ -+ /* hard coded fixed mode for LVDS 1366x768 */ -+ mode->hdisplay = 1366; -+ mode->vdisplay = 768; -+ mode->hsync_start = 1430; -+ mode->hsync_end = 1558; -+ mode->htotal = 1664; -+ mode->vsync_start = 769; -+ mode->vsync_end = 770; -+ mode->vtotal = 776; -+ mode->clock = 77500; -+#endif /*FIXME jliu7 remove it later */ -+ -+ drm_mode_set_name(mode); -+ drm_mode_set_crtcinfo(mode, 0); -+ -+ return mode; -+} -+ -+/* ************************************************************************* *\ -+FUNCTION: mrstDSI_clockInit -+ ` -+DESCRIPTION: -+ -+\* ************************************************************************* */ -+static u32 sku_83_mipi_2xclk[4] = {166667, 333333, 444444, 666667}; -+static u32 sku_100_mipi_2xclk[4] = {200000, 400000, 533333, 800000}; -+static u32 sku_100L_mipi_2xclk[4] = {100000, 200000, 266667, 400000}; -+#define MIPI_2XCLK_COUNT 0x04 -+ -+static bool mrstDSI_clockInit(DRM_DRIVER_PRIVATE_T *dev_priv) -+{ -+ u32 Htotal = 0, Vtotal = 0, RRate = 0, mipi_2xclk = 0; -+ u32 i = 0; -+ u32 *p_mipi_2xclk = NULL; -+ -+ (void)GetHS_TX_timeoutCount; -+ (void)GetLP_RX_timeoutCount; -+ (void)GetHSA_Count; -+ (void)GetHBP_Count; -+ (void)GetHFP_Count; -+ (void)GetHAdr_Count; -+ (void)GetHighLowSwitchCount; -+ (void)mrst_init_HIMAX_MIPI_bridge; -+ -+#if 0 /* JLIU7_PO old values */ -+ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */ -+ dev_priv->pixelClock = 33264; /*KHz*/ -+ dev_priv->HsyncWidth = 10; -+ dev_priv->HbackPorch = 210; -+ dev_priv->HfrontPorch = 36; -+ dev_priv->HactiveArea = 800; -+ dev_priv->VsyncWidth = 2; -+ dev_priv->VbackPorch = 34; -+ dev_priv->VfrontPorch = 9; -+ dev_priv->VactiveArea = 480; -+ dev_priv->bpp = 24; -+ -+ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */ -+ dev_priv->dbi_pixelClock = 33264; /*KHz*/ -+ dev_priv->dbi_HsyncWidth = 10; -+ dev_priv->dbi_HbackPorch = 210; -+ dev_priv->dbi_HfrontPorch = 36; -+ dev_priv->dbi_HactiveArea = 800; -+ dev_priv->dbi_VsyncWidth = 2; -+ dev_priv->dbi_VbackPorch = 34; -+ dev_priv->dbi_VfrontPorch = 9; -+ dev_priv->dbi_VactiveArea = 480; -+ dev_priv->dbi_bpp = 24; -+#else /* JLIU7_PO old values */ -+ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */ -+ /* FIXME Pre-Si value, 1 or 2 lanes; 50MHz; Non-Burst w/ sync event */ -+ dev_priv->pixelClock = 33264; /*KHz*/ -+ dev_priv->HsyncWidth = 10; -+ dev_priv->HbackPorch = 8; -+ dev_priv->HfrontPorch = 3; -+ dev_priv->HactiveArea = 800; -+ dev_priv->VsyncWidth = 2; -+ 
dev_priv->VbackPorch = 3;
-+	dev_priv->VfrontPorch = 2;
-+	dev_priv->VactiveArea = 480;
-+	dev_priv->bpp = 24;
-+
-+	/* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
-+	dev_priv->dbi_pixelClock = 33264;	/*KHz*/
-+	dev_priv->dbi_HsyncWidth = 10;
-+	dev_priv->dbi_HbackPorch = 8;
-+	dev_priv->dbi_HfrontPorch = 3;
-+	dev_priv->dbi_HactiveArea = 800;
-+	dev_priv->dbi_VsyncWidth = 2;
-+	dev_priv->dbi_VbackPorch = 3;
-+	dev_priv->dbi_VfrontPorch = 2;
-+	dev_priv->dbi_VactiveArea = 480;
-+	dev_priv->dbi_bpp = 24;
-+#endif /* JLIU7_PO old values */
-+
-+	Htotal = dev_priv->HsyncWidth + dev_priv->HbackPorch +
-+		 dev_priv->HfrontPorch + dev_priv->HactiveArea;
-+	Vtotal = dev_priv->VsyncWidth + dev_priv->VbackPorch +
-+		 dev_priv->VfrontPorch + dev_priv->VactiveArea;
-+
-+	RRate = ((dev_priv->pixelClock * 1000) / (Htotal * Vtotal)) + 1;
-+
-+	dev_priv->RRate = RRate;
-+
-+	/* 2x DDR clock frequency =
-+	   (pixel clock frequency * bits per pixel) / lane count */
-+	mipi_2xclk = (dev_priv->pixelClock * dev_priv->bpp) / dev_priv->laneCount; /* KHz */
-+	dev_priv->DDR_Clock_Calculated = mipi_2xclk / 2;	/* KHz */
-+
-+	DRM_DEBUG("mrstDSI_clockInit RRate = %d, mipi_2xclk = %d. \n", RRate, mipi_2xclk);
-+
-+	if (dev_priv->sku_100)
-+		p_mipi_2xclk = sku_100_mipi_2xclk;
-+	else if (dev_priv->sku_100L)
-+		p_mipi_2xclk = sku_100L_mipi_2xclk;
-+	else
-+		p_mipi_2xclk = sku_83_mipi_2xclk;
-+
-+	for (; i < MIPI_2XCLK_COUNT; i++) {
-+		if ((dev_priv->DDR_Clock_Calculated * 2) < p_mipi_2xclk[i])
-+			break;
-+	}
-+
-+	if (i == MIPI_2XCLK_COUNT) {
-+		DRM_DEBUG("mrstDSI_clockInit the DDR clock is too big, DDR_Clock_Calculated is = %d\n", dev_priv->DDR_Clock_Calculated);
-+		return false;
-+	}
-+
-+	dev_priv->DDR_Clock = p_mipi_2xclk[i] / 2;
-+	dev_priv->ClockBits = i;
-+
-+#if 0 /*JLIU7_PO */
-+#if 0 /* FIXME remove it after power on*/
-+	mipiControlReg = REG_READ(MIPI_CONTROL_REG) & (~MIPI_2X_CLOCK_BITS);
-+	mipiControlReg |= i;
-+	REG_WRITE(MIPI_CONTROL_REG, mipiControlReg);
-+#else /* FIXME remove it after power on*/
-+	mipiControlReg |= i;
-+	REG_WRITE(MIPI_CONTROL_REG, mipiControlReg);
-+#endif /* FIXME remove it after power on*/
-+#endif /*JLIU7_PO */
-+
-+#if 1 /* FIXME remove it after power on*/
-+	DRM_DEBUG("mrstDSI_clockInit, mipi_2x_clock_divider = 0x%x, DDR_Clock_Calculated is = %d\n", i, dev_priv->DDR_Clock_Calculated);
-+#endif /* FIXME remove it after power on*/
-+
-+	return true;
-+}
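-+
-+/*
-+ * Worked example (illustrative, not from the original patch) of the
-+ * selection above: with the hard-coded 33264 kHz pixel clock, 24 bpp and
-+ * 2 lanes, mipi_2xclk = 33264 * 24 / 2 = 399168 kHz, so
-+ * DDR_Clock_Calculated is 199584 kHz.  On a sku_83 part the first table
-+ * entry greater than 2 * 199584 = 399168 is 444444, giving
-+ * DDR_Clock = 222222 kHz and ClockBits = 2.
-+ */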
-+
-+/**
-+ * mrst_dsi_init - setup MIPI connectors on this device
-+ * @dev: drm device
-+ *
-+ * Create the connector, try to figure out what
-+ * modes we can display on the MIPI panel (if present).
-+ */
-+void mrst_dsi_init(struct drm_device *dev,
-+		   struct psb_intel_mode_device *mode_dev)
-+{
-+	DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+	struct psb_intel_output *psb_intel_output;
-+	struct drm_connector *connector;
-+	struct drm_encoder *encoder;
-+
-+#if PRINT_JLIU7
-+	DRM_INFO("JLIU7 enter mrst_dsi_init \n");
-+#endif /* PRINT_JLIU7 */
-+
-+	psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
-+	if (!psb_intel_output)
-+		return;
-+
-+	psb_intel_output->mode_dev = mode_dev;
-+	connector = &psb_intel_output->base;
-+	encoder = &psb_intel_output->enc;
-+	drm_connector_init(dev, &psb_intel_output->base,
-+			   &mrst_dsi_connector_funcs,
-+			   DRM_MODE_CONNECTOR_MIPI);
-+
-+	drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
-+			 DRM_MODE_ENCODER_MIPI);
-+
-+	drm_mode_connector_attach_encoder(&psb_intel_output->base,
-+					  &psb_intel_output->enc);
-+	psb_intel_output->type = INTEL_OUTPUT_MIPI;
-+
-+	drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
-+	drm_connector_helper_add(connector,
-+				 &mrst_dsi_connector_helper_funcs);
-+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-+	connector->interlace_allowed = false;
-+	connector->doublescan_allowed = false;
-+
-+	dsi_backlight = BRIGHTNESS_MAX_LEVEL;
-+	blc_pol = BLC_POLARITY_INVERSE;
-+	blc_freq = 0xc8;
-+
-+	/*
-+	 * MIPI discovery:
-+	 * 1) check for DDB data
-+	 * 2) check for VBT data
-+	 * 3) make sure lid is open
-+	 *    if closed, act like it's not there for now
-+	 */
-+
-+	/* FIXME jliu7 we only support DPI */
-+	dev_priv->dpi = true;
-+
-+	/* FIXME hard coded 4 lanes for Himax HX8858-A, 2 lanes for NSC LM2550 */
-+	dev_priv->laneCount = 2;
-+
-+	/* FIXME hard coded for NSC PO. */
-+	/* We only support BURST_MODE */
-+	dev_priv->videoModeFormat = NON_BURST_MODE_SYNC_EVENTS; /* BURST_MODE */
-+	/* FIXME change it to true if GET_DDB works */
-+	dev_priv->config_phase = false;
-+
-+	if (!mrstDSI_clockInit(dev_priv)) {
-+		DRM_DEBUG("Can't initialize MRST DSI clock.\n");
-+#if 0 /* FIXME JLIU7 */
-+		goto failed_find;
-+#endif /* FIXME JLIU7 */
-+	}
-+
-+	/*
-+	 * If we didn't get DDB data, try getting panel timing
-+	 * from configuration data
-+	 */
-+	mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
-+
-+	if (mode_dev->panel_fixed_mode) {
-+		mode_dev->panel_fixed_mode->type |=
-+		    DRM_MODE_TYPE_PREFERRED;
-+		goto out;	/* FIXME: check for quirks */
-+	}
-+
-+	/* If we still don't have a mode after all that, give up.
*/
-+	if (!mode_dev->panel_fixed_mode) {
-+		DRM_DEBUG
-+		    ("Found no modes on the MIPI panel, ignoring it\n");
-+		goto failed_find;
-+	}
-+
-+out:
-+	drm_sysfs_connector_add(connector);
-+	return;
-+
-+failed_find:
-+	DRM_DEBUG("No MIPI modes found, disabling.\n");
-+	drm_encoder_cleanup(encoder);
-+	drm_connector_cleanup(connector);
-+	kfree(connector);
-+}
-diff -uNr a/drivers/gpu/drm/psb/psb_intel_i2c.c b/drivers/gpu/drm/psb/psb_intel_i2c.c
---- a/drivers/gpu/drm/psb/psb_intel_i2c.c	1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/psb_intel_i2c.c	2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,179 @@
-+/*
-+ * Copyright © 2006-2007 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ *	Eric Anholt <eric@anholt.net>
-+ */
-+/*
-+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
-+ * Jesse Barnes <jesse.barnes@intel.com>
-+ */
-+
-+#include <linux/i2c.h>
-+#include <linux/i2c-id.h>
-+#include <linux/i2c-algo-bit.h>
-+
-+/*
-+ * Intel GPIO access functions
-+ */
-+
-+#define I2C_RISEFALL_TIME 20
-+
-+static int get_clock(void *data)
-+{
-+	struct psb_intel_i2c_chan *chan = data;
-+	struct drm_device *dev = chan->drm_dev;
-+	u32 val;
-+
-+	val = REG_READ(chan->reg);
-+	return (val & GPIO_CLOCK_VAL_IN) != 0;
-+}
-+
-+static int get_data(void *data)
-+{
-+	struct psb_intel_i2c_chan *chan = data;
-+	struct drm_device *dev = chan->drm_dev;
-+	u32 val;
-+
-+	val = REG_READ(chan->reg);
-+	return (val & GPIO_DATA_VAL_IN) != 0;
-+}
-+
-+static void set_clock(void *data, int state_high)
-+{
-+	struct psb_intel_i2c_chan *chan = data;
-+	struct drm_device *dev = chan->drm_dev;
-+	u32 reserved = 0, clock_bits;
-+
-+	/* On most chips, these bits must be preserved in software. */
-+	if (!IS_I830(dev) && !IS_845G(dev))
-+		reserved =
-+		    REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
-+					   GPIO_CLOCK_PULLUP_DISABLE);
-+
-+	if (state_high)
-+		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
-+	else
-+		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
-+			     GPIO_CLOCK_VAL_MASK;
-+	REG_WRITE(chan->reg, reserved | clock_bits);
-+	udelay(I2C_RISEFALL_TIME);	/* wait for the line to change state */
-+}
-+
-+static void set_data(void *data, int state_high)
-+{
-+	struct psb_intel_i2c_chan *chan = data;
-+	struct drm_device *dev = chan->drm_dev;
-+	u32 reserved = 0, data_bits;
-+
-+	/* On most chips, these bits must be preserved in software.
*/ -+ if (!IS_I830(dev) && !IS_845G(dev)) -+ reserved = -+ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | -+ GPIO_CLOCK_PULLUP_DISABLE); -+ -+ if (state_high) -+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; -+ else -+ data_bits = -+ GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | -+ GPIO_DATA_VAL_MASK; -+ -+ REG_WRITE(chan->reg, reserved | data_bits); -+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ -+} -+ -+/** -+ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg -+ * @dev: DRM device -+ * @output: driver specific output device -+ * @reg: GPIO reg to use -+ * @name: name for this bus -+ * -+ * Creates and registers a new i2c bus with the Linux i2c layer, for use -+ * in output probing and control (e.g. DDC or SDVO control functions). -+ * -+ * Possible values for @reg include: -+ * %GPIOA -+ * %GPIOB -+ * %GPIOC -+ * %GPIOD -+ * %GPIOE -+ * %GPIOF -+ * %GPIOG -+ * %GPIOH -+ * see PRM for details on how these different busses are used. -+ */ -+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev, -+ const u32 reg, const char *name) -+{ -+ struct psb_intel_i2c_chan *chan; -+ -+ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL); -+ if (!chan) -+ goto out_free; -+ -+ chan->drm_dev = dev; -+ chan->reg = reg; -+ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); -+ chan->adapter.owner = THIS_MODULE; -+ chan->adapter.algo_data = &chan->algo; -+ chan->adapter.dev.parent = &dev->pdev->dev; -+ chan->algo.setsda = set_data; -+ chan->algo.setscl = set_clock; -+ chan->algo.getsda = get_data; -+ chan->algo.getscl = get_clock; -+ chan->algo.udelay = 20; -+ chan->algo.timeout = usecs_to_jiffies(2200); -+ chan->algo.data = chan; -+ -+ i2c_set_adapdata(&chan->adapter, chan); -+ -+ if (i2c_bit_add_bus(&chan->adapter)) -+ goto out_free; -+ -+ /* JJJ: raise SCL and SDA? */ -+ set_data(chan, 1); -+ set_clock(chan, 1); -+ udelay(20); -+ -+ return chan; -+ -+out_free: -+ kfree(chan); -+ return NULL; -+} -+ -+/** -+ * psb_intel_i2c_destroy - unregister and free i2c bus resources -+ * @output: channel to free -+ * -+ * Unregister the adapter from the i2c layer, then free the structure. -+ */ -+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan) -+{ -+ if (!chan) -+ return; -+ -+ i2c_del_adapter(&chan->adapter); -+ kfree(chan); -+} -diff -uNr a/drivers/gpu/drm/psb/psb_intel_lvds.c b/drivers/gpu/drm/psb/psb_intel_lvds.c ---- a/drivers/gpu/drm/psb/psb_intel_lvds.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_intel_lvds.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,1015 @@ -+/* -+ * Copyright © 2006-2007 Intel Corporation -+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. 
-+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Authors: -+ * Eric Anholt <eric@anholt.net> -+ * Dave Airlie <airlied@linux.ie> -+ * Jesse Barnes <jesse.barnes@intel.com> -+ */ -+ -+#include <linux/i2c.h> -+#include <drm/drm_crtc.h> -+#include <drm/drm_edid.h> -+/* MRST defines start */ -+uint8_t blc_type; -+uint8_t blc_pol; -+uint8_t blc_freq; -+uint8_t blc_minbrightness; -+uint8_t blc_i2caddr; -+uint8_t blc_brightnesscmd; -+int lvds_backlight; /* restore backlight to this value */ -+ -+u32 CoreClock; -+u32 PWMControlRegFreq; -+/* MRST defines end */ -+ -+/** -+ * Sets the backlight level. -+ * -+ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight(). -+ */ -+static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level) -+{ -+ u32 blc_pwm_ctl; -+ -+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; -+ REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl | -+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); -+} -+ -+/** -+ * Returns the maximum level of the backlight duty cycle field. -+ */ -+static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev) -+{ -+ return ((REG_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> -+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; -+} -+ -+/** -+ * Sets the power state for the panel. -+ */ -+static void psb_intel_lvds_set_power(struct drm_device *dev, -+ struct psb_intel_output *output, bool on) -+{ -+ u32 pp_status; -+ -+ if (on) { -+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | -+ POWER_TARGET_ON); -+ do { -+ pp_status = REG_READ(PP_STATUS); -+ } while ((pp_status & PP_ON) == 0); -+ -+ psb_intel_lvds_set_backlight(dev, -+ output-> -+ mode_dev->backlight_duty_cycle); -+ } else { -+ psb_intel_lvds_set_backlight(dev, 0); -+ -+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & -+ ~POWER_TARGET_ON); -+ do { -+ pp_status = REG_READ(PP_STATUS); -+ } while (pp_status & PP_ON); -+ } -+} -+ -+static void psb_intel_lvds_dpms(struct drm_encoder *encoder, int mode) -+{ -+ struct drm_device *dev = encoder->dev; -+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder); -+ -+ if (mode == DRM_MODE_DPMS_ON) -+ psb_intel_lvds_set_power(dev, output, true); -+ else -+ psb_intel_lvds_set_power(dev, output, false); -+ -+ /* XXX: We never power down the LVDS pairs. 
*/ -+} -+ -+static void psb_intel_lvds_save(struct drm_connector *connector) -+{ -+#if 0 /* JB: Disable for drop */ -+ struct drm_device *dev = connector->dev; -+ -+ dev_priv->savePP_ON = REG_READ(PP_ON_DELAYS); -+ dev_priv->savePP_OFF = REG_READ(PP_OFF_DELAYS); -+ dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL); -+ dev_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR); -+ dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); -+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & -+ BACKLIGHT_DUTY_CYCLE_MASK); -+ -+ /* -+ * If the light is off at server startup, just make it full brightness -+ */ -+ if (dev_priv->backlight_duty_cycle == 0) -+ dev_priv->backlight_duty_cycle = -+ psb_intel_lvds_get_max_backlight(dev); -+#endif -+} -+ -+static void psb_intel_lvds_restore(struct drm_connector *connector) -+{ -+#if 0 /* JB: Disable for drop */ -+ struct drm_device *dev = connector->dev; -+ -+ REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); -+ REG_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON); -+ REG_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF); -+ REG_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); -+ REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); -+ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON) -+ psb_intel_lvds_set_power(dev, true); -+ else -+ psb_intel_lvds_set_power(dev, false); -+#endif -+} -+ -+static int psb_intel_lvds_mode_valid(struct drm_connector *connector, -+ struct drm_display_mode *mode) -+{ -+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); -+ struct drm_display_mode *fixed_mode = -+ psb_intel_output->mode_dev->panel_fixed_mode; -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_valid \n"); -+#endif /* PRINT_JLIU7 */ -+ -+ if (fixed_mode) { -+ if (mode->hdisplay > fixed_mode->hdisplay) -+ return MODE_PANEL; -+ if (mode->vdisplay > fixed_mode->vdisplay) -+ return MODE_PANEL; -+ } -+ return MODE_OK; -+} -+ -+static bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, -+ struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode) -+{ -+ struct psb_intel_mode_device *mode_dev = -+ enc_to_psb_intel_output(encoder)->mode_dev; -+ struct drm_device *dev = encoder->dev; -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc); -+ struct drm_encoder *tmp_encoder; -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_fixup \n"); -+#endif /* PRINT_JLIU7 */ -+ -+ /* Should never happen!! */ -+ if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) { -+ printk(KERN_ERR -+ "Can't support LVDS/MIPI on pipe B on MRST\n"); -+ return false; -+ } else if (!IS_MRST(dev) && !IS_I965G(dev) -+ && psb_intel_crtc->pipe == 0) { -+ printk(KERN_ERR "Can't support LVDS on pipe A\n"); -+ return false; -+ } -+ /* Should never happen!! */ -+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, -+ head) { -+ if (tmp_encoder != encoder -+ && tmp_encoder->crtc == encoder->crtc) { -+ printk(KERN_ERR "Can't enable LVDS and another " -+ "encoder on the same pipe\n"); -+ return false; -+ } -+ } -+ -+ /* -+ * If we have timings from the BIOS for the panel, put them in -+ * to the adjusted mode. The CRTC will be set up for this mode, -+ * with the panel scaling set up to source from the H/VDisplay -+ * of the original mode. 
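-+ * For example, a requested 800x600 mode on a 1024x600 panel keeps the
-+ * panel's native timings in adjusted_mode while the panel fitter scales
-+ * the 800x600 source to fill the screen (an illustrative pairing, not a
-+ * mode taken from this driver's tables).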
-+ */ -+ if (mode_dev->panel_fixed_mode != NULL) { -+ adjusted_mode->hdisplay = -+ mode_dev->panel_fixed_mode->hdisplay; -+ adjusted_mode->hsync_start = -+ mode_dev->panel_fixed_mode->hsync_start; -+ adjusted_mode->hsync_end = -+ mode_dev->panel_fixed_mode->hsync_end; -+ adjusted_mode->htotal = mode_dev->panel_fixed_mode->htotal; -+ adjusted_mode->vdisplay = -+ mode_dev->panel_fixed_mode->vdisplay; -+ adjusted_mode->vsync_start = -+ mode_dev->panel_fixed_mode->vsync_start; -+ adjusted_mode->vsync_end = -+ mode_dev->panel_fixed_mode->vsync_end; -+ adjusted_mode->vtotal = mode_dev->panel_fixed_mode->vtotal; -+ adjusted_mode->clock = mode_dev->panel_fixed_mode->clock; -+ drm_mode_set_crtcinfo(adjusted_mode, -+ CRTC_INTERLACE_HALVE_V); -+ } -+ -+ /* -+ * XXX: It would be nice to support lower refresh rates on the -+ * panels to reduce power consumption, and perhaps match the -+ * user's requested refresh rate. -+ */ -+ -+ return true; -+} -+ -+static void psb_intel_lvds_prepare(struct drm_encoder *encoder) -+{ -+ struct drm_device *dev = encoder->dev; -+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder); -+ struct psb_intel_mode_device *mode_dev = output->mode_dev; -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter psb_intel_lvds_prepare \n"); -+#endif /* PRINT_JLIU7 */ -+ -+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); -+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL & -+ BACKLIGHT_DUTY_CYCLE_MASK); -+ -+ psb_intel_lvds_set_power(dev, output, false); -+} -+ -+static void psb_intel_lvds_commit(struct drm_encoder *encoder) -+{ -+ struct drm_device *dev = encoder->dev; -+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder); -+ struct psb_intel_mode_device *mode_dev = output->mode_dev; -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter psb_intel_lvds_commit \n"); -+#endif /* PRINT_JLIU7 */ -+ -+ if (mode_dev->backlight_duty_cycle == 0) -+ mode_dev->backlight_duty_cycle = -+ psb_intel_lvds_get_max_backlight(dev); -+ -+ psb_intel_lvds_set_power(dev, output, true); -+} -+ -+static void psb_intel_lvds_mode_set(struct drm_encoder *encoder, -+ struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode) -+{ -+ struct psb_intel_mode_device *mode_dev = -+ enc_to_psb_intel_output(encoder)->mode_dev; -+ struct drm_device *dev = encoder->dev; -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc); -+ u32 pfit_control; -+ -+ /* -+ * The LVDS pin pair will already have been turned on in the -+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL -+ * settings. -+ */ -+ -+ /* -+ * Enable automatic panel scaling so that non-native modes fill the -+ * screen. Should be enabled before the pipe is enabled, according to -+ * register description and PRM. -+ */ -+ if (mode->hdisplay != adjusted_mode->hdisplay || -+ mode->vdisplay != adjusted_mode->vdisplay) -+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE | -+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | -+ HORIZ_INTERP_BILINEAR); -+ else -+ pfit_control = 0; -+ -+ if (!IS_I965G(dev)) { -+ if (mode_dev->panel_wants_dither) -+ pfit_control |= PANEL_8TO6_DITHER_ENABLE; -+ } else -+ pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT; -+ -+ REG_WRITE(PFIT_CONTROL, pfit_control); -+} -+ -+/** -+ * Detect the LVDS connection. -+ * -+ * This always returns CONNECTOR_STATUS_CONNECTED. -+ * This connector should only have -+ * been set up if the LVDS was actually connected anyway. 
-+ */ -+static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector -+ *connector) -+{ -+ return connector_status_connected; -+} -+ -+/** -+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. -+ */ -+static int psb_intel_lvds_get_modes(struct drm_connector *connector) -+{ -+ struct drm_device *dev = connector->dev; -+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); -+ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev; -+ int ret = 0; -+ -+ if (!IS_MRST(dev)) -+ ret = psb_intel_ddc_get_modes(psb_intel_output); -+ -+ if (ret) -+ return ret; -+ -+ /* Didn't get an EDID, so -+ * Set wide sync ranges so we get all modes -+ * handed to valid_mode for checking -+ */ -+ connector->display_info.min_vfreq = 0; -+ connector->display_info.max_vfreq = 200; -+ connector->display_info.min_hfreq = 0; -+ connector->display_info.max_hfreq = 200; -+ -+ if (mode_dev->panel_fixed_mode != NULL) { -+ struct drm_display_mode *mode = -+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); -+ drm_mode_probed_add(connector, mode); -+ return 1; -+ } -+ -+ return 0; -+} -+ -+/** -+ * psb_intel_lvds_destroy - unregister and free LVDS structures -+ * @connector: connector to free -+ * -+ * Unregister the DDC bus for this connector then free the driver private -+ * structure. -+ */ -+static void psb_intel_lvds_destroy(struct drm_connector *connector) -+{ -+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); -+ -+ if (psb_intel_output->ddc_bus) -+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus); -+ drm_sysfs_connector_remove(connector); -+ drm_connector_cleanup(connector); -+ kfree(connector); -+} -+ -+static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = { -+ .dpms = psb_intel_lvds_dpms, -+ .mode_fixup = psb_intel_lvds_mode_fixup, -+ .prepare = psb_intel_lvds_prepare, -+ .mode_set = psb_intel_lvds_mode_set, -+ .commit = psb_intel_lvds_commit, -+}; -+ -+static const struct drm_connector_helper_funcs -+ psb_intel_lvds_connector_helper_funcs = { -+ .get_modes = psb_intel_lvds_get_modes, -+ .mode_valid = psb_intel_lvds_mode_valid, -+ .best_encoder = psb_intel_best_encoder, -+}; -+ -+static const struct drm_connector_funcs psb_intel_lvds_connector_funcs = { -+ .save = psb_intel_lvds_save, -+ .restore = psb_intel_lvds_restore, -+ .detect = psb_intel_lvds_detect, -+ .fill_modes = drm_helper_probe_single_connector_modes, -+ .destroy = psb_intel_lvds_destroy, -+}; -+ -+ -+static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder) -+{ -+ drm_encoder_cleanup(encoder); -+} -+ -+static const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = { -+ .destroy = psb_intel_lvds_enc_destroy, -+}; -+ -+ -+ -+/** -+ * psb_intel_lvds_init - setup LVDS connectors on this device -+ * @dev: drm device -+ * -+ * Create the connector, register the LVDS DDC bus, and try to figure out what -+ * modes we can display on the LVDS panel (if present). 
-+ */ -+void psb_intel_lvds_init(struct drm_device *dev, -+ struct psb_intel_mode_device *mode_dev) -+{ -+ struct psb_intel_output *psb_intel_output; -+ struct drm_connector *connector; -+ struct drm_encoder *encoder; -+ struct drm_display_mode *scan; /* *modes, *bios_mode; */ -+ struct drm_crtc *crtc; -+ u32 lvds; -+ int pipe; -+ -+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL); -+ if (!psb_intel_output) -+ return; -+ -+ psb_intel_output->mode_dev = mode_dev; -+ connector = &psb_intel_output->base; -+ encoder = &psb_intel_output->enc; -+ drm_connector_init(dev, &psb_intel_output->base, -+ &psb_intel_lvds_connector_funcs, -+ DRM_MODE_CONNECTOR_LVDS); -+ -+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs, -+ DRM_MODE_ENCODER_LVDS); -+ -+ drm_mode_connector_attach_encoder(&psb_intel_output->base, -+ &psb_intel_output->enc); -+ psb_intel_output->type = INTEL_OUTPUT_LVDS; -+ -+ drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs); -+ drm_connector_helper_add(connector, -+ &psb_intel_lvds_connector_helper_funcs); -+ connector->display_info.subpixel_order = SubPixelHorizontalRGB; -+ connector->interlace_allowed = false; -+ connector->doublescan_allowed = false; -+ -+ -+ /* -+ * LVDS discovery: -+ * 1) check for EDID on DDC -+ * 2) check for VBT data -+ * 3) check to see if LVDS is already on -+ * if none of the above, no panel -+ * 4) make sure lid is open -+ * if closed, act like it's not there for now -+ */ -+ -+ /* Set up the DDC bus. */ -+ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C"); -+ if (!psb_intel_output->ddc_bus) { -+ dev_printk(KERN_ERR, &dev->pdev->dev, -+ "DDC bus registration " "failed.\n"); -+ goto failed_ddc; -+ } -+ -+ /* -+ * Attempt to get the fixed panel mode from DDC. Assume that the -+ * preferred mode is the right one. -+ */ -+ psb_intel_ddc_get_modes(psb_intel_output); -+ list_for_each_entry(scan, &connector->probed_modes, head) { -+ if (scan->type & DRM_MODE_TYPE_PREFERRED) { -+ mode_dev->panel_fixed_mode = -+ drm_mode_duplicate(dev, scan); -+ goto out; /* FIXME: check for quirks */ -+ } -+ } -+ -+ /* Failed to get EDID, what about VBT? */ -+ if (mode_dev->vbt_mode) -+ mode_dev->panel_fixed_mode = -+ drm_mode_duplicate(dev, mode_dev->vbt_mode); -+ -+ /* -+ * If we didn't get EDID, try checking if the panel is already turned -+ * on. If so, assume that whatever is currently programmed is the -+ * correct mode. -+ */ -+ lvds = REG_READ(LVDS); -+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; -+ crtc = psb_intel_get_crtc_from_pipe(dev, pipe); -+ -+ if (crtc && (lvds & LVDS_PORT_EN)) { -+ mode_dev->panel_fixed_mode = -+ psb_intel_crtc_mode_get(dev, crtc); -+ if (mode_dev->panel_fixed_mode) { -+ mode_dev->panel_fixed_mode->type |= -+ DRM_MODE_TYPE_PREFERRED; -+ goto out; /* FIXME: check for quirks */ -+ } -+ } -+ -+ /* If we still don't have a mode after all that, give up. */ -+ if (!mode_dev->panel_fixed_mode) { -+ DRM_DEBUG -+ ("Found no modes on the lvds, ignoring the LVDS\n"); -+ goto failed_find; -+ } -+ -+ /* FIXME: detect aopen & mac mini type stuff automatically? */ -+ /* -+ * Blacklist machines with BIOSes that list an LVDS panel without -+ * actually having one. 
-+ */ -+ if (IS_I945GM(dev)) { -+ /* aopen mini pc */ -+ if (dev->pdev->subsystem_vendor == 0xa0a0) { -+ DRM_DEBUG -+ ("Suspected AOpen Mini PC, ignoring the LVDS\n"); -+ goto failed_find; -+ } -+ -+ if ((dev->pdev->subsystem_vendor == 0x8086) && -+ (dev->pdev->subsystem_device == 0x7270)) { -+ /* It's a Mac Mini or Macbook Pro. */ -+ -+ if (mode_dev->panel_fixed_mode != NULL && -+ mode_dev->panel_fixed_mode->hdisplay == 800 && -+ mode_dev->panel_fixed_mode->vdisplay == 600) { -+ DRM_DEBUG -+ ("Suspected Mac Mini, ignoring the LVDS\n"); -+ goto failed_find; -+ } -+ } -+ } -+ -+out: -+ drm_sysfs_connector_add(connector); -+ -+#if PRINT_JLIU7 -+ DRM_INFO("PRINT_JLIU7 hdisplay = %d\n", -+ mode_dev->panel_fixed_mode->hdisplay); -+ DRM_INFO("PRINT_JLIU7 vdisplay = %d\n", -+ mode_dev->panel_fixed_mode->vdisplay); -+ DRM_INFO("PRINT_JLIU7 hsync_start = %d\n", -+ mode_dev->panel_fixed_mode->hsync_start); -+ DRM_INFO("PRINT_JLIU7 hsync_end = %d\n", -+ mode_dev->panel_fixed_mode->hsync_end); -+ DRM_INFO("PRINT_JLIU7 htotal = %d\n", -+ mode_dev->panel_fixed_mode->htotal); -+ DRM_INFO("PRINT_JLIU7 vsync_start = %d\n", -+ mode_dev->panel_fixed_mode->vsync_start); -+ DRM_INFO("PRINT_JLIU7 vsync_end = %d\n", -+ mode_dev->panel_fixed_mode->vsync_end); -+ DRM_INFO("PRINT_JLIU7 vtotal = %d\n", -+ mode_dev->panel_fixed_mode->vtotal); -+ DRM_INFO("PRINT_JLIU7 clock = %d\n", -+ mode_dev->panel_fixed_mode->clock); -+#endif /* PRINT_JLIU7 */ -+ return; -+ -+failed_find: -+ if (psb_intel_output->ddc_bus) -+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus); -+failed_ddc: -+ drm_encoder_cleanup(encoder); -+ drm_connector_cleanup(connector); -+ kfree(connector); -+} -+ -+/* MRST platform start */ -+ -+/* -+ * FIXME need to move to register define head file -+ */ -+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16) -+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16) -+ -+/* The max/min PWM frequency in BPCR[31:17] - */ -+/* The smallest number is 1 (not 0) that can fit in the -+ * 15-bit field of the and then*/ -+/* shifts to the left by one bit to get the actual 16-bit -+ * value that the 15-bits correspond to.*/ -+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF -+ -+#define BRIGHTNESS_MAX_LEVEL 100 -+#define BLC_PWM_PRECISION_FACTOR 10 /* 10000000 */ -+#define BLC_PWM_FREQ_CALC_CONSTANT 32 -+#define MHz 1000000 -+#define BLC_POLARITY_NORMAL 0 -+#define BLC_POLARITY_INVERSE 1 -+ -+/** -+ * Calculate PWM control register value. -+ */ -+static bool mrstLVDSCalculatePWMCtrlRegFreq(struct drm_device *dev) -+{ -+ unsigned long value = 0; -+ if (blc_freq == 0) { -+ /* DRM_ERROR(KERN_ERR "mrstLVDSCalculatePWMCtrlRegFreq: -+ * Frequency Requested is 0.\n"); */ -+ return false; -+ } -+ -+ value = (CoreClock * MHz); -+ value = (value / BLC_PWM_FREQ_CALC_CONSTANT); -+ value = (value * BLC_PWM_PRECISION_FACTOR); -+ value = (value / blc_freq); -+ value = (value / BLC_PWM_PRECISION_FACTOR); -+ -+ if (value > (unsigned long) MRST_BLC_MAX_PWM_REG_FREQ) { -+ return 0; -+ } else { -+ PWMControlRegFreq = (u32) value; -+ return 1; -+ } -+} -+ -+/** -+ * Returns the maximum level of the backlight duty cycle field. 
-+ */ -+static u32 mrst_lvds_get_PWM_ctrl_freq(struct drm_device *dev) -+{ -+ u32 max_pwm_blc = 0; -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter mrst_lvds_get_PWM_ctrl_freq \n"); -+#endif /* PRINT_JLIU7 */ -+ -+/*FIXME JLIU7 get the PWM frequency from configuration */ -+ -+ max_pwm_blc = -+ (REG_READ(BLC_PWM_CTL) & MRST_BACKLIGHT_MODULATION_FREQ_MASK) -+ >> MRST_BACKLIGHT_MODULATION_FREQ_SHIFT; -+ -+ -+ if (!max_pwm_blc) { -+ if (mrstLVDSCalculatePWMCtrlRegFreq(dev)) -+ max_pwm_blc = PWMControlRegFreq; -+ } -+ -+ return max_pwm_blc; -+} -+ -+/** -+ * Sets the backlight level. -+ * -+ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight(). -+ */ -+static void mrst_lvds_set_backlight(struct drm_device *dev, int level) -+{ -+ u32 blc_pwm_ctl; -+ u32 max_pwm_blc; -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter mrst_lvds_set_backlight \n"); -+#endif /* PRINT_JLIU7 */ -+ -+#if 1 /* FIXME JLIU7 */ -+ return; -+#endif /* FIXME JLIU7 */ -+ -+ /* Provent LVDS going to total black */ -+ if (level < 20) -+ level = 20; -+ -+ max_pwm_blc = mrst_lvds_get_PWM_ctrl_freq(dev); -+ -+ if (max_pwm_blc == 0) -+ return; -+ -+ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL; -+ -+ if (blc_pol == BLC_POLARITY_INVERSE) -+ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl; -+ -+ REG_WRITE(BLC_PWM_CTL, -+ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) | -+ blc_pwm_ctl); -+} -+ -+/** -+ * Sets the power state for the panel. -+ */ -+static void mrst_lvds_set_power(struct drm_device *dev, -+ struct psb_intel_output *output, bool on) -+{ -+ u32 pp_status; -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter mrst_lvds_set_power \n"); -+#endif /* PRINT_JLIU7 */ -+ -+ if (on) { -+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | -+ POWER_TARGET_ON); -+ do { -+ pp_status = REG_READ(PP_STATUS); -+ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY); -+ -+ mrst_lvds_set_backlight(dev, lvds_backlight); -+ } else { -+ mrst_lvds_set_backlight(dev, 0); -+ -+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & -+ ~POWER_TARGET_ON); -+ do { -+ pp_status = REG_READ(PP_STATUS); -+ } while (pp_status & PP_ON); -+ } -+} -+ -+static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode) -+{ -+ struct drm_device *dev = encoder->dev; -+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder); -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter mrst_lvds_dpms \n"); -+#endif /* PRINT_JLIU7 */ -+ -+ if (mode == DRM_MODE_DPMS_ON) -+ mrst_lvds_set_power(dev, output, true); -+ else -+ mrst_lvds_set_power(dev, output, false); -+ -+ /* XXX: We never power down the LVDS pairs. */ -+} -+ -+static void mrst_lvds_mode_set(struct drm_encoder *encoder, -+ struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode) -+{ -+ struct psb_intel_mode_device *mode_dev = -+ enc_to_psb_intel_output(encoder)->mode_dev; -+ struct drm_device *dev = encoder->dev; -+ u32 pfit_control; -+ u32 lvds_port; -+ -+#if PRINT_JLIU7 -+ DRM_INFO("JLIU7 enter mrst_lvds_mode_set \n"); -+#endif /* PRINT_JLIU7 */ -+ -+ /* -+ * The LVDS pin pair will already have been turned on in the -+ * psb_intel_crtc_mode_set since it has a large impact on the DPLL -+ * settings. 
-+ */ -+ /*FIXME JLIU7 Get panel power delay parameters from config data */ -+ REG_WRITE(0x61208, 0x25807d0); -+ REG_WRITE(0x6120c, 0x1f407d0); -+ REG_WRITE(0x61210, 0x270f04); -+ -+ lvds_port = (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN; -+ -+ if (mode_dev->panel_wants_dither) -+ lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE; -+ -+ REG_WRITE(LVDS, lvds_port); -+ -+ /* -+ * Enable automatic panel scaling so that non-native modes fill the -+ * screen. Should be enabled before the pipe is enabled, according to -+ * register description and PRM. -+ */ -+ if (mode->hdisplay != adjusted_mode->hdisplay || -+ mode->vdisplay != adjusted_mode->vdisplay) -+ pfit_control = PFIT_ENABLE; -+ else -+ pfit_control = 0; -+ -+ REG_WRITE(PFIT_CONTROL, pfit_control); -+} -+ -+ -+static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = { -+ .dpms = mrst_lvds_dpms, -+ .mode_fixup = psb_intel_lvds_mode_fixup, -+ .prepare = psb_intel_lvds_prepare, -+ .mode_set = mrst_lvds_mode_set, -+ .commit = psb_intel_lvds_commit, -+}; -+ -+/** Returns the panel fixed mode from configuration. */ -+/** FIXME JLIU7 need to revist it. */ -+struct drm_display_mode *mrst_lvds_get_configuration_mode(struct drm_device -+ *dev) -+{ -+ struct drm_display_mode *mode; -+ -+ mode = kzalloc(sizeof(*mode), GFP_KERNEL); -+ if (!mode) -+ return NULL; -+ -+#if 0 /*FIXME jliu7 remove it later */ -+ /* hard coded fixed mode for TPO LTPS LPJ040K001A */ -+ mode->hdisplay = 800; -+ mode->vdisplay = 480; -+ mode->hsync_start = 836; -+ mode->hsync_end = 846; -+ mode->htotal = 1056; -+ mode->vsync_start = 489; -+ mode->vsync_end = 491; -+ mode->vtotal = 525; -+ mode->clock = 33264; -+#endif /*FIXME jliu7 remove it later */ -+ -+#if 0 /*FIXME jliu7 remove it later */ -+ /* hard coded fixed mode for LVDS 800x480 */ -+ mode->hdisplay = 800; -+ mode->vdisplay = 480; -+ mode->hsync_start = 801; -+ mode->hsync_end = 802; -+ mode->htotal = 1024; -+ mode->vsync_start = 481; -+ mode->vsync_end = 482; -+ mode->vtotal = 525; -+ mode->clock = 30994; -+#endif /*FIXME jliu7 remove it later */ -+ -+#if 1 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */ -+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */ -+ mode->hdisplay = 1024; -+ mode->vdisplay = 600; -+ mode->hsync_start = 1072; -+ mode->hsync_end = 1104; -+ mode->htotal = 1184; -+ mode->vsync_start = 603; -+ mode->vsync_end = 604; -+ mode->vtotal = 608; -+ mode->clock = 53990; -+#endif /*FIXME jliu7 remove it later */ -+ -+#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */ -+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */ -+ mode->hdisplay = 1024; -+ mode->vdisplay = 600; -+ mode->hsync_start = 1104; -+ mode->hsync_end = 1136; -+ mode->htotal = 1184; -+ mode->vsync_start = 603; -+ mode->vsync_end = 604; -+ mode->vtotal = 608; -+ mode->clock = 53990; -+#endif /*FIXME jliu7 remove it later */ -+ -+#if 0 /*FIXME jliu7 remove it later */ -+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */ -+ mode->hdisplay = 1024; -+ mode->vdisplay = 600; -+ mode->hsync_start = 1124; -+ mode->hsync_end = 1204; -+ mode->htotal = 1312; -+ mode->vsync_start = 607; -+ mode->vsync_end = 610; -+ mode->vtotal = 621; -+ mode->clock = 48885; -+#endif /*FIXME jliu7 remove it later */ -+ -+#if 0 /*FIXME jliu7 remove it later */ -+ /* hard coded fixed mode for LVDS 1024x768 */ -+ mode->hdisplay = 1024; -+ mode->vdisplay = 768; -+ mode->hsync_start = 1048; -+ mode->hsync_end = 1184; -+ mode->htotal = 1344; -+ mode->vsync_start = 771; -+ 
-+
-+#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
-+ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
-+ mode->hdisplay = 1024;
-+ mode->vdisplay = 600;
-+ mode->hsync_start = 1104;
-+ mode->hsync_end = 1136;
-+ mode->htotal = 1184;
-+ mode->vsync_start = 603;
-+ mode->vsync_end = 604;
-+ mode->vtotal = 608;
-+ mode->clock = 53990;
-+#endif /*FIXME jliu7 remove it later */
-+
-+#if 0 /*FIXME jliu7 remove it later */
-+ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
-+ mode->hdisplay = 1024;
-+ mode->vdisplay = 600;
-+ mode->hsync_start = 1124;
-+ mode->hsync_end = 1204;
-+ mode->htotal = 1312;
-+ mode->vsync_start = 607;
-+ mode->vsync_end = 610;
-+ mode->vtotal = 621;
-+ mode->clock = 48885;
-+#endif /*FIXME jliu7 remove it later */
-+
-+#if 0 /*FIXME jliu7 remove it later */
-+ /* hard coded fixed mode for LVDS 1024x768 */
-+ mode->hdisplay = 1024;
-+ mode->vdisplay = 768;
-+ mode->hsync_start = 1048;
-+ mode->hsync_end = 1184;
-+ mode->htotal = 1344;
-+ mode->vsync_start = 771;
-+ mode->vsync_end = 777;
-+ mode->vtotal = 806;
-+ mode->clock = 65000;
-+#endif /*FIXME jliu7 remove it later */
-+
-+#if 0 /*FIXME jliu7 remove it later */
-+ /* hard coded fixed mode for LVDS 1366x768 */
-+ mode->hdisplay = 1366;
-+ mode->vdisplay = 768;
-+ mode->hsync_start = 1430;
-+ mode->hsync_end = 1558;
-+ mode->htotal = 1664;
-+ mode->vsync_start = 769;
-+ mode->vsync_end = 770;
-+ mode->vtotal = 776;
-+ mode->clock = 77500;
-+#endif /*FIXME jliu7 remove it later */
-+
-+ drm_mode_set_name(mode);
-+ drm_mode_set_crtcinfo(mode, 0);
-+
-+ return mode;
-+}
-+
-+/**
-+ * mrst_lvds_init - setup LVDS connectors on this device
-+ * @dev: drm device
-+ *
-+ * Create the connector, register the LVDS DDC bus, and try to figure out what
-+ * modes we can display on the LVDS panel (if present).
-+ */
-+void mrst_lvds_init(struct drm_device *dev,
-+ struct psb_intel_mode_device *mode_dev)
-+{
-+ struct psb_intel_output *psb_intel_output;
-+ struct drm_connector *connector;
-+ struct drm_encoder *encoder;
-+#if MRST_I2C
-+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
-+#endif
-+#if PRINT_JLIU7
-+ DRM_INFO("JLIU7 enter mrst_lvds_init \n");
-+#endif /* PRINT_JLIU7 */
-+
-+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
-+ if (!psb_intel_output)
-+ return;
-+
-+ psb_intel_output->mode_dev = mode_dev;
-+ connector = &psb_intel_output->base;
-+ encoder = &psb_intel_output->enc;
-+ drm_connector_init(dev, &psb_intel_output->base,
-+ &psb_intel_lvds_connector_funcs,
-+ DRM_MODE_CONNECTOR_LVDS);
-+
-+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
-+ DRM_MODE_ENCODER_LVDS);
-+
-+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
-+ &psb_intel_output->enc);
-+ psb_intel_output->type = INTEL_OUTPUT_LVDS;
-+
-+ drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs);
-+ drm_connector_helper_add(connector,
-+ &psb_intel_lvds_connector_helper_funcs);
-+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-+ connector->interlace_allowed = false;
-+ connector->doublescan_allowed = false;
-+
-+ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
-+
-+ /*
-+ * LVDS discovery:
-+ * 1) check for EDID on DDC
-+ * 2) check for VBT data
-+ * 3) check to see if LVDS is already on
-+ * if none of the above, no panel
-+ * 4) make sure lid is open
-+ * if closed, act like it's not there for now
-+ */
-+
-+#if MRST_I2C
-+ /* Set up the DDC bus. */
-+ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
-+ if (!psb_intel_output->ddc_bus) {
-+ dev_printk(KERN_ERR, &dev->pdev->dev,
-+ "DDC bus registration " "failed.\n");
-+ goto failed_ddc;
-+ }
-+
-+ /*
-+ * Attempt to get the fixed panel mode from DDC. Assume that the
-+ * preferred mode is the right one.
-+ */
-+ psb_intel_ddc_get_modes(psb_intel_output);
-+ list_for_each_entry(scan, &connector->probed_modes, head) {
-+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
-+ mode_dev->panel_fixed_mode =
-+ drm_mode_duplicate(dev, scan);
-+ goto out; /* FIXME: check for quirks */
-+ }
-+ }
-+#endif /* MRST_I2C */
-+
-+ /*
-+ * If we didn't get EDID, try getting panel timing
-+ * from configuration data
-+ */
-+ mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
-+
-+ if (mode_dev->panel_fixed_mode) {
-+ mode_dev->panel_fixed_mode->type |=
-+ DRM_MODE_TYPE_PREFERRED;
-+ goto out; /* FIXME: check for quirks */
-+ }
-+
-+ /* If we still don't have a mode after all that, give up.
*/ -+ if (!mode_dev->panel_fixed_mode) { -+ DRM_DEBUG -+ ("Found no modes on the lvds, ignoring the LVDS\n"); -+ goto failed_find; -+ } -+ -+out: -+ drm_sysfs_connector_add(connector); -+ return; -+ -+failed_find: -+ DRM_DEBUG("No LVDS modes found, disabling.\n"); -+ if (psb_intel_output->ddc_bus) -+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus); -+#if MRST_I2C -+failed_ddc: -+#endif -+ drm_encoder_cleanup(encoder); -+ drm_connector_cleanup(connector); -+ kfree(connector); -+} -+ -+/* MRST platform end */ -diff -uNr a/drivers/gpu/drm/psb/psb_intel_modes.c b/drivers/gpu/drm/psb/psb_intel_modes.c ---- a/drivers/gpu/drm/psb/psb_intel_modes.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_intel_modes.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,64 @@ -+/* -+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> -+ * Copyright (c) 2007 Intel Corporation -+ * Jesse Barnes <jesse.barnes@intel.com> -+ */ -+ -+#include <linux/i2c.h> -+#include <linux/fb.h> -+#include <drm/drmP.h> -+#include "psb_intel_drv.h" -+ -+/** -+ * psb_intel_ddc_probe -+ * -+ */ -+bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output) -+{ -+ u8 out_buf[] = { 0x0, 0x0 }; -+ u8 buf[2]; -+ int ret; -+ struct i2c_msg msgs[] = { -+ { -+ .addr = 0x50, -+ .flags = 0, -+ .len = 1, -+ .buf = out_buf, -+ }, -+ { -+ .addr = 0x50, -+ .flags = I2C_M_RD, -+ .len = 1, -+ .buf = buf, -+ } -+ }; -+ -+ ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2); -+ if (ret == 2) -+ return true; -+ -+ return false; -+} -+ -+/** -+ * psb_intel_ddc_get_modes - get modelist from monitor -+ * @connector: DRM connector device to use -+ * -+ * Fetch the EDID information from @connector using the DDC bus. -+ */ -+int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output) -+{ -+ struct edid *edid; -+ int ret = 0; -+ -+ edid = -+ drm_get_edid(&psb_intel_output->base, -+ &psb_intel_output->ddc_bus->adapter); -+ if (edid) { -+ drm_mode_connector_update_edid_property(&psb_intel_output-> -+ base, edid); -+ ret = drm_add_edid_modes(&psb_intel_output->base, edid); -+ kfree(edid); -+ } -+ return ret; -+} -diff -uNr a/drivers/gpu/drm/psb/psb_intel_reg.h b/drivers/gpu/drm/psb/psb_intel_reg.h ---- a/drivers/gpu/drm/psb/psb_intel_reg.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_intel_reg.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,972 @@ -+#define BLC_PWM_CTL 0x61254 -+#define BLC_PWM_CTL2 0x61250 -+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) -+/** -+ * This is the most significant 15 bits of the number of backlight cycles in a -+ * complete cycle of the modulated backlight control. -+ * -+ * The actual value is this field multiplied by two. -+ */ -+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) -+#define BLM_LEGACY_MODE (1 << 16) -+/** -+ * This is the number of cycles out of the backlight modulation cycle for which -+ * the backlight is on. -+ * -+ * This field must be no greater than the number of cycles in the complete -+ * backlight modulation cycle. 
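-+ *
-+ * For example, if the modulation frequency field holds N, the complete
-+ * cycle is 2 * N counts, so full brightness writes 2 * N here and half
-+ * brightness writes N.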
-+ */ -+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) -+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) -+ -+#define I915_GCFGC 0xf0 -+#define I915_LOW_FREQUENCY_ENABLE (1 << 7) -+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4) -+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4) -+#define I915_DISPLAY_CLOCK_MASK (7 << 4) -+ -+#define I855_HPLLCC 0xc0 -+#define I855_CLOCK_CONTROL_MASK (3 << 0) -+#define I855_CLOCK_133_200 (0 << 0) -+#define I855_CLOCK_100_200 (1 << 0) -+#define I855_CLOCK_100_133 (2 << 0) -+#define I855_CLOCK_166_250 (3 << 0) -+ -+/* I830 CRTC registers */ -+#define HTOTAL_A 0x60000 -+#define HBLANK_A 0x60004 -+#define HSYNC_A 0x60008 -+#define VTOTAL_A 0x6000c -+#define VBLANK_A 0x60010 -+#define VSYNC_A 0x60014 -+#define PIPEASRC 0x6001c -+#define BCLRPAT_A 0x60020 -+#define VSYNCSHIFT_A 0x60028 -+ -+#define HTOTAL_B 0x61000 -+#define HBLANK_B 0x61004 -+#define HSYNC_B 0x61008 -+#define VTOTAL_B 0x6100c -+#define VBLANK_B 0x61010 -+#define VSYNC_B 0x61014 -+#define PIPEBSRC 0x6101c -+#define BCLRPAT_B 0x61020 -+#define VSYNCSHIFT_B 0x61028 -+ -+#define PP_STATUS 0x61200 -+# define PP_ON (1 << 31) -+/** -+ * Indicates that all dependencies of the panel are on: -+ * -+ * - PLL enabled -+ * - pipe enabled -+ * - LVDS/DVOB/DVOC on -+ */ -+# define PP_READY (1 << 30) -+# define PP_SEQUENCE_NONE (0 << 28) -+# define PP_SEQUENCE_ON (1 << 28) -+# define PP_SEQUENCE_OFF (2 << 28) -+# define PP_SEQUENCE_MASK 0x30000000 -+#define PP_CONTROL 0x61204 -+# define POWER_TARGET_ON (1 << 0) -+ -+#define LVDSPP_ON 0x61208 -+#define LVDSPP_OFF 0x6120c -+#define PP_CYCLE 0x61210 -+ -+#define PFIT_CONTROL 0x61230 -+# define PFIT_ENABLE (1 << 31) -+# define PFIT_PIPE_MASK (3 << 29) -+# define PFIT_PIPE_SHIFT 29 -+# define VERT_INTERP_DISABLE (0 << 10) -+# define VERT_INTERP_BILINEAR (1 << 10) -+# define VERT_INTERP_MASK (3 << 10) -+# define VERT_AUTO_SCALE (1 << 9) -+# define HORIZ_INTERP_DISABLE (0 << 6) -+# define HORIZ_INTERP_BILINEAR (1 << 6) -+# define HORIZ_INTERP_MASK (3 << 6) -+# define HORIZ_AUTO_SCALE (1 << 5) -+# define PANEL_8TO6_DITHER_ENABLE (1 << 3) -+ -+#define PFIT_PGM_RATIOS 0x61234 -+# define PFIT_VERT_SCALE_MASK 0xfff00000 -+# define PFIT_HORIZ_SCALE_MASK 0x0000fff0 -+ -+#define PFIT_AUTO_RATIOS 0x61238 -+ -+ -+#define DPLL_A 0x06014 -+#define DPLL_B 0x06018 -+# define DPLL_VCO_ENABLE (1 << 31) -+# define DPLL_DVO_HIGH_SPEED (1 << 30) -+# define DPLL_SYNCLOCK_ENABLE (1 << 29) -+# define DPLL_VGA_MODE_DIS (1 << 28) -+# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ -+# define DPLLB_MODE_LVDS (2 << 26) /* i915 */ -+# define DPLL_MODE_MASK (3 << 26) -+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ -+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ -+# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ -+# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ -+# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ -+# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ -+/** -+ * The i830 generation, in DAC/serial mode, defines p1 as two plus this -+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set. -+ */ -+# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 -+/** -+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within -+ * this field (only one bit may be set). 
-+ */ -+# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 -+# define DPLL_FPA01_P1_POST_DIV_SHIFT 16 -+# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required -+ * in DVO non-gang */ -+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ -+# define PLL_REF_INPUT_DREFCLK (0 << 13) -+# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ -+# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO -+ * TVCLKIN */ -+# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) -+# define PLL_REF_INPUT_MASK (3 << 13) -+# define PLL_LOAD_PULSE_PHASE_SHIFT 9 -+/* -+ * Parallel to Serial Load Pulse phase selection. -+ * Selects the phase for the 10X DPLL clock for the PCIe -+ * digital display port. The range is 4 to 13; 10 or more -+ * is just a flip delay. The default is 6 -+ */ -+# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) -+# define DISPLAY_RATE_SELECT_FPA1 (1 << 8) -+ -+/** -+ * SDVO multiplier for 945G/GM. Not used on 965. -+ * -+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK -+ */ -+# define SDVO_MULTIPLIER_MASK 0x000000ff -+# define SDVO_MULTIPLIER_SHIFT_HIRES 4 -+# define SDVO_MULTIPLIER_SHIFT_VGA 0 -+ -+/** @defgroup DPLL_MD -+ * @{ -+ */ -+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */ -+#define DPLL_A_MD 0x0601c -+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */ -+#define DPLL_B_MD 0x06020 -+/** -+ * UDI pixel divider, controlling how many pixels are stuffed into a packet. -+ * -+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO. -+ */ -+# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 -+# define DPLL_MD_UDI_DIVIDER_SHIFT 24 -+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ -+# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 -+# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 -+/** -+ * SDVO/UDI pixel multiplier. -+ * -+ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus -+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate -+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing -+ * dummy bytes in the datastream at an increased clock rate, with both sides of -+ * the link knowing how many bytes are fill. -+ * -+ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock -+ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be -+ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and -+ * through an SDVO command. -+ * -+ * This register field has values of multiplication factor minus 1, with -+ * a maximum multiplier of 5 for SDVO. -+ */ -+# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 -+# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 -+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. -+ * This best be set to the default value (3) or the CRT won't work. No, -+ * I don't entirely understand what this does... 
-+ */ -+# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f -+# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 -+/** @} */ -+ -+#define DPLL_TEST 0x606c -+# define DPLLB_TEST_SDVO_DIV_1 (0 << 22) -+# define DPLLB_TEST_SDVO_DIV_2 (1 << 22) -+# define DPLLB_TEST_SDVO_DIV_4 (2 << 22) -+# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) -+# define DPLLB_TEST_N_BYPASS (1 << 19) -+# define DPLLB_TEST_M_BYPASS (1 << 18) -+# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) -+# define DPLLA_TEST_N_BYPASS (1 << 3) -+# define DPLLA_TEST_M_BYPASS (1 << 2) -+# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) -+ -+#define ADPA 0x61100 -+#define ADPA_DAC_ENABLE (1<<31) -+#define ADPA_DAC_DISABLE 0 -+#define ADPA_PIPE_SELECT_MASK (1<<30) -+#define ADPA_PIPE_A_SELECT 0 -+#define ADPA_PIPE_B_SELECT (1<<30) -+#define ADPA_USE_VGA_HVPOLARITY (1<<15) -+#define ADPA_SETS_HVPOLARITY 0 -+#define ADPA_VSYNC_CNTL_DISABLE (1<<11) -+#define ADPA_VSYNC_CNTL_ENABLE 0 -+#define ADPA_HSYNC_CNTL_DISABLE (1<<10) -+#define ADPA_HSYNC_CNTL_ENABLE 0 -+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4) -+#define ADPA_VSYNC_ACTIVE_LOW 0 -+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3) -+#define ADPA_HSYNC_ACTIVE_LOW 0 -+ -+#define FPA0 0x06040 -+#define FPA1 0x06044 -+#define FPB0 0x06048 -+#define FPB1 0x0604c -+# define FP_N_DIV_MASK 0x003f0000 -+# define FP_N_DIV_SHIFT 16 -+# define FP_M1_DIV_MASK 0x00003f00 -+# define FP_M1_DIV_SHIFT 8 -+# define FP_M2_DIV_MASK 0x0000003f -+# define FP_M2_DIV_SHIFT 0 -+ -+ -+#define PORT_HOTPLUG_EN 0x61110 -+# define SDVOB_HOTPLUG_INT_EN (1 << 26) -+# define SDVOC_HOTPLUG_INT_EN (1 << 25) -+# define TV_HOTPLUG_INT_EN (1 << 18) -+# define CRT_HOTPLUG_INT_EN (1 << 9) -+# define CRT_HOTPLUG_FORCE_DETECT (1 << 3) -+ -+#define PORT_HOTPLUG_STAT 0x61114 -+# define CRT_HOTPLUG_INT_STATUS (1 << 11) -+# define TV_HOTPLUG_INT_STATUS (1 << 10) -+# define CRT_HOTPLUG_MONITOR_MASK (3 << 8) -+# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) -+# define CRT_HOTPLUG_MONITOR_MONO (2 << 8) -+# define CRT_HOTPLUG_MONITOR_NONE (0 << 8) -+# define SDVOC_HOTPLUG_INT_STATUS (1 << 7) -+# define SDVOB_HOTPLUG_INT_STATUS (1 << 6) -+ -+#define SDVOB 0x61140 -+#define SDVOC 0x61160 -+#define SDVO_ENABLE (1 << 31) -+#define SDVO_PIPE_B_SELECT (1 << 30) -+#define SDVO_STALL_SELECT (1 << 29) -+#define SDVO_INTERRUPT_ENABLE (1 << 26) -+/** -+ * 915G/GM SDVO pixel multiplier. -+ * -+ * Programmed value is multiplier - 1, up to 5x. -+ * -+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK -+ */ -+#define SDVO_PORT_MULTIPLY_MASK (7 << 23) -+#define SDVO_PORT_MULTIPLY_SHIFT 23 -+#define SDVO_PHASE_SELECT_MASK (15 << 19) -+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19) -+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) -+#define SDVOC_GANG_MODE (1 << 16) -+#define SDVO_BORDER_ENABLE (1 << 7) -+#define SDVOB_PCIE_CONCURRENCY (1 << 3) -+#define SDVO_DETECTED (1 << 2) -+/* Bits to be preserved when writing */ -+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14)) -+#define SDVOC_PRESERVE_MASK (1 << 17) -+ -+/** @defgroup LVDS -+ * @{ -+ */ -+/** -+ * This register controls the LVDS output enable, pipe selection, and data -+ * format selection. -+ * -+ * All of the clock/data pairs are force powered down by power sequencing. -+ */ -+#define LVDS 0x61180 -+/** -+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as -+ * the DPLL semantics change when the LVDS is assigned to that pipe. -+ */ -+# define LVDS_PORT_EN (1 << 31) -+/** Selects pipe B for LVDS data. Must be set on pre-965. 
*/ -+# define LVDS_PIPEB_SELECT (1 << 30) -+ -+/** -+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per -+ * pixel. -+ */ -+# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) -+# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) -+# define LVDS_A0A2_CLKA_POWER_UP (3 << 8) -+/** -+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit -+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be -+ * on. -+ */ -+# define LVDS_A3_POWER_MASK (3 << 6) -+# define LVDS_A3_POWER_DOWN (0 << 6) -+# define LVDS_A3_POWER_UP (3 << 6) -+/** -+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP -+ * is set. -+ */ -+# define LVDS_CLKB_POWER_MASK (3 << 4) -+# define LVDS_CLKB_POWER_DOWN (0 << 4) -+# define LVDS_CLKB_POWER_UP (3 << 4) -+ -+/** -+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 -+ * setting for whether we are in dual-channel mode. The B3 pair will -+ * additionally only be powered up when LVDS_A3_POWER_UP is set. -+ */ -+# define LVDS_B0B3_POWER_MASK (3 << 2) -+# define LVDS_B0B3_POWER_DOWN (0 << 2) -+# define LVDS_B0B3_POWER_UP (3 << 2) -+ -+#define PIPEACONF 0x70008 -+#define PIPEACONF_ENABLE (1<<31) -+#define PIPEACONF_DISABLE 0 -+#define PIPEACONF_DOUBLE_WIDE (1<<30) -+#define I965_PIPECONF_ACTIVE (1<<30) -+#define PIPEACONF_SINGLE_WIDE 0 -+#define PIPEACONF_PIPE_UNLOCKED 0 -+#define PIPEACONF_PIPE_LOCKED (1<<25) -+#define PIPEACONF_PALETTE 0 -+#define PIPEACONF_GAMMA (1<<24) -+#define PIPECONF_FORCE_BORDER (1<<25) -+#define PIPECONF_PROGRESSIVE (0 << 21) -+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) -+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) -+ -+#define PIPEBCONF 0x71008 -+#define PIPEBCONF_ENABLE (1<<31) -+#define PIPEBCONF_DISABLE 0 -+#define PIPEBCONF_DOUBLE_WIDE (1<<30) -+#define PIPEBCONF_DISABLE 0 -+#define PIPEBCONF_GAMMA (1<<24) -+#define PIPEBCONF_PALETTE 0 -+ -+#define PIPEBGCMAXRED 0x71010 -+#define PIPEBGCMAXGREEN 0x71014 -+#define PIPEBGCMAXBLUE 0x71018 -+#define PIPEBSTAT 0x71024 -+#define PIPEBFRAMEHIGH 0x71040 -+#define PIPEBFRAMEPIXEL 0x71044 -+ -+#define DSPARB 0x70030 -+#define DSPFW1 0x70034 -+#define DSPFW2 0x70038 -+#define DSPFW3 0x7003c -+#define DSPFW4 0x70050 -+#define DSPFW5 0x70054 -+#define DSPFW6 0x70058 -+ -+#define DSPACNTR 0x70180 -+#define DSPBCNTR 0x71180 -+#define DISPLAY_PLANE_ENABLE (1<<31) -+#define DISPLAY_PLANE_DISABLE 0 -+#define DISPPLANE_GAMMA_ENABLE (1<<30) -+#define DISPPLANE_GAMMA_DISABLE 0 -+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26) -+#define DISPPLANE_8BPP (0x2<<26) -+#define DISPPLANE_15_16BPP (0x4<<26) -+#define DISPPLANE_16BPP (0x5<<26) -+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) -+#define DISPPLANE_32BPP (0x7<<26) -+#define DISPPLANE_STEREO_ENABLE (1<<25) -+#define DISPPLANE_STEREO_DISABLE 0 -+#define DISPPLANE_SEL_PIPE_MASK (1<<24) -+#define DISPPLANE_SEL_PIPE_A 0 -+#define DISPPLANE_SEL_PIPE_B (1<<24) -+#define DISPPLANE_SRC_KEY_ENABLE (1<<22) -+#define DISPPLANE_SRC_KEY_DISABLE 0 -+#define DISPPLANE_LINE_DOUBLE (1<<20) -+#define DISPPLANE_NO_LINE_DOUBLE 0 -+#define DISPPLANE_STEREO_POLARITY_FIRST 0 -+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) -+/* plane B only */ -+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) -+#define DISPPLANE_ALPHA_TRANS_DISABLE 0 -+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0 -+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) -+ -+#define DSPABASE 0x70184 -+#define DSPASTRIDE 0x70188 -+ -+#define DSPBBASE 0x71184 -+#define DSPBADDR DSPBBASE -+#define DSPBSTRIDE 0x71188 -+ -+#define DSPAKEYVAL 
0x70194 -+#define DSPAKEYMASK 0x70198 -+ -+#define DSPAPOS 0x7018C /* reserved */ -+#define DSPASIZE 0x70190 -+#define DSPBPOS 0x7118C -+#define DSPBSIZE 0x71190 -+ -+#define DSPASURF 0x7019C -+#define DSPATILEOFF 0x701A4 -+ -+#define DSPBSURF 0x7119C -+#define DSPBTILEOFF 0x711A4 -+ -+#define VGACNTRL 0x71400 -+# define VGA_DISP_DISABLE (1 << 31) -+# define VGA_2X_MODE (1 << 30) -+# define VGA_PIPE_B_SELECT (1 << 29) -+ -+/* -+ * Some BIOS scratch area registers. The 845 (and 830?) store the amount -+ * of video memory available to the BIOS in SWF1. -+ */ -+ -+#define SWF0 0x71410 -+#define SWF1 0x71414 -+#define SWF2 0x71418 -+#define SWF3 0x7141c -+#define SWF4 0x71420 -+#define SWF5 0x71424 -+#define SWF6 0x71428 -+ -+/* -+ * 855 scratch registers. -+ */ -+#define SWF00 0x70410 -+#define SWF01 0x70414 -+#define SWF02 0x70418 -+#define SWF03 0x7041c -+#define SWF04 0x70420 -+#define SWF05 0x70424 -+#define SWF06 0x70428 -+ -+#define SWF10 SWF0 -+#define SWF11 SWF1 -+#define SWF12 SWF2 -+#define SWF13 SWF3 -+#define SWF14 SWF4 -+#define SWF15 SWF5 -+#define SWF16 SWF6 -+ -+#define SWF30 0x72414 -+#define SWF31 0x72418 -+#define SWF32 0x7241c -+ -+ -+/* -+ * Palette registers -+ */ -+#define PALETTE_A 0x0a000 -+#define PALETTE_B 0x0a800 -+ -+#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC) -+#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG) -+#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG) -+#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG) -+#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG) -+ -+ -+/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G) */ -+#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG) -+#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG) -+#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG) -+#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG) -+ -+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ -+ (dev)->pci_device == 0x2982 || \ -+ (dev)->pci_device == 0x2992 || \ -+ (dev)->pci_device == 0x29A2 || \ -+ (dev)->pci_device == 0x2A02 || \ -+ (dev)->pci_device == 0x2A12) -+ -+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) -+ -+#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ -+ (dev)->pci_device == 0x29B2 || \ -+ (dev)->pci_device == 0x29D2) -+ -+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ -+ IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev) || \ -+ IS_MRST(dev)) -+ -+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ -+ IS_I945GM(dev) || IS_I965GM(dev) || \ -+ IS_POULSBO(dev) || IS_MRST(dev)) -+ -+/* Cursor A & B regs */ -+#define CURACNTR 0x70080 -+#define CURSOR_MODE_DISABLE 0x00 -+#define CURSOR_MODE_64_32B_AX 0x07 -+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) -+#define MCURSOR_GAMMA_ENABLE (1 << 26) -+#define CURABASE 0x70084 -+#define CURAPOS 0x70088 -+#define CURSOR_POS_MASK 0x007FF -+#define CURSOR_POS_SIGN 0x8000 -+#define CURSOR_X_SHIFT 0 -+#define CURSOR_Y_SHIFT 16 -+#define CURBCNTR 0x700c0 -+#define CURBBASE 0x700c4 -+#define CURBPOS 0x700c8 -+ -+/* -+ * MOORESTOWN delta registers -+ */ -+#define MRST_DPLL_A 0x0f014 -+#define DPLLA_MODE_LVDS (2 << 26) /* mrst */ -+#define MRST_FPA0 0x0f040 -+#define MRST_FPA1 0x0f044 -+ -+/* #define LVDS 0x61180 */ -+# define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25) -+# define MRST_PANEL_24_DOT_1_FORMAT 
(1 << 24) -+# define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6) -+ -+#define MIPI 0x61190 -+# define MIPI_PORT_EN (1 << 31) -+ -+/* #define PP_CONTROL 0x61204 */ -+# define POWER_DOWN_ON_RESET (1 << 1) -+ -+/* #define PFIT_CONTROL 0x61230 */ -+# define PFIT_PIPE_SELECT (3 << 29) -+# define PFIT_PIPE_SELECT_SHIFT (29) -+ -+/* #define BLC_PWM_CTL 0x61254 */ -+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16) -+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16) -+ -+/* #define PIPEACONF 0x70008 */ -+#define PIPEACONF_PIPE_STATE (1<<30) -+/* #define DSPACNTR 0x70180 */ -+#if 0 /*FIXME JLIU7 need to define the following */ -+1000 = 32 - bit RGBX(10 : 10 : 10 : 2) -+pixel format.Ignore alpha.1010 = BGRX 10 : 10 : 10 : 2 1100 = 64 - bit RGBX -+(16 : 16 : 16 : 16) 16 bit floating point pixel format. -+Ignore alpha.1110 = 32 - bit RGBX(8 : 8 : 8 : 8) pixel format. -+ Ignore -+ alpha. -+#endif /*FIXME JLIU7 need to define the following */ -+ -+#define MRST_DSPABASE 0x7019c -+ -+/* -+ * MOORESTOWN reserved registers -+ */ -+#if 0 -+#define DSPAPOS 0x7018C /* reserved */ -+#define DSPASIZE 0x70190 -+#endif -+/* -+ * Moorestown registers. -+ */ -+/*=========================================================================== -+; General Constants -+;--------------------------------------------------------------------------*/ -+#define BIT0 0x00000001 -+#define BIT1 0x00000002 -+#define BIT2 0x00000004 -+#define BIT3 0x00000008 -+#define BIT4 0x00000010 -+#define BIT5 0x00000020 -+#define BIT6 0x00000040 -+#define BIT7 0x00000080 -+#define BIT8 0x00000100 -+#define BIT9 0x00000200 -+#define BIT10 0x00000400 -+#define BIT11 0x00000800 -+#define BIT12 0x00001000 -+#define BIT13 0x00002000 -+#define BIT14 0x00004000 -+#define BIT15 0x00008000 -+#define BIT16 0x00010000 -+#define BIT17 0x00020000 -+#define BIT18 0x00040000 -+#define BIT19 0x00080000 -+#define BIT20 0x00100000 -+#define BIT21 0x00200000 -+#define BIT22 0x00400000 -+#define BIT23 0x00800000 -+#define BIT24 0x01000000 -+#define BIT25 0x02000000 -+#define BIT26 0x04000000 -+#define BIT27 0x08000000 -+#define BIT28 0x10000000 -+#define BIT29 0x20000000 -+#define BIT30 0x40000000 -+#define BIT31 0x80000000 -+/*=========================================================================== -+; MIPI IP registers -+;--------------------------------------------------------------------------*/ -+#define DEVICE_READY_REG 0xb000 -+#define INTR_STAT_REG 0xb004 -+#define RX_SOT_ERROR BIT0 -+#define RX_SOT_SYNC_ERROR BIT1 -+#define RX_ESCAPE_MODE_ENTRY_ERROR BIT3 -+#define RX_LP_TX_SYNC_ERROR BIT4 -+#define RX_HS_RECEIVE_TIMEOUT_ERROR BIT5 -+#define RX_FALSE_CONTROL_ERROR BIT6 -+#define RX_ECC_SINGLE_BIT_ERROR BIT7 -+#define RX_ECC_MULTI_BIT_ERROR BIT8 -+#define RX_CHECKSUM_ERROR BIT9 -+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT10 -+#define RX_DSI_VC_ID_INVALID BIT11 -+#define TX_FALSE_CONTROL_ERROR BIT12 -+#define TX_ECC_SINGLE_BIT_ERROR BIT13 -+#define TX_ECC_MULTI_BIT_ERROR BIT14 -+#define TX_CHECKSUM_ERROR BIT15 -+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT16 -+#define TX_DSI_VC_ID_INVALID BIT17 -+#define HIGH_CONTENTION BIT18 -+#define LOW_CONTENTION BIT19 -+#define DPI_FIFO_UNDER_RUN BIT20 -+#define HS_TX_TIMEOUT BIT21 -+#define LP_RX_TIMEOUT BIT22 -+#define TURN_AROUND_ACK_TIMEOUT BIT23 -+#define ACK_WITH_NO_ERROR BIT24 -+#define INTR_EN_REG 0xb008 -+#define DSI_FUNC_PRG_REG 0xb00c -+#define DPI_CHANNEL_NUMBER_POS 0x03 -+#define DBI_CHANNEL_NUMBER_POS 0x05 -+#define FMT_DPI_POS 0x07 -+#define FMT_DBI_POS 0x0A -+#define DBI_DATA_WIDTH_POS 0x0D -+#define 
HS_TX_TIMEOUT_REG 0xb010 -+#define LP_RX_TIMEOUT_REG 0xb014 -+#define TURN_AROUND_TIMEOUT_REG 0xb018 -+#define DEVICE_RESET_REG 0xb01C -+#define DPI_RESOLUTION_REG 0xb020 -+#define RES_V_POS 0x10 -+#define DBI_RESOLUTION_REG 0xb024 -+#define HORIZ_SYNC_PAD_COUNT_REG 0xb028 -+#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C -+#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030 -+#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034 -+#define VERT_SYNC_PAD_COUNT_REG 0xb038 -+#define VERT_BACK_PORCH_COUNT_REG 0xb03c -+#define VERT_FRONT_PORCH_COUNT_REG 0xb040 -+#define HIGH_LOW_SWITCH_COUNT_REG 0xb044 -+#define DPI_CONTROL_REG 0xb048 -+#define DPI_SHUT_DOWN BIT0 -+#define DPI_TURN_ON BIT1 -+#define DPI_COLOR_MODE_ON BIT2 -+#define DPI_COLOR_MODE_OFF BIT3 -+#define DPI_BACK_LIGHT_ON BIT4 -+#define DPI_BACK_LIGHT_OFF BIT5 -+#define DPI_LP BIT6 -+#define DPI_DATA_REG 0xb04c -+#define DPI_BACK_LIGHT_ON_DATA 0x07 -+#define DPI_BACK_LIGHT_OFF_DATA 0x17 -+#define INIT_COUNT_REG 0xb050 -+#define MAX_RET_PAK_REG 0xb054 -+#define VIDEO_FMT_REG 0xb058 -+#define EOT_DISABLE_REG 0xb05c -+#define LP_BYTECLK_REG 0xb060 -+#define LP_GEN_DATA_REG 0xb064 -+#define HS_GEN_DATA_REG 0xb068 -+#define LP_GEN_CTRL_REG 0xb06C -+#define HS_GEN_CTRL_REG 0xb070 -+#define GEN_FIFO_STAT_REG 0xb074 -+#define HS_DATA_FIFO_FULL BIT0 -+#define HS_DATA_FIFO_HALF_EMPTY BIT1 -+#define HS_DATA_FIFO_EMPTY BIT2 -+#define LP_DATA_FIFO_FULL BIT8 -+#define LP_DATA_FIFO_HALF_EMPTY BIT9 -+#define LP_DATA_FIFO_EMPTY BIT10 -+#define HS_CTRL_FIFO_FULL BIT16 -+#define HS_CTRL_FIFO_HALF_EMPTY BIT17 -+#define HS_CTRL_FIFO_EMPTY BIT18 -+#define LP_CTRL_FIFO_FULL BIT24 -+#define LP_CTRL_FIFO_HALF_EMPTY BIT25 -+#define LP_CTRL_FIFO_EMPTY BIT26 -+/*=========================================================================== -+; MIPI Adapter registers -+;--------------------------------------------------------------------------*/ -+#define MIPI_CONTROL_REG 0xb104 -+#define MIPI_2X_CLOCK_BITS (BIT0 | BIT1) -+#define MIPI_DATA_ADDRESS_REG 0xb108 -+#define MIPI_DATA_LENGTH_REG 0xb10C -+#define MIPI_COMMAND_ADDRESS_REG 0xb110 -+#define MIPI_COMMAND_LENGTH_REG 0xb114 -+#define MIPI_READ_DATA_RETURN_REG0 0xb118 -+#define MIPI_READ_DATA_RETURN_REG1 0xb11C -+#define MIPI_READ_DATA_RETURN_REG2 0xb120 -+#define MIPI_READ_DATA_RETURN_REG3 0xb124 -+#define MIPI_READ_DATA_RETURN_REG4 0xb128 -+#define MIPI_READ_DATA_RETURN_REG5 0xb12C -+#define MIPI_READ_DATA_RETURN_REG6 0xb130 -+#define MIPI_READ_DATA_RETURN_REG7 0xb134 -+#define MIPI_READ_DATA_VALID_REG 0xb138 -+/* DBI COMMANDS */ -+#define soft_reset 0x01 -+/* ************************************************************************* *\ -+The display module performs a software reset. -+Registers are written with their SW Reset default values. -+\* ************************************************************************* */ -+#define get_power_mode 0x0a -+/* ************************************************************************* *\ -+The display module returns the current power mode -+\* ************************************************************************* */ -+#define get_address_mode 0x0b -+/* ************************************************************************* *\ -+The display module returns the current status. -+\* ************************************************************************* */ -+#define get_pixel_format 0x0c -+/* ************************************************************************* *\ -+This command gets the pixel format for the RGB image data -+used by the interface. 
-+#define get_display_mode 0x0d
-+/* ************************************************************************* *\
-+The display module returns the Display Image Mode status.
-+\* ************************************************************************* */
-+#define get_signal_mode 0x0e
-+/* ************************************************************************* *\
-+The display module returns the Display Signal Mode.
-+\* ************************************************************************* */
-+#define get_diagnostic_result 0x0f
-+/* ************************************************************************* *\
-+The display module returns the self-diagnostic results following
-+a Sleep Out command.
-+\* ************************************************************************* */
-+#define enter_sleep_mode 0x10
-+/* ************************************************************************* *\
-+This command causes the display module to enter the Sleep mode.
-+In this mode, all unnecessary blocks inside the display module are disabled
-+except interface communication. This is the lowest power mode
-+the display module supports.
-+\* ************************************************************************* */
-+#define exit_sleep_mode 0x11
-+/* ************************************************************************* *\
-+This command causes the display module to exit Sleep mode.
-+All blocks inside the display module are enabled.
-+\* ************************************************************************* */
-+#define enter_partial_mode 0x12
-+/* ************************************************************************* *\
-+This command causes the display module to enter the Partial Display Mode.
-+The Partial Display Mode window is described by the set_partial_area command.
-+\* ************************************************************************* */
-+#define enter_normal_mode 0x13
-+/* ************************************************************************* *\
-+This command causes the display module to enter the Normal mode.
-+Normal mode is defined as both Partial Display mode and Scroll mode off.
-+\* ************************************************************************* */
-+#define exit_invert_mode 0x20
-+/* ************************************************************************* *\
-+This command causes the display module to stop inverting the image data on
-+the display device. The frame memory contents remain unchanged.
-+No status bits are changed.
-+\* ************************************************************************* */
-+#define enter_invert_mode 0x21
-+/* ************************************************************************* *\
-+This command causes the display module to invert the image data only on
-+the display device. The frame memory contents remain unchanged.
-+No status bits are changed.
-+\* ************************************************************************* */
-+#define set_gamma_curve 0x26
-+/* ************************************************************************* *\
-+This command selects the desired gamma curve for the display device.
-+Four fixed gamma curves are defined in the DCS specification.
-+\* ************************************************************************* */
-+#define set_display_off 0x28
-+/* ************************************************************************* *\
-+This command causes the display module to stop displaying the image data
-+on the display device. The frame memory contents remain unchanged.
-+No status bits are changed.
-+\* ************************************************************************* */
-+#define set_display_on 0x29
-+/* ************************************************************************* *\
-+This command causes the display module to start displaying the image data
-+on the display device. The frame memory contents remain unchanged.
-+No status bits are changed.
-+\* ************************************************************************* */
-+#define set_column_address 0x2a
-+/* ************************************************************************* *\
-+This command defines the column extent of the frame memory accessed by the
-+host processor with the read_memory_continue and write_memory_continue
-+commands. No status bits are changed.
-+\* ************************************************************************* */
-+#define set_page_address 0x2b
-+/* ************************************************************************* *\
-+This command defines the page extent of the frame memory accessed by the host
-+processor with the write_memory_continue and read_memory_continue commands.
-+No status bits are changed.
-+\* ************************************************************************* */
-+#define write_mem_start 0x2c
-+/* ************************************************************************* *\
-+This command transfers image data from the host processor to the display
-+module's frame memory starting at the pixel location specified by the
-+preceding set_column_address and set_page_address commands.
-+\* ************************************************************************* */
-+#define set_partial_area 0x30
-+/* ************************************************************************* *\
-+This command defines the Partial Display mode's display area. There are
-+two parameters associated with this command: the first defines the
-+Start Row (SR) and the second the End Row (ER). SR and ER refer to the
-+Frame Memory Line Pointer.
-+\* ************************************************************************* */
-+#define set_scroll_area 0x33
-+/* ************************************************************************* *\
-+This command defines the display module's Vertical Scrolling Area.
-+\* ************************************************************************* */
-+#define set_tear_off 0x34
-+/* ************************************************************************* *\
-+This command turns off the display module's Tearing Effect output signal on
-+the TE signal line.
-+\* ************************************************************************* */
-+#define set_tear_on 0x35
-+/* ************************************************************************* *\
-+This command turns on the display module's Tearing Effect output signal
-+on the TE signal line.
-+\* ************************************************************************* */
-+#define set_address_mode 0x36
-+/* ************************************************************************* *\
-+This command sets the data order for transfers from the host processor to
-+the display module's frame memory, bits B[7:5] and B3, and from the display
-+module's frame memory to the display device, bits B[2:0] and B4.
-+\* ************************************************************************* */
-+#define set_scroll_start 0x37
-+/* ************************************************************************* *\
-+This command sets the start of the vertical scrolling area in the frame memory.
-+The vertical scrolling area is fully defined when this command is used with
-+the set_scroll_area command. The set_scroll_start command has one parameter,
-+the Vertical Scroll Pointer. The VSP defines the line in the frame memory
-+that is written to the display device as the first line of the vertical
-+scroll area.
-+\* ************************************************************************* */
-+#define exit_idle_mode 0x38
-+/* ************************************************************************* *\
-+This command causes the display module to exit Idle mode.
-+\* ************************************************************************* */
-+#define enter_idle_mode 0x39
-+/* ************************************************************************* *\
-+This command causes the display module to enter Idle Mode.
-+In Idle Mode, color expression is reduced. Colors are shown on the display
-+device using the MSB of each of the R, G and B color components in the frame
-+memory.
-+\* ************************************************************************* */
-+#define set_pixel_format 0x3a
-+/* ************************************************************************* *\
-+This command sets the pixel format for the RGB image data used by the interface.
-+Bits D[6:4] DPI Pixel Format Definition
-+Bits D[2:0] DBI Pixel Format Definition
-+Bits D7 and D3 are not used.
-+\* ************************************************************************* */
-+#define write_mem_cont 0x3c
-+/* ************************************************************************* *\
-+This command transfers image data from the host processor to the display
-+module's frame memory continuing from the pixel location following the
-+previous write_memory_continue or write_memory_start command.
-+\* ************************************************************************* */
-+#define set_tear_scanline 0x44
-+/* ************************************************************************* *\
-+This command turns on the display module's Tearing Effect output signal on the
-+TE signal line when the display module reaches line N.
-+\* ************************************************************************* */
-+#define get_scanline 0x45
-+/* ************************************************************************* *\
-+The display module returns the current scanline, N, used to update the
-+display device. The total number of scanlines on a display device is
-+defined as VSYNC + VBP + VACT + VFP. The first scanline is defined as
-+the first line of V Sync and is denoted as Line 0.
-+When in Sleep Mode, the value returned by get_scanline is undefined.
-+\* ************************************************************************* */ -+/* DCS Interface Pixel Formats */ -+#define DCS_PIXEL_FORMAT_3BPP 0x1 -+#define DCS_PIXEL_FORMAT_8BPP 0x2 -+#define DCS_PIXEL_FORMAT_12BPP 0x3 -+#define DCS_PIXEL_FORMAT_16BPP 0x5 -+#define DCS_PIXEL_FORMAT_18BPP 0x6 -+#define DCS_PIXEL_FORMAT_24BPP 0x7 -+/* ONE PARAMETER READ DATA */ -+#define addr_mode_data 0xfc -+#define diag_res_data 0x00 -+#define disp_mode_data 0x23 -+#define pxl_fmt_data 0x77 -+#define pwr_mode_data 0x74 -+#define sig_mode_data 0x00 -+/* TWO PARAMETERS READ DATA */ -+#define scanline_data1 0xff -+#define scanline_data2 0xff -+/* DPI PIXEL FORMATS */ -+#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */ -+#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */ -+#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED -+ * 666 FORMAT -+ */ -+#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */ -+#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode -+ * with Sync Pulse -+ */ -+#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode -+ * with Sync events -+ */ -+#define BURST_MODE 0x03 /* Burst Mode */ -+#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */ -+#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */ -+#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */ -+#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */ -+#define DBI_NOT_SUPPORTED 0x00 /* command mode -+ * is not supported -+ */ -+#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */ -+#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */ -+#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */ -+#define DBI_COMMAND_BUFFER_SIZE 0x120 /* Allocate at least -+ * 0x100 Byte with 32 -+ * byte alignment -+ */ -+#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least -+ * 0x100 Byte with 32 -+ * byte alignment -+ */ -+#define ALIGNMENT_32BYTE_MASK (~(BIT0|BIT1|BIT2|BIT3|BIT4)) -+#define SKU_83 0x01 -+#define SKU_100 0x02 -+#define SKU_100L 0x04 -+#define SKU_BYPASS 0x08 -+#if 0 -+/* ************************************************************************* *\ -+DSI command data structure -+\* ************************************************************************* */ -+union DSI_LONG_PACKET_HEADER { -+ u32 DSI_longPacketHeader; -+ struct { -+ u8 dataID; -+ u16 wordCount; -+ u8 ECC; -+ }; -+#if 0 /*FIXME JLIU7 */ -+ struct { -+ u8 DT:6; -+ u8 VC:2; -+ }; -+#endif /*FIXME JLIU7 */ -+}; -+ -+union MIPI_ADPT_CMD_LNG_REG { -+ u32 commnadLengthReg; -+ struct { -+ u8 command0; -+ u8 command1; -+ u8 command2; -+ u8 command3; -+ }; -+}; -+ -+struct SET_COLUMN_ADDRESS_DATA { -+ u8 command; -+ u16 SC; /* Start Column */ -+ u16 EC; /* End Column */ -+}; -+ -+struct SET_PAGE_ADDRESS_DATA { -+ u8 command; -+ u16 SP; /* Start Page */ -+ u16 EP; /* End Page */ -+}; -+#endif -diff -uNr a/drivers/gpu/drm/psb/psb_intel_sdvo.c b/drivers/gpu/drm/psb/psb_intel_sdvo.c ---- a/drivers/gpu/drm/psb/psb_intel_sdvo.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_intel_sdvo.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,1232 @@ -+/* -+ * Copyright © 2006-2007 Intel Corporation -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: 
-+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Authors: -+ * Eric Anholt <eric@anholt.net> -+ */ -+/* -+ * Copyright 2006 Dave Airlie <airlied@linux.ie> -+ * Jesse Barnes <jesse.barnes@intel.com> -+ */ -+ -+#include <linux/i2c.h> -+#include <linux/delay.h> -+#include <drm/drm_crtc.h> -+#include "psb_intel_sdvo_regs.h" -+ -+struct psb_intel_sdvo_priv { -+ struct psb_intel_i2c_chan *i2c_bus; -+ int slaveaddr; -+ int output_device; -+ -+ u16 active_outputs; -+ -+ struct psb_intel_sdvo_caps caps; -+ int pixel_clock_min, pixel_clock_max; -+ -+ int save_sdvo_mult; -+ u16 save_active_outputs; -+ struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; -+ struct psb_intel_sdvo_dtd save_output_dtd[16]; -+ u32 save_SDVOX; -+}; -+ -+/** -+ * Writes the SDVOB or SDVOC with the given value, but always writes both -+ * SDVOB and SDVOC to work around apparent hardware issues (according to -+ * comments in the BIOS). -+ */ -+void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output, u32 val) -+{ -+ struct drm_device *dev = psb_intel_output->base.dev; -+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; -+ u32 bval = val, cval = val; -+ int i; -+ -+ if (sdvo_priv->output_device == SDVOB) -+ cval = REG_READ(SDVOC); -+ else -+ bval = REG_READ(SDVOB); -+ /* -+ * Write the registers twice for luck. Sometimes, -+ * writing them only once doesn't appear to 'stick'. -+ * The BIOS does this too. 
Yay, magic -+ */ -+ for (i = 0; i < 2; i++) { -+ REG_WRITE(SDVOB, bval); -+ REG_READ(SDVOB); -+ REG_WRITE(SDVOC, cval); -+ REG_READ(SDVOC); -+ } -+} -+ -+static bool psb_intel_sdvo_read_byte(struct psb_intel_output *psb_intel_output, -+ u8 addr, u8 *ch) -+{ -+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; -+ u8 out_buf[2]; -+ u8 buf[2]; -+ int ret; -+ -+ struct i2c_msg msgs[] = { -+ { -+ .addr = sdvo_priv->i2c_bus->slave_addr, -+ .flags = 0, -+ .len = 1, -+ .buf = out_buf, -+ }, -+ { -+ .addr = sdvo_priv->i2c_bus->slave_addr, -+ .flags = I2C_M_RD, -+ .len = 1, -+ .buf = buf, -+ } -+ }; -+ -+ out_buf[0] = addr; -+ out_buf[1] = 0; -+ -+ ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2); -+ if (ret == 2) { -+ /* DRM_DEBUG("got back from addr %02X = %02x\n", -+ * out_buf[0], buf[0]); -+ */ -+ *ch = buf[0]; -+ return true; -+ } -+ -+ DRM_DEBUG("i2c transfer returned %d\n", ret); -+ return false; -+} -+ -+static bool psb_intel_sdvo_write_byte(struct psb_intel_output *psb_intel_output, -+ int addr, u8 ch) -+{ -+ u8 out_buf[2]; -+ struct i2c_msg msgs[] = { -+ { -+ .addr = psb_intel_output->i2c_bus->slave_addr, -+ .flags = 0, -+ .len = 2, -+ .buf = out_buf, -+ } -+ }; -+ -+ out_buf[0] = addr; -+ out_buf[1] = ch; -+ -+ if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1) -+ return true; -+ return false; -+} -+ -+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} -+/** Mapping of command numbers to names, for debug output */ -+const static struct _sdvo_cmd_name { -+ u8 cmd; -+ char *name; -+} sdvo_cmd_names[] = { -+SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), -+ SDVO_CMD_NAME_ENTRY -+ (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), -+ SDVO_CMD_NAME_ENTRY -+ (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), -+ SDVO_CMD_NAME_ENTRY -+ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), -+ SDVO_CMD_NAME_ENTRY -+ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), -+ SDVO_CMD_NAME_ENTRY -+ (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), -+ SDVO_CMD_NAME_ENTRY -+ (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), -+ SDVO_CMD_NAME_ENTRY -+ (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), 
-+ SDVO_CMD_NAME_ENTRY -+ (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT), -+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),}; -+ -+#define SDVO_NAME(dev_priv) \ -+ ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") -+#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv) -+ -+static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output, u8 cmd, -+ void *args, int args_len) -+{ -+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; -+ int i; -+ -+ if (1) { -+ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); -+ for (i = 0; i < args_len; i++) -+ printk(KERN_INFO"%02X ", ((u8 *) args)[i]); -+ for (; i < 8; i++) -+ printk(" "); -+ for (i = 0; -+ i < -+ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); -+ i++) { -+ if (cmd == sdvo_cmd_names[i].cmd) { -+ printk("(%s)", sdvo_cmd_names[i].name); -+ break; -+ } -+ } -+ if (i == -+ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0])) -+ printk("(%02X)", cmd); -+ printk("\n"); -+ } -+ -+ for (i = 0; i < args_len; i++) { -+ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_ARG_0 - i, -+ ((u8 *) args)[i]); -+ } -+ -+ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd); -+} -+ -+static const char *cmd_status_names[] = { -+ "Power on", -+ "Success", -+ "Not supported", -+ "Invalid arg", -+ "Pending", -+ "Target not specified", -+ "Scaling not supported" -+}; -+ -+static u8 psb_intel_sdvo_read_response(struct psb_intel_output *psb_intel_output, -+ void *response, int response_len) -+{ -+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; -+ int i; -+ u8 status; -+ u8 retry = 50; -+ -+ while (retry--) { -+ /* Read the command response */ -+ for (i = 0; i < response_len; i++) { -+ psb_intel_sdvo_read_byte(psb_intel_output, -+ SDVO_I2C_RETURN_0 + i, -+ &((u8 *) response)[i]); -+ } -+ -+ /* read the return status */ -+ psb_intel_sdvo_read_byte(psb_intel_output, SDVO_I2C_CMD_STATUS, -+ &status); -+ -+ if (1) { -+ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv)); -+ for (i = 0; i < response_len; i++) -+ printk(KERN_INFO"%02X ", ((u8 *) response)[i]); -+ for (; i < 8; i++) -+ printk(" "); -+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) -+ printk(KERN_INFO"(%s)", -+ cmd_status_names[status]); -+ else -+ printk(KERN_INFO"(??? %d)", status); -+ printk("\n"); -+ } -+ -+ if (status != SDVO_CMD_STATUS_PENDING) -+ return status; -+ -+ mdelay(50); -+ } -+ -+ return status; -+} -+ -+int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) -+{ -+ if (mode->clock >= 100000) -+ return 1; -+ else if (mode->clock >= 50000) -+ return 2; -+ else -+ return 4; -+} -+ -+/** -+ * Don't check status code from this as it switches the bus back to the -+ * SDVO chips which defeats the purpose of doing a bus switch in the first -+ * place. 
-+ */ -+void psb_intel_sdvo_set_control_bus_switch(struct psb_intel_output *psb_intel_output, -+ u8 target) -+{ -+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, -+ &target, 1); -+} -+ -+static bool psb_intel_sdvo_set_target_input(struct psb_intel_output *psb_intel_output, -+ bool target_0, bool target_1) -+{ -+ struct psb_intel_sdvo_set_target_input_args targets = { 0 }; -+ u8 status; -+ -+ if (target_0 && target_1) -+ return SDVO_CMD_STATUS_NOTSUPP; -+ -+ if (target_1) -+ targets.target_1 = 1; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT, -+ &targets, sizeof(targets)); -+ -+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); -+ -+ return status == SDVO_CMD_STATUS_SUCCESS; -+} -+ -+/** -+ * Return whether each input is trained. -+ * -+ * This function is making an assumption about the layout of the response, -+ * which should be checked against the docs. -+ */ -+static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output -+ *psb_intel_output, bool *input_1, -+ bool *input_2) -+{ -+ struct psb_intel_sdvo_get_trained_inputs_response response; -+ u8 status; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS, -+ NULL, 0); -+ status = -+ psb_intel_sdvo_read_response(psb_intel_output, &response, -+ sizeof(response)); -+ if (status != SDVO_CMD_STATUS_SUCCESS) -+ return false; -+ -+ *input_1 = response.input0_trained; -+ *input_2 = response.input1_trained; -+ return true; -+} -+ -+static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output -+ *psb_intel_output, u16 *outputs) -+{ -+ u8 status; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, -+ NULL, 0); -+ status = -+ psb_intel_sdvo_read_response(psb_intel_output, outputs, -+ sizeof(*outputs)); -+ -+ return status == SDVO_CMD_STATUS_SUCCESS; -+} -+ -+static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output -+ *psb_intel_output, u16 outputs) -+{ -+ u8 status; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, -+ &outputs, sizeof(outputs)); -+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); -+ return status == SDVO_CMD_STATUS_SUCCESS; -+} -+ -+static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output -+ *psb_intel_output, int mode) -+{ -+ u8 status, state = SDVO_ENCODER_STATE_ON; -+ -+ switch (mode) { -+ case DRM_MODE_DPMS_ON: -+ state = SDVO_ENCODER_STATE_ON; -+ break; -+ case DRM_MODE_DPMS_STANDBY: -+ state = SDVO_ENCODER_STATE_STANDBY; -+ break; -+ case DRM_MODE_DPMS_SUSPEND: -+ state = SDVO_ENCODER_STATE_SUSPEND; -+ break; -+ case DRM_MODE_DPMS_OFF: -+ state = SDVO_ENCODER_STATE_OFF; -+ break; -+ } -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, -+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state, -+ sizeof(state)); -+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); -+ -+ return status == SDVO_CMD_STATUS_SUCCESS; -+} -+ -+static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output -+ *psb_intel_output, -+ int *clock_min, -+ int *clock_max) -+{ -+ struct psb_intel_sdvo_pixel_clock_range clocks; -+ u8 status; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, -+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL, -+ 0); -+ -+ status = -+ psb_intel_sdvo_read_response(psb_intel_output, &clocks, -+ sizeof(clocks)); -+ -+ if (status != SDVO_CMD_STATUS_SUCCESS) -+ return false; -+ -+ /* Convert the values from units of 10 kHz to kHz. 
*/ -+ *clock_min = clocks.min * 10; -+ *clock_max = clocks.max * 10; -+ -+ return true; -+} -+ -+static bool psb_intel_sdvo_set_target_output(struct psb_intel_output *psb_intel_output, -+ u16 outputs) -+{ -+ u8 status; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT, -+ &outputs, sizeof(outputs)); -+ -+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); -+ return status == SDVO_CMD_STATUS_SUCCESS; -+} -+ -+static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output, -+ u8 cmd, struct psb_intel_sdvo_dtd *dtd) -+{ -+ u8 status; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0); -+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1, -+ sizeof(dtd->part1)); -+ if (status != SDVO_CMD_STATUS_SUCCESS) -+ return false; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0); -+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2, -+ sizeof(dtd->part2)); -+ if (status != SDVO_CMD_STATUS_SUCCESS) -+ return false; -+ -+ return true; -+} -+ -+static bool psb_intel_sdvo_get_input_timing(struct psb_intel_output *psb_intel_output, -+ struct psb_intel_sdvo_dtd *dtd) -+{ -+ return psb_intel_sdvo_get_timing(psb_intel_output, -+ SDVO_CMD_GET_INPUT_TIMINGS_PART1, -+ dtd); -+} -+ -+static bool psb_intel_sdvo_get_output_timing(struct psb_intel_output *psb_intel_output, -+ struct psb_intel_sdvo_dtd *dtd) -+{ -+ return psb_intel_sdvo_get_timing(psb_intel_output, -+ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, -+ dtd); -+} -+ -+static bool psb_intel_sdvo_set_timing(struct psb_intel_output *psb_intel_output, -+ u8 cmd, struct psb_intel_sdvo_dtd *dtd) -+{ -+ u8 status; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1, -+ sizeof(dtd->part1)); -+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); -+ if (status != SDVO_CMD_STATUS_SUCCESS) -+ return false; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2, -+ sizeof(dtd->part2)); -+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); -+ if (status != SDVO_CMD_STATUS_SUCCESS) -+ return false; -+ -+ return true; -+} -+ -+static bool psb_intel_sdvo_set_input_timing(struct psb_intel_output *psb_intel_output, -+ struct psb_intel_sdvo_dtd *dtd) -+{ -+ return psb_intel_sdvo_set_timing(psb_intel_output, -+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, -+ dtd); -+} -+ -+static bool psb_intel_sdvo_set_output_timing(struct psb_intel_output *psb_intel_output, -+ struct psb_intel_sdvo_dtd *dtd) -+{ -+ return psb_intel_sdvo_set_timing(psb_intel_output, -+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, -+ dtd); -+} -+ -+#if 0 -+static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_output -+ *psb_intel_output, -+ struct psb_intel_sdvo_dtd -+ *dtd) -+{ -+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; -+ u8 status; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, -+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, -+ NULL, 0); -+ -+ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1, -+ sizeof(dtd->part1)); -+ if (status != SDVO_CMD_STATUS_SUCCESS) -+ return false; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, -+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, -+ NULL, 0); -+ status = -+ psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2, -+ sizeof(dtd->part2)); -+ if (status != SDVO_CMD_STATUS_SUCCESS) -+ return false; -+ -+ return true; -+} -+#endif -+ -+static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output -+ *psb_intel_output) -+{ -+ u8 response, status; -+ -+ 
psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, -+ NULL, 0); -+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1); -+ -+ if (status != SDVO_CMD_STATUS_SUCCESS) { -+ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n"); -+ return SDVO_CLOCK_RATE_MULT_1X; -+ } else { -+ DRM_DEBUG("Current clock rate multiplier: %d\n", response); -+ } -+ -+ return response; -+} -+ -+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output -+ *psb_intel_output, u8 val) -+{ -+ u8 status; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, -+ &val, 1); -+ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0); -+ if (status != SDVO_CMD_STATUS_SUCCESS) -+ return false; -+ -+ return true; -+} -+ -+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder, -+ struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode) -+{ -+ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO -+ * device will be told of the multiplier during mode_set. -+ */ -+ adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode); -+ return true; -+} -+ -+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder, -+ struct drm_display_mode *mode, -+ struct drm_display_mode *adjusted_mode) -+{ -+ struct drm_device *dev = encoder->dev; -+ struct drm_crtc *crtc = encoder->crtc; -+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); -+ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder); -+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; -+ u16 width, height; -+ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len; -+ u16 h_sync_offset, v_sync_offset; -+ u32 sdvox; -+ struct psb_intel_sdvo_dtd output_dtd; -+ int sdvo_pixel_multiply; -+ -+ if (!mode) -+ return; -+ -+ width = mode->crtc_hdisplay; -+ height = mode->crtc_vdisplay; -+ -+ /* do some mode translations */ -+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start; -+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; -+ -+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start; -+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; -+ -+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; -+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; -+ -+ output_dtd.part1.clock = mode->clock / 10; -+ output_dtd.part1.h_active = width & 0xff; -+ output_dtd.part1.h_blank = h_blank_len & 0xff; -+ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) | -+ ((h_blank_len >> 8) & 0xf); -+ output_dtd.part1.v_active = height & 0xff; -+ output_dtd.part1.v_blank = v_blank_len & 0xff; -+ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) | -+ ((v_blank_len >> 8) & 0xf); -+ -+ output_dtd.part2.h_sync_off = h_sync_offset; -+ output_dtd.part2.h_sync_width = h_sync_len & 0xff; -+ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | -+ (v_sync_len & 0xf); -+ output_dtd.part2.sync_off_width_high = -+ ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) | -+ ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4); -+ -+ output_dtd.part2.dtd_flags = 0x18; -+ if (mode->flags & DRM_MODE_FLAG_PHSYNC) -+ output_dtd.part2.dtd_flags |= 0x2; -+ if (mode->flags & DRM_MODE_FLAG_PVSYNC) -+ output_dtd.part2.dtd_flags |= 0x4; -+ -+ output_dtd.part2.sdvo_flags = 0; -+ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0; -+ output_dtd.part2.reserved = 0; -+ -+ /* Set the output timing to the screen */ -+ 
psb_intel_sdvo_set_target_output(psb_intel_output, -+ sdvo_priv->active_outputs); -+ psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd); -+ -+ /* Set the input timing to the screen. Assume always input 0. */ -+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false); -+ -+ /* We would like to use i830_sdvo_create_preferred_input_timing() to -+ * provide the device with a timing it can support, if it supports that -+ * feature. However, presumably we would need to adjust the CRTC to -+ * output the preferred timing, and we don't support that currently. -+ */ -+#if 0 -+ success = -+ psb_intel_sdvo_create_preferred_input_timing(psb_intel_output, clock, -+ width, height); -+ if (success) { -+ struct psb_intel_sdvo_dtd *input_dtd; -+ -+ psb_intel_sdvo_get_preferred_input_timing(psb_intel_output, -+ &input_dtd); -+ psb_intel_sdvo_set_input_timing(psb_intel_output, &input_dtd); -+ } -+#else -+ psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd); -+#endif -+ -+ switch (psb_intel_sdvo_get_pixel_multiplier(mode)) { -+ case 1: -+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, -+ SDVO_CLOCK_RATE_MULT_1X); -+ break; -+ case 2: -+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, -+ SDVO_CLOCK_RATE_MULT_2X); -+ break; -+ case 4: -+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, -+ SDVO_CLOCK_RATE_MULT_4X); -+ break; -+ } -+ -+ /* Set the SDVO control regs. */ -+ if (0 /*IS_I965GM(dev) */) { -+ sdvox = SDVO_BORDER_ENABLE; -+ } else { -+ sdvox = REG_READ(sdvo_priv->output_device); -+ switch (sdvo_priv->output_device) { -+ case SDVOB: -+ sdvox &= SDVOB_PRESERVE_MASK; -+ break; -+ case SDVOC: -+ sdvox &= SDVOC_PRESERVE_MASK; -+ break; -+ } -+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; -+ } -+ if (psb_intel_crtc->pipe == 1) -+ sdvox |= SDVO_PIPE_B_SELECT; -+ -+ sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode); -+ if (IS_I965G(dev)) { -+ /* done in crtc_mode_set as the dpll_md reg must be written -+ * early */ -+ } else if (IS_I945G(dev) || IS_I945GM(dev)) { -+ /* done in crtc_mode_set as it lives inside the -+ * dpll register */ -+ } else { -+ sdvox |= -+ (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; -+ } -+ -+ psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox); -+} -+ -+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode) -+{ -+ struct drm_device *dev = encoder->dev; -+ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder); -+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; -+ u32 temp; -+ -+ if (mode != DRM_MODE_DPMS_ON) { -+ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0); -+ if (0) -+ psb_intel_sdvo_set_encoder_power_state(psb_intel_output, -+ mode); -+ -+ if (mode == DRM_MODE_DPMS_OFF) { -+ temp = REG_READ(sdvo_priv->output_device); -+ if ((temp & SDVO_ENABLE) != 0) { -+ psb_intel_sdvo_write_sdvox(psb_intel_output, -+ temp & -+ ~SDVO_ENABLE); -+ } -+ } -+ } else { -+ bool input1, input2; -+ int i; -+ u8 status; -+ -+ temp = REG_READ(sdvo_priv->output_device); -+ if ((temp & SDVO_ENABLE) == 0) -+ psb_intel_sdvo_write_sdvox(psb_intel_output, -+ temp | SDVO_ENABLE); -+ for (i = 0; i < 2; i++) -+ psb_intel_wait_for_vblank(dev); -+ -+ status = -+ psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1, -+ &input2); -+ -+ -+ /* Warn if the device reported failure to sync. -+ * A lot of SDVO devices fail to notify of sync, but it's -+ * a given it the status is a success, we succeeded. 
-+ */ -+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { -+ DRM_DEBUG -+ ("First %s output reported failure to sync\n", -+ SDVO_NAME(sdvo_priv)); -+ } -+ -+ if (0) -+ psb_intel_sdvo_set_encoder_power_state(psb_intel_output, -+ mode); -+ psb_intel_sdvo_set_active_outputs(psb_intel_output, -+ sdvo_priv->active_outputs); -+ } -+ return; -+} -+ -+static void psb_intel_sdvo_save(struct drm_connector *connector) -+{ -+ struct drm_device *dev = connector->dev; -+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); -+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; -+ int o; -+ -+ sdvo_priv->save_sdvo_mult = -+ psb_intel_sdvo_get_clock_rate_mult(psb_intel_output); -+ psb_intel_sdvo_get_active_outputs(psb_intel_output, -+ &sdvo_priv->save_active_outputs); -+ -+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { -+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false); -+ psb_intel_sdvo_get_input_timing(psb_intel_output, -+ &sdvo_priv->save_input_dtd_1); -+ } -+ -+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { -+ psb_intel_sdvo_set_target_input(psb_intel_output, false, true); -+ psb_intel_sdvo_get_input_timing(psb_intel_output, -+ &sdvo_priv->save_input_dtd_2); -+ } -+ -+ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) { -+ u16 this_output = (1 << o); -+ if (sdvo_priv->caps.output_flags & this_output) { -+ psb_intel_sdvo_set_target_output(psb_intel_output, -+ this_output); -+ psb_intel_sdvo_get_output_timing(psb_intel_output, -+ &sdvo_priv-> -+ save_output_dtd[o]); -+ } -+ } -+ -+ sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device); -+} -+ -+static void psb_intel_sdvo_restore(struct drm_connector *connector) -+{ -+ struct drm_device *dev = connector->dev; -+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); -+ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; -+ int o; -+ int i; -+ bool input1, input2; -+ u8 status; -+ -+ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0); -+ -+ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) { -+ u16 this_output = (1 << o); -+ if (sdvo_priv->caps.output_flags & this_output) { -+ psb_intel_sdvo_set_target_output(psb_intel_output, -+ this_output); -+ psb_intel_sdvo_set_output_timing(psb_intel_output, -+ &sdvo_priv-> -+ save_output_dtd[o]); -+ } -+ } -+ -+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { -+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false); -+ psb_intel_sdvo_set_input_timing(psb_intel_output, -+ &sdvo_priv->save_input_dtd_1); -+ } -+ -+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { -+ psb_intel_sdvo_set_target_input(psb_intel_output, false, true); -+ psb_intel_sdvo_set_input_timing(psb_intel_output, -+ &sdvo_priv->save_input_dtd_2); -+ } -+ -+ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output, -+ sdvo_priv->save_sdvo_mult); -+ -+ REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX); -+ -+ if (sdvo_priv->save_SDVOX & SDVO_ENABLE) { -+ for (i = 0; i < 2; i++) -+ psb_intel_wait_for_vblank(dev); -+ status = -+ psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1, -+ &input2); -+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) -+ DRM_DEBUG -+ ("First %s output reported failure to sync\n", -+ SDVO_NAME(sdvo_priv)); -+ } -+ -+ psb_intel_sdvo_set_active_outputs(psb_intel_output, -+ sdvo_priv->save_active_outputs); -+} -+ -+static int psb_intel_sdvo_mode_valid(struct drm_connector *connector, -+ struct drm_display_mode *mode) -+{ -+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); -+ 
struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv; -+ -+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) -+ return MODE_NO_DBLESCAN; -+ -+ if (sdvo_priv->pixel_clock_min > mode->clock) -+ return MODE_CLOCK_LOW; -+ -+ if (sdvo_priv->pixel_clock_max < mode->clock) -+ return MODE_CLOCK_HIGH; -+ -+ return MODE_OK; -+} -+ -+static bool psb_intel_sdvo_get_capabilities(struct psb_intel_output *psb_intel_output, -+ struct psb_intel_sdvo_caps *caps) -+{ -+ u8 status; -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, -+ 0); -+ status = -+ psb_intel_sdvo_read_response(psb_intel_output, caps, sizeof(*caps)); -+ if (status != SDVO_CMD_STATUS_SUCCESS) -+ return false; -+ -+ return true; -+} -+ -+struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB) -+{ -+ struct drm_connector *connector = NULL; -+ struct psb_intel_output *iout = NULL; -+ struct psb_intel_sdvo_priv *sdvo; -+ -+ /* find the sdvo connector */ -+ list_for_each_entry(connector, &dev->mode_config.connector_list, -+ head) { -+ iout = to_psb_intel_output(connector); -+ -+ if (iout->type != INTEL_OUTPUT_SDVO) -+ continue; -+ -+ sdvo = iout->dev_priv; -+ -+ if (sdvo->output_device == SDVOB && sdvoB) -+ return connector; -+ -+ if (sdvo->output_device == SDVOC && !sdvoB) -+ return connector; -+ -+ } -+ -+ return NULL; -+} -+ -+int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector) -+{ -+ u8 response[2]; -+ u8 status; -+ struct psb_intel_output *psb_intel_output; -+ DRM_DEBUG("\n"); -+ -+ if (!connector) -+ return 0; -+ -+ psb_intel_output = to_psb_intel_output(connector); -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, -+ NULL, 0); -+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2); -+ -+ if (response[0] != 0) -+ return 1; -+ -+ return 0; -+} -+ -+void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on) -+{ -+ u8 response[2]; -+ u8 status; -+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, -+ NULL, 0); -+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2); -+ -+ if (on) { -+ psb_intel_sdvo_write_cmd(psb_intel_output, -+ SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, -+ 0); -+ status = -+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2); -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, -+ SDVO_CMD_SET_ACTIVE_HOT_PLUG, -+ &response, 2); -+ } else { -+ response[0] = 0; -+ response[1] = 0; -+ psb_intel_sdvo_write_cmd(psb_intel_output, -+ SDVO_CMD_SET_ACTIVE_HOT_PLUG, -+ &response, 2); -+ } -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, -+ NULL, 0); -+ psb_intel_sdvo_read_response(psb_intel_output, &response, 2); -+} -+ -+static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector -+ *connector) -+{ -+ u8 response[2]; -+ u8 status; -+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); -+ -+ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, -+ NULL, 0); -+ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2); -+ -+ DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); -+ if ((response[0] != 0) || (response[1] != 0)) -+ return connector_status_connected; -+ else -+ return connector_status_disconnected; -+} -+ -+static int psb_intel_sdvo_get_modes(struct drm_connector *connector) -+{ -+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); -+ -+ /* set 
the bus switch and get the modes */ -+ psb_intel_sdvo_set_control_bus_switch(psb_intel_output, -+ SDVO_CONTROL_BUS_DDC2); -+ psb_intel_ddc_get_modes(psb_intel_output); -+ -+ if (list_empty(&connector->probed_modes)) -+ return 0; -+ return 1; -+#if 0 -+ /* Mac mini hack. On this device, I get DDC through the analog, which -+ * load-detects as disconnected. I fail to DDC through the SDVO DDC, -+ * but it does load-detect as connected. So, just steal the DDC bits -+ * from analog when we fail at finding it the right way. -+ */ -+ /* TODO */ -+ return NULL; -+ -+ return NULL; -+#endif -+} -+ -+static void psb_intel_sdvo_destroy(struct drm_connector *connector) -+{ -+ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); -+ -+ if (psb_intel_output->i2c_bus) -+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus); -+ drm_sysfs_connector_remove(connector); -+ drm_connector_cleanup(connector); -+ kfree(psb_intel_output); -+} -+ -+static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = { -+ .dpms = psb_intel_sdvo_dpms, -+ .mode_fixup = psb_intel_sdvo_mode_fixup, -+ .prepare = psb_intel_encoder_prepare, -+ .mode_set = psb_intel_sdvo_mode_set, -+ .commit = psb_intel_encoder_commit, -+}; -+ -+static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = { -+ .save = psb_intel_sdvo_save, -+ .restore = psb_intel_sdvo_restore, -+ .detect = psb_intel_sdvo_detect, -+ .fill_modes = drm_helper_probe_single_connector_modes, -+ .destroy = psb_intel_sdvo_destroy, -+}; -+ -+static const struct drm_connector_helper_funcs -+ psb_intel_sdvo_connector_helper_funcs = { -+ .get_modes = psb_intel_sdvo_get_modes, -+ .mode_valid = psb_intel_sdvo_mode_valid, -+ .best_encoder = psb_intel_best_encoder, -+}; -+ -+void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder) -+{ -+ drm_encoder_cleanup(encoder); -+} -+ -+static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = { -+ .destroy = psb_intel_sdvo_enc_destroy, -+}; -+ -+ -+void psb_intel_sdvo_init(struct drm_device *dev, int output_device) -+{ -+ struct drm_connector *connector; -+ struct psb_intel_output *psb_intel_output; -+ struct psb_intel_sdvo_priv *sdvo_priv; -+ struct psb_intel_i2c_chan *i2cbus = NULL; -+ int connector_type; -+ u8 ch[0x40]; -+ int i; -+ int encoder_type, output_id; -+ -+ psb_intel_output = -+ kcalloc(sizeof(struct psb_intel_output) + -+ sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL); -+ if (!psb_intel_output) -+ return; -+ -+ connector = &psb_intel_output->base; -+ -+ drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs, -+ DRM_MODE_CONNECTOR_Unknown); -+ drm_connector_helper_add(connector, -+ &psb_intel_sdvo_connector_helper_funcs); -+ sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1); -+ psb_intel_output->type = INTEL_OUTPUT_SDVO; -+ -+ connector->interlace_allowed = 0; -+ connector->doublescan_allowed = 0; -+ -+ /* setup the DDC bus. 
*/ -+ if (output_device == SDVOB) -+ i2cbus = -+ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); -+ else -+ i2cbus = -+ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); -+ -+ if (!i2cbus) -+ goto err_connector; -+ -+ sdvo_priv->i2c_bus = i2cbus; -+ -+ if (output_device == SDVOB) { -+ output_id = 1; -+ sdvo_priv->i2c_bus->slave_addr = 0x38; -+ } else { -+ output_id = 2; -+ sdvo_priv->i2c_bus->slave_addr = 0x39; -+ } -+ -+ sdvo_priv->output_device = output_device; -+ psb_intel_output->i2c_bus = i2cbus; -+ psb_intel_output->dev_priv = sdvo_priv; -+ -+ -+ /* Read the regs to test if we can talk to the device */ -+ for (i = 0; i < 0x40; i++) { -+ if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) { -+ DRM_DEBUG("No SDVO device found on SDVO%c\n", -+ output_device == SDVOB ? 'B' : 'C'); -+ goto err_i2c; -+ } -+ } -+ -+ psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps); -+ -+ memset(&sdvo_priv->active_outputs, 0, -+ sizeof(sdvo_priv->active_outputs)); -+ -+ /* TODO, CVBS, SVID, YPRPB & SCART outputs. */ -+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) { -+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0; -+ connector->display_info.subpixel_order = -+ SubPixelHorizontalRGB; -+ encoder_type = DRM_MODE_ENCODER_DAC; -+ connector_type = DRM_MODE_CONNECTOR_VGA; -+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) { -+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1; -+ connector->display_info.subpixel_order = -+ SubPixelHorizontalRGB; -+ encoder_type = DRM_MODE_ENCODER_DAC; -+ connector_type = DRM_MODE_CONNECTOR_VGA; -+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) { -+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0; -+ connector->display_info.subpixel_order = -+ SubPixelHorizontalRGB; -+ encoder_type = DRM_MODE_ENCODER_TMDS; -+ connector_type = DRM_MODE_CONNECTOR_DVID; -+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) { -+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1; -+ connector->display_info.subpixel_order = -+ SubPixelHorizontalRGB; -+ encoder_type = DRM_MODE_ENCODER_TMDS; -+ connector_type = DRM_MODE_CONNECTOR_DVID; -+ } else { -+ unsigned char bytes[2]; -+ -+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2); -+ DRM_DEBUG -+ ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n", -+ SDVO_NAME(sdvo_priv), bytes[0], bytes[1]); -+ goto err_i2c; -+ } -+ -+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs, -+ encoder_type); -+ drm_encoder_helper_add(&psb_intel_output->enc, -+ &psb_intel_sdvo_helper_funcs); -+ connector->connector_type = connector_type; -+ -+ drm_mode_connector_attach_encoder(&psb_intel_output->base, -+ &psb_intel_output->enc); -+ drm_sysfs_connector_add(connector); -+ -+ /* Set the input timing to the screen. Assume always input 0. */ -+ psb_intel_sdvo_set_target_input(psb_intel_output, true, false); -+ -+ psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output, -+ &sdvo_priv->pixel_clock_min, -+ &sdvo_priv-> -+ pixel_clock_max); -+ -+ -+ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, " -+ "clock range %dMHz - %dMHz, " -+ "input 1: %c, input 2: %c, " -+ "output 1: %c, output 2: %c\n", -+ SDVO_NAME(sdvo_priv), -+ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, -+ sdvo_priv->caps.device_rev_id, -+ sdvo_priv->pixel_clock_min / 1000, -+ sdvo_priv->pixel_clock_max / 1000, -+ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', -+ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 
'Y' : 'N', -+ /* check currently supported outputs */ -+ sdvo_priv->caps.output_flags & -+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', -+ sdvo_priv->caps.output_flags & -+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); -+ -+ psb_intel_output->ddc_bus = i2cbus; -+ -+ return; -+ -+err_i2c: -+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus); -+err_connector: -+ drm_connector_cleanup(connector); -+ kfree(psb_intel_output); -+ -+ return; -+} -diff -uNr a/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h ---- a/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,328 @@ -+/* -+ * Copyright (c) 2008, Intel Corporation -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -+ * DEALINGS IN THE SOFTWARE. -+ * -+ * Authors: -+ * Eric Anholt <eric@anholt.net> -+ */ -+ -+/** -+ * @file SDVO command definitions and structures. 
-+ */ -+ -+#define SDVO_OUTPUT_FIRST (0) -+#define SDVO_OUTPUT_TMDS0 (1 << 0) -+#define SDVO_OUTPUT_RGB0 (1 << 1) -+#define SDVO_OUTPUT_CVBS0 (1 << 2) -+#define SDVO_OUTPUT_SVID0 (1 << 3) -+#define SDVO_OUTPUT_YPRPB0 (1 << 4) -+#define SDVO_OUTPUT_SCART0 (1 << 5) -+#define SDVO_OUTPUT_LVDS0 (1 << 6) -+#define SDVO_OUTPUT_TMDS1 (1 << 8) -+#define SDVO_OUTPUT_RGB1 (1 << 9) -+#define SDVO_OUTPUT_CVBS1 (1 << 10) -+#define SDVO_OUTPUT_SVID1 (1 << 11) -+#define SDVO_OUTPUT_YPRPB1 (1 << 12) -+#define SDVO_OUTPUT_SCART1 (1 << 13) -+#define SDVO_OUTPUT_LVDS1 (1 << 14) -+#define SDVO_OUTPUT_LAST (14) -+ -+struct psb_intel_sdvo_caps { -+ u8 vendor_id; -+ u8 device_id; -+ u8 device_rev_id; -+ u8 sdvo_version_major; -+ u8 sdvo_version_minor; -+ unsigned int sdvo_inputs_mask:2; -+ unsigned int smooth_scaling:1; -+ unsigned int sharp_scaling:1; -+ unsigned int up_scaling:1; -+ unsigned int down_scaling:1; -+ unsigned int stall_support:1; -+ unsigned int pad:1; -+ u16 output_flags; -+} __attribute__ ((packed)); -+ -+/** This matches the EDID DTD structure, more or less */ -+struct psb_intel_sdvo_dtd { -+ struct { -+ u16 clock; /**< pixel clock, in 10kHz units */ -+ u8 h_active; /**< lower 8 bits (pixels) */ -+ u8 h_blank; /**< lower 8 bits (pixels) */ -+ u8 h_high; /**< upper 4 bits each h_active, h_blank */ -+ u8 v_active; /**< lower 8 bits (lines) */ -+ u8 v_blank; /**< lower 8 bits (lines) */ -+ u8 v_high; /**< upper 4 bits each v_active, v_blank */ -+ } part1; -+ -+ struct { -+ u8 h_sync_off; -+ /**< lower 8 bits, from hblank start */ -+ u8 h_sync_width;/**< lower 8 bits (pixels) */ -+ /** lower 4 bits each vsync offset, vsync width */ -+ u8 v_sync_off_width; -+ /** -+ * 2 high bits of hsync offset, 2 high bits of hsync width, -+ * bits 4-5 of vsync offset, and 2 high bits of vsync width. 
-+ */ -+ u8 sync_off_width_high; -+ u8 dtd_flags; -+ u8 sdvo_flags; -+ /** bits 6-7 of vsync offset at bits 6-7 */ -+ u8 v_sync_off_high; -+ u8 reserved; -+ } part2; -+} __attribute__ ((packed)); -+ -+struct psb_intel_sdvo_pixel_clock_range { -+ u16 min; /**< pixel clock, in 10kHz units */ -+ u16 max; /**< pixel clock, in 10kHz units */ -+} __attribute__ ((packed)); -+ -+struct psb_intel_sdvo_preferred_input_timing_args { -+ u16 clock; -+ u16 width; -+ u16 height; -+} __attribute__ ((packed)); -+ -+/* I2C registers for SDVO */ -+#define SDVO_I2C_ARG_0 0x07 -+#define SDVO_I2C_ARG_1 0x06 -+#define SDVO_I2C_ARG_2 0x05 -+#define SDVO_I2C_ARG_3 0x04 -+#define SDVO_I2C_ARG_4 0x03 -+#define SDVO_I2C_ARG_5 0x02 -+#define SDVO_I2C_ARG_6 0x01 -+#define SDVO_I2C_ARG_7 0x00 -+#define SDVO_I2C_OPCODE 0x08 -+#define SDVO_I2C_CMD_STATUS 0x09 -+#define SDVO_I2C_RETURN_0 0x0a -+#define SDVO_I2C_RETURN_1 0x0b -+#define SDVO_I2C_RETURN_2 0x0c -+#define SDVO_I2C_RETURN_3 0x0d -+#define SDVO_I2C_RETURN_4 0x0e -+#define SDVO_I2C_RETURN_5 0x0f -+#define SDVO_I2C_RETURN_6 0x10 -+#define SDVO_I2C_RETURN_7 0x11 -+#define SDVO_I2C_VENDOR_BEGIN 0x20 -+ -+/* Status results */ -+#define SDVO_CMD_STATUS_POWER_ON 0x0 -+#define SDVO_CMD_STATUS_SUCCESS 0x1 -+#define SDVO_CMD_STATUS_NOTSUPP 0x2 -+#define SDVO_CMD_STATUS_INVALID_ARG 0x3 -+#define SDVO_CMD_STATUS_PENDING 0x4 -+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5 -+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6 -+ -+/* SDVO commands, argument/result registers */ -+ -+#define SDVO_CMD_RESET 0x01 -+ -+/** Returns a struct psb_intel_sdvo_caps */ -+#define SDVO_CMD_GET_DEVICE_CAPS 0x02 -+ -+#define SDVO_CMD_GET_FIRMWARE_REV 0x86 -+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0 -+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1 -+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2 -+ -+/** -+ * Reports which inputs are trained (managed to sync). -+ * -+ * Devices must have trained within 2 vsyncs of a mode change. -+ */ -+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03 -+struct psb_intel_sdvo_get_trained_inputs_response { -+ unsigned int input0_trained:1; -+ unsigned int input1_trained:1; -+ unsigned int pad:6; -+} __attribute__ ((packed)); -+ -+/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */ -+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04 -+ -+/** -+ * Sets the current set of active outputs. -+ * -+ * Takes a struct psb_intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP -+ * on multi-output devices. -+ */ -+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05 -+ -+/** -+ * Returns the current mapping of SDVO inputs to outputs on the device. -+ * -+ * Returns two struct psb_intel_sdvo_output_flags structures. -+ */ -+#define SDVO_CMD_GET_IN_OUT_MAP 0x06 -+ -+/** -+ * Sets the current mapping of SDVO inputs to outputs on the device. -+ * -+ * Takes two struct i380_sdvo_output_flags structures. -+ */ -+#define SDVO_CMD_SET_IN_OUT_MAP 0x07 -+ -+/** -+ * Returns a struct psb_intel_sdvo_output_flags of attached displays. -+ */ -+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b -+ -+/** -+ * Returns a struct psb_intel_sdvo_ouptut_flags of displays supporting hot plugging. -+ */ -+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c -+ -+/** -+ * Takes a struct psb_intel_sdvo_output_flags. -+ */ -+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d -+ -+/** -+ * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug -+ * interrupts enabled. 
-+ */
-+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
-+
-+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
-+struct psb_intel_sdvo_get_interrupt_event_source_response {
-+	u16 interrupt_status;
-+	unsigned int ambient_light_interrupt:1;
-+	unsigned int pad:7;
-+} __attribute__ ((packed));
-+
-+/**
-+ * Selects which input is affected by future input commands.
-+ *
-+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
-+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
-+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
-+ */
-+#define SDVO_CMD_SET_TARGET_INPUT 0x10
-+struct psb_intel_sdvo_set_target_input_args {
-+	unsigned int target_1:1;
-+	unsigned int pad:7;
-+} __attribute__ ((packed));
-+
-+/**
-+ * Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
-+ * future output commands.
-+ *
-+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
-+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
-+ */
-+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
-+
-+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
-+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
-+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
-+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
-+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
-+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
-+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
-+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
-+/* Part 1 */
-+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
-+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
-+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
-+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
-+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
-+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
-+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
-+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
-+/* Part 2 */
-+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
-+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
-+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
-+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
-+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
-+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
-+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
-+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
-+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
-+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
-+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
-+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
-+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
-+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
-+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
-+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
-+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
-+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
-+
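These part1/part2 argument registers carry the same packed DTD that psb_intel_sdvo_mode_set() builds earlier in this patch: each "high" byte holds the upper four bits of two wider fields. A small self-contained illustration of that packing; sdvo_dtd_h_high is a hypothetical helper, not something the patch defines.

#include <assert.h>
#include <stdint.h>

/* SDVO_DTD_H_HIGH layout: upper 4 bits of h_active in the high nibble,
 * upper 4 bits of h_blank in the low nibble */
static uint8_t sdvo_dtd_h_high(uint16_t h_active, uint16_t h_blank)
{
	return (uint8_t)((((h_active >> 8) & 0xf) << 4) |
			 ((h_blank >> 8) & 0xf));
}

int main(void)
{
	/* 1024 active pixels (0x400) and a 320-pixel blank (0x140):
	 * 1024 >> 8 == 4 and 320 >> 8 == 1, so the byte packs to 0x41 */
	assert(sdvo_dtd_h_high(1024, 320) == 0x41);
	return 0;
}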
-+ */ -+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a -+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0 -+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1 -+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2 -+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3 -+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4 -+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5 -+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6 -+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0) -+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1) -+ -+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b -+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c -+ -+/** Returns a struct psb_intel_sdvo_pixel_clock_range */ -+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d -+/** Returns a struct psb_intel_sdvo_pixel_clock_range */ -+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e -+ -+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */ -+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f -+ -+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ -+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20 -+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */ -+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21 -+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0) -+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1) -+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3) -+ -+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 -+ -+#define SDVO_CMD_GET_TV_FORMAT 0x28 -+ -+#define SDVO_CMD_SET_TV_FORMAT 0x29 -+ -+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a -+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b -+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c -+# define SDVO_ENCODER_STATE_ON (1 << 0) -+# define SDVO_ENCODER_STATE_STANDBY (1 << 1) -+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2) -+# define SDVO_ENCODER_STATE_OFF (1 << 3) -+ -+#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93 -+ -+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a -+# define SDVO_CONTROL_BUS_PROM 0x0 -+# define SDVO_CONTROL_BUS_DDC1 0x1 -+# define SDVO_CONTROL_BUS_DDC2 0x2 -+# define SDVO_CONTROL_BUS_DDC3 0x3 -diff -uNr a/drivers/gpu/drm/psb/psb_irq.c b/drivers/gpu/drm/psb/psb_irq.c ---- a/drivers/gpu/drm/psb/psb_irq.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_irq.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,420 @@ -+/************************************************************************** -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. 
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#include <drm/drmP.h>
-+#include "psb_drv.h"
-+#include "psb_reg.h"
-+#include "psb_msvdx.h"
-+#include "lnc_topaz.h"
-+
-+/*
-+ * Video display controller interrupt.
-+ */
-+
-+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *) dev->dev_private;
-+ int wake = 0;
-+
-+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
-+#ifdef PSB_FIXME
-+ atomic_inc(&dev->vbl_received);
-+#endif
-+ wake = 1;
-+ PSB_WVDC32(_PSB_VBLANK_INTERRUPT_ENABLE |
-+ _PSB_VBLANK_CLEAR, PSB_PIPEASTAT);
-+ }
-+
-+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
-+#ifdef PSB_FIXME
-+ atomic_inc(&dev->vbl_received2);
-+#endif
-+ wake = 1;
-+ PSB_WVDC32(_PSB_VBLANK_INTERRUPT_ENABLE |
-+ _PSB_VBLANK_CLEAR, PSB_PIPEBSTAT);
-+ }
-+
-+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
-+ (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
-+ DRM_READMEMORYBARRIER();
-+
-+#ifdef PSB_FIXME
-+ if (wake) {
-+ DRM_WAKEUP(&dev->vbl_queue);
-+ drm_vbl_send_signals(dev);
-+ }
-+#endif
-+}
-+
-+/*
-+ * SGX interrupt source 1.
-+ */
-+
-+static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
-+ uint32_t sgx_stat2)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *) dev->dev_private;
-+
-+ if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
-+ DRM_WAKEUP(&dev_priv->event_2d_queue);
-+ psb_fence_handler(dev, PSB_ENGINE_2D);
-+ }
-+
-+ if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
-+ psb_print_pagefault(dev_priv);
-+
-+ psb_scheduler_handler(dev_priv, sgx_stat);
-+}
-+
-+/*
-+ * MSVDX interrupt.
-+ */
-+static void psb_msvdx_interrupt(struct drm_device *dev,
-+ uint32_t msvdx_stat)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *) dev->dev_private;
-+
-+ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
-+ /* Ideally we should never get to this */
-+ PSB_DEBUG_IRQ("MSVDX:MMU Fault:0x%x fence2_irq_on=%d\n",
-+ msvdx_stat, dev_priv->fence2_irq_on);
-+
-+ /* Pause MMU */
-+ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
-+ MSVDX_MMU_CONTROL0);
-+ DRM_WRITEMEMORYBARRIER();
-+
-+ /* Clear this interrupt bit only */
-+ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
-+ MSVDX_INTERRUPT_CLEAR);
-+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
-+ DRM_READMEMORYBARRIER();
-+
-+ dev_priv->msvdx_needs_reset = 1;
-+ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
-+ PSB_DEBUG_IRQ
-+ ("MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d(MTX)\n",
-+ msvdx_stat, dev_priv->fence2_irq_on);
-+
-+ /* Clear all interrupt bits */
-+ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
-+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
-+ DRM_READMEMORYBARRIER();
-+
-+ psb_msvdx_mtx_interrupt(dev);
-+ }
-+}
-+
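-+/*
-+ * Main interrupt entry point. The identity register is sampled and
-+ * masked under irqmask_lock; the per-unit handlers (VDC, SGX, MSVDX
-+ * and, on Moorestown, TOPAZ) are then called outside the lock so they
-+ * are free to take their own locks and wake sleepers.
-+ */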
-+ PSB_DEBUG_IRQ("Got TOPAX interrupt\n"); -+ topaz_int = 1; -+ } -+ if (sgx_int && (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)) { -+ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS); -+ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2); -+ -+ sgx_stat2 &= dev_priv->sgx2_irq_mask; -+ sgx_stat &= dev_priv->sgx_irq_mask; -+ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2); -+ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR); -+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR); -+ } else if (unlikely(PSB_D_PM & drm_psb_debug)) { -+ if (sgx_int) -+ PSB_DEBUG_PM("sgx int in down mode\n"); -+ } -+ vdc_stat &= dev_priv->vdc_irq_mask; -+ spin_unlock(&dev_priv->irqmask_lock); -+ -+ if (msvdx_int) { -+ uint32_t msvdx_stat = 0; -+ -+ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS); -+ psb_msvdx_interrupt(dev, msvdx_stat); -+ handled = 1; -+ } -+ -+ if (IS_MRST(dev) && topaz_int) { -+ uint32_t topaz_stat = 0; -+ -+ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT,&topaz_stat); -+ lnc_topaz_interrupt (dev, topaz_stat); -+ handled = 1; -+ } -+ -+ if (vdc_stat) { -+ /* MSVDX IRQ status is part of vdc_irq_mask */ -+ psb_vdc_interrupt(dev, vdc_stat); -+ handled = 1; -+ } -+ -+ if (sgx_stat || sgx_stat2) { -+ -+ psb_sgx_interrupt(dev, sgx_stat, sgx_stat2); -+ handled = 1; -+ } -+ -+ if (!handled) -+ return IRQ_NONE; -+ -+ -+ return IRQ_HANDLED; -+} -+ -+void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv) -+{ -+ unsigned long mtx_int = 0; -+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG; -+ -+ /* Clear MTX interrupt */ -+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, -+ 1); -+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR); -+} -+ -+void psb_irq_preinstall(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ unsigned long mtx_int = 0; -+ unsigned long irqflags; -+ PSB_DEBUG_PM("psb_irq_preinstall\n"); -+ -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); -+ -+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); -+ PSB_WVDC32(0x00000000, PSB_INT_MASK_R); -+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R); -+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE); -+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); -+ -+ dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER | -+ _PSB_CE_DPM_3D_MEM_FREE | -+ _PSB_CE_TA_FINISHED | -+ _PSB_CE_DPM_REACHED_MEM_THRESH | -+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL | -+ _PSB_CE_DPM_OUT_OF_MEMORY_MT | -+ _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT; -+ -+ dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT; -+ -+ dev_priv->vdc_irq_mask = _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG; -+ -+ if (!drm_psb_disable_vsync) -+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG | -+ _PSB_VSYNC_PIPEB_FLAG; -+ -+ /* Clear MTX interrupt */ -+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, -+ CR_MTX_IRQ, 1); -+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR); -+ -+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); -+ up_read(&dev_priv->sgx_sem); -+} -+ -+void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv) -+{ -+ /* Enable Mtx Interupt to host */ -+ unsigned long enables = 0; -+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n"); -+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, -+ 1); -+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE); -+} -+ -+int psb_irq_postinstall(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ unsigned long irqflags; -+ unsigned long 
-+int psb_irq_postinstall(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *) dev->dev_private;
-+ unsigned long irqflags;
-+ unsigned long enables = 0;
-+
-+ PSB_DEBUG_PM("psb_irq_postinstall\n");
-+ down_read(&dev_priv->sgx_sem);
-+ psb_check_power_state(dev, PSB_DEVICE_SGX);
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+
-+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
-+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
-+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
-+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+
-+ /* MSVDX IRQ Setup, Enable Mtx Interrupt to host */
-+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
-+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS,
-+ CR_MTX_IRQ, 1);
-+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
-+
-+ dev_priv->irq_enabled = 1;
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+ up_read(&dev_priv->sgx_sem);
-+ return 0;
-+}
-+
-+void psb_irq_uninstall(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *) dev->dev_private;
-+ unsigned long irqflags;
-+ PSB_DEBUG_PM("psb_irq_uninstall\n");
-+ down_read(&dev_priv->sgx_sem);
-+ psb_check_power_state(dev, PSB_DEVICE_SGX);
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+
-+ dev_priv->sgx_irq_mask = 0x00000000;
-+ dev_priv->sgx2_irq_mask = 0x00000000;
-+ dev_priv->vdc_irq_mask = 0x00000000;
-+
-+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
-+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
-+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
-+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
-+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
-+ wmb();
-+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
-+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS),
-+ PSB_CR_EVENT_HOST_CLEAR);
-+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2),
-+ PSB_CR_EVENT_HOST_CLEAR2);
-+
-+ /* MSVDX IRQ Setup */
-+ /* Clear interrupt enabled flag */
-+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
-+
-+ if (IS_MRST(dev))
-+ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0);
-+
-+ dev_priv->irq_enabled = 0;
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+ up_read(&dev_priv->sgx_sem);
-+}
-+
-+void psb_2D_irq_off(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irqflags;
-+ uint32_t old_mask;
-+ uint32_t cleared_mask;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ --dev_priv->irqen_count_2d;
-+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
-+
-+ old_mask = dev_priv->sgx_irq_mask;
-+ dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE;
-+ PSB_WSGX32(dev_priv->sgx_irq_mask,
-+ PSB_CR_EVENT_HOST_ENABLE);
-+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+
-+ cleared_mask =
-+ (old_mask ^ dev_priv->sgx_irq_mask) & old_mask;
-+ PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR);
-+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+}
-+
-+void psb_2D_irq_on(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
-+ dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE;
-+ PSB_WSGX32(dev_priv->sgx_irq_mask,
-+ PSB_CR_EVENT_HOST_ENABLE);
-+ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+ }
-+ ++dev_priv->irqen_count_2d;
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+}
-+
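-+/*
-+ * psb_2D_irq_on()/psb_2D_irq_off() above are reference counted via
-+ * irqen_count_2d, so nested users can enable the 2D-complete interrupt
-+ * independently; the source is only masked again once the count drops
-+ * back to zero.
-+ */
-+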
-+#ifdef PSB_FIXME
-+static int psb_vblank_do_wait(struct drm_device *dev,
-+ unsigned int *sequence, atomic_t *counter)
-+{
-+ unsigned int cur_vblank;
-+ int ret = 0;
-+ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-+ (((cur_vblank = atomic_read(counter))
-+ - *sequence) <= (1 << 23)));
-+ *sequence = cur_vblank;
-+
-+ return ret;
-+}
-+#endif
-+
-+void psb_msvdx_irq_off(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ if (dev_priv->irq_enabled) {
-+ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
-+ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
-+ (void) PSB_RSGX32(PSB_INT_ENABLE_R);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+}
-+
-+void psb_msvdx_irq_on(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ if (dev_priv->irq_enabled) {
-+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
-+ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
-+ (void) PSB_RSGX32(PSB_INT_ENABLE_R);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+}
-diff -uNr a/drivers/gpu/drm/psb/psb_mmu.c b/drivers/gpu/drm/psb/psb_mmu.c
---- a/drivers/gpu/drm/psb/psb_mmu.c 1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/psb_mmu.c 2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,1069 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+#include <drm/drmP.h>
-+#include "psb_drv.h"
-+#include "psb_reg.h"
-+
-+/*
-+ * Code for the SGX MMU:
-+ */
-+
-+/*
-+ * clflush on one processor only:
-+ * clflush should apparently flush the cache line on all processors in an
-+ * SMP system.
-+ */
-+
-+/*
-+ * kmap atomic:
-+ * The usage of the slots must be completely encapsulated within a spinlock, and
-+ * no other functions that may be using the locks for other purposes may be
-+ * called from within the locked region.
-+ * Since the slots are per processor, this will guarantee that we are the only
-+ * user.
-+ */
-+
-+/*
-+ * TODO: Inserting ptes from an interrupt handler:
-+ * This may be desirable for some SGX functionality where the GPU can fault in
-+ * needed pages. For that, we need to make an atomic insert_pages function, that
-+ * may fail.
-+ * If it fails, the caller needs to insert the page using a workqueue function,
-+ * but on average it should be fast.
-+ */
-+
-+struct psb_mmu_driver {
-+ /* protects driver- and pd structures. Always take in read mode
-+ * before taking the page table spinlock.
-+ */
-+ struct rw_semaphore sem;
-+
-+ /* protects page tables, directory tables
-+ * and pt structures.
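-+ *
-+ * The intended nesting, as used by the mapping functions below, is
-+ * roughly:
-+ *
-+ * down_read(&driver->sem);
-+ * spin_lock(&driver->lock);
-+ * ... read or modify the page tables ...
-+ * spin_unlock(&driver->lock);
-+ * up_read(&driver->sem);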
-+ */ -+ spinlock_t lock; -+ -+ atomic_t needs_tlbflush; -+ -+ uint8_t __iomem *register_map; -+ struct psb_mmu_pd *default_pd; -+ uint32_t bif_ctrl; -+ int has_clflush; -+ int clflush_add; -+ unsigned long clflush_mask; -+ -+ struct drm_psb_private *dev_priv; -+}; -+ -+struct psb_mmu_pd; -+ -+struct psb_mmu_pt { -+ struct psb_mmu_pd *pd; -+ uint32_t index; -+ uint32_t count; -+ struct page *p; -+ uint32_t *v; -+}; -+ -+struct psb_mmu_pd { -+ struct psb_mmu_driver *driver; -+ int hw_context; -+ struct psb_mmu_pt **tables; -+ struct page *p; -+ struct page *dummy_pt; -+ struct page *dummy_page; -+ uint32_t pd_mask; -+ uint32_t invalid_pde; -+ uint32_t invalid_pte; -+}; -+ -+void topaz_mmu_flushcache(struct drm_psb_private *dev_priv); -+ -+static inline uint32_t psb_mmu_pt_index(uint32_t offset) -+{ -+ return (offset >> PSB_PTE_SHIFT) & 0x3FF; -+} -+ -+static inline uint32_t psb_mmu_pd_index(uint32_t offset) -+{ -+ return offset >> PSB_PDE_SHIFT; -+} -+ -+#if defined(CONFIG_X86) -+static inline void psb_clflush(void *addr) -+{ -+ __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory"); -+} -+ -+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, -+ void *addr) -+{ -+ if (!driver->has_clflush) -+ return; -+ -+ mb(); -+ psb_clflush(addr); -+ mb(); -+} -+#else -+ -+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, -+ void *addr) -+{; -+} -+ -+#endif -+ -+static inline void psb_iowrite32(const struct psb_mmu_driver *d, -+ uint32_t val, uint32_t offset) -+{ -+ iowrite32(val, d->register_map + offset); -+} -+ -+static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d, -+ uint32_t offset) -+{ -+ return ioread32(d->register_map + offset); -+} -+ -+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, -+ int force) -+{ -+ if (atomic_read(&driver->needs_tlbflush) || force) { -+ uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL); -+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC, -+ PSB_CR_BIF_CTRL); -+ wmb(); -+ psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC, -+ PSB_CR_BIF_CTRL); -+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL); -+ if (driver->dev_priv) { -+ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1); -+ if (IS_MRST(driver->dev_priv->dev)) -+ topaz_mmu_flushcache(driver->dev_priv); -+ } -+ } -+ atomic_set(&driver->needs_tlbflush, 0); -+} -+ -+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force) -+{ -+ down_write(&driver->sem); -+ psb_mmu_flush_pd_locked(driver, force); -+ up_write(&driver->sem); -+} -+ -+void psb_mmu_flush(struct psb_mmu_driver *driver) -+{ -+ uint32_t val; -+ -+ down_write(&driver->sem); -+ if (driver->dev_priv->graphics_state == PSB_PWR_STATE_D0i0) { -+ val = psb_ioread32(driver, PSB_CR_BIF_CTRL); -+ if (atomic_read(&driver->needs_tlbflush)) -+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC, -+ PSB_CR_BIF_CTRL); -+ else -+ psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH, -+ PSB_CR_BIF_CTRL); -+ wmb(); -+ psb_iowrite32(driver, -+ val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC), -+ PSB_CR_BIF_CTRL); -+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL); -+ atomic_set(&driver->needs_tlbflush, 0); -+ } else { -+ PSB_DEBUG_PM("mmu flush when down\n"); -+ } -+ -+ if (driver->dev_priv) { -+ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1); -+ if (IS_MRST(driver->dev_priv->dev)) -+ topaz_mmu_flushcache(driver->dev_priv); -+ } -+ -+ up_write(&driver->sem); -+} -+ -+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context) -+{ -+ uint32_t offset = (hw_context == 0) ? 
PSB_CR_BIF_DIR_LIST_BASE0 : -+ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4; -+ -+ ttm_tt_cache_flush(&pd->p, 1); -+ down_write(&pd->driver->sem); -+ psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT), -+ offset); -+ wmb(); -+ psb_mmu_flush_pd_locked(pd->driver, 1); -+ pd->hw_context = hw_context; -+ up_write(&pd->driver->sem); -+ -+} -+ -+static inline unsigned long psb_pd_addr_end(unsigned long addr, -+ unsigned long end) -+{ -+ -+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK; -+ return (addr < end) ? addr : end; -+} -+ -+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type) -+{ -+ uint32_t mask = PSB_PTE_VALID; -+ -+ if (type & PSB_MMU_CACHED_MEMORY) -+ mask |= PSB_PTE_CACHED; -+ if (type & PSB_MMU_RO_MEMORY) -+ mask |= PSB_PTE_RO; -+ if (type & PSB_MMU_WO_MEMORY) -+ mask |= PSB_PTE_WO; -+ -+ return (pfn << PAGE_SHIFT) | mask; -+} -+ -+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver, -+ int trap_pagefaults, int invalid_type) -+{ -+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL); -+ uint32_t *v; -+ int i; -+ -+ if (!pd) -+ return NULL; -+ -+ pd->p = alloc_page(GFP_DMA32); -+ if (!pd->p) -+ goto out_err1; -+ pd->dummy_pt = alloc_page(GFP_DMA32); -+ if (!pd->dummy_pt) -+ goto out_err2; -+ pd->dummy_page = alloc_page(GFP_DMA32); -+ if (!pd->dummy_page) -+ goto out_err3; -+ -+ if (!trap_pagefaults) { -+ pd->invalid_pde = -+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), -+ invalid_type); -+ pd->invalid_pte = -+ psb_mmu_mask_pte(page_to_pfn(pd->dummy_page), -+ invalid_type); -+ } else { -+ pd->invalid_pde = 0; -+ pd->invalid_pte = 0; -+ } -+ -+ v = kmap(pd->dummy_pt); -+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) -+ v[i] = pd->invalid_pte; -+ -+ kunmap(pd->dummy_pt); -+ -+ v = kmap(pd->p); -+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) -+ v[i] = pd->invalid_pde; -+ -+ kunmap(pd->p); -+ -+ clear_page(kmap(pd->dummy_page)); -+ kunmap(pd->dummy_page); -+ -+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024); -+ if (!pd->tables) -+ goto out_err4; -+ -+ pd->hw_context = -1; -+ pd->pd_mask = PSB_PTE_VALID; -+ pd->driver = driver; -+ -+ return pd; -+ -+out_err4: -+ __free_page(pd->dummy_page); -+out_err3: -+ __free_page(pd->dummy_pt); -+out_err2: -+ __free_page(pd->p); -+out_err1: -+ kfree(pd); -+ return NULL; -+} -+ -+void psb_mmu_free_pt(struct psb_mmu_pt *pt) -+{ -+ __free_page(pt->p); -+ kfree(pt); -+} -+ -+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) -+{ -+ struct psb_mmu_driver *driver = pd->driver; -+ struct psb_mmu_pt *pt; -+ int i; -+ -+ down_write(&driver->sem); -+ if (pd->hw_context != -1) { -+ psb_iowrite32(driver, 0, -+ PSB_CR_BIF_DIR_LIST_BASE0 + -+ pd->hw_context * 4); -+ psb_mmu_flush_pd_locked(driver, 1); -+ } -+ -+ /* Should take the spinlock here, but we don't need to do that -+ since we have the semaphore in write mode. 
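-+ Writers on the semaphore exclude all readers, and the spinlock is
-+ only ever taken with the semaphore held, so nothing else can be
-+ walking these tables while we tear them down.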
-+ */
-+
-+ for (i = 0; i < 1024; ++i) {
-+ pt = pd->tables[i];
-+ if (pt)
-+ psb_mmu_free_pt(pt);
-+ }
-+
-+ vfree(pd->tables);
-+ __free_page(pd->dummy_page);
-+ __free_page(pd->dummy_pt);
-+ __free_page(pd->p);
-+ kfree(pd);
-+ up_write(&driver->sem);
-+}
-+
-+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
-+{
-+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
-+ void *v;
-+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
-+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
-+ spinlock_t *lock = &pd->driver->lock;
-+ uint8_t *clf;
-+ uint32_t *ptes;
-+ int i;
-+
-+ if (!pt)
-+ return NULL;
-+
-+ pt->p = alloc_page(GFP_DMA32);
-+ if (!pt->p) {
-+ kfree(pt);
-+ return NULL;
-+ }
-+
-+ spin_lock(lock);
-+
-+ v = kmap_atomic(pt->p, KM_USER0);
-+ clf = (uint8_t *) v;
-+ ptes = (uint32_t *) v;
-+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
-+ *ptes++ = pd->invalid_pte;
-+
-+
-+#if defined(CONFIG_X86)
-+ if (pd->driver->has_clflush && pd->hw_context != -1) {
-+ mb();
-+ for (i = 0; i < clflush_count; ++i) {
-+ psb_clflush(clf);
-+ clf += clflush_add;
-+ }
-+ mb();
-+ }
-+#endif
-+ kunmap_atomic(v, KM_USER0);
-+ spin_unlock(lock);
-+
-+ pt->count = 0;
-+ pt->pd = pd;
-+ pt->index = 0;
-+
-+ return pt;
-+}
-+
-+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
-+ unsigned long addr)
-+{
-+ uint32_t index = psb_mmu_pd_index(addr);
-+ struct psb_mmu_pt *pt;
-+ uint32_t *v;
-+ spinlock_t *lock = &pd->driver->lock;
-+
-+ spin_lock(lock);
-+ pt = pd->tables[index];
-+ while (!pt) {
-+ spin_unlock(lock);
-+ pt = psb_mmu_alloc_pt(pd);
-+ if (!pt)
-+ return NULL;
-+ spin_lock(lock);
-+
-+ if (pd->tables[index]) {
-+ spin_unlock(lock);
-+ psb_mmu_free_pt(pt);
-+ spin_lock(lock);
-+ pt = pd->tables[index];
-+ continue;
-+ }
-+
-+ v = kmap_atomic(pd->p, KM_USER0);
-+ pd->tables[index] = pt;
-+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
-+ pt->index = index;
-+ kunmap_atomic((void *) v, KM_USER0);
-+
-+ if (pd->hw_context != -1) {
-+ psb_mmu_clflush(pd->driver, (void *) &v[index]);
-+ atomic_set(&pd->driver->needs_tlbflush, 1);
-+ }
-+ }
-+ pt->v = kmap_atomic(pt->p, KM_USER0);
-+ return pt;
-+}
-+
-+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
-+ unsigned long addr)
-+{
-+ uint32_t index = psb_mmu_pd_index(addr);
-+ struct psb_mmu_pt *pt;
-+ spinlock_t *lock = &pd->driver->lock;
-+
-+ spin_lock(lock);
-+ pt = pd->tables[index];
-+ if (!pt) {
-+ spin_unlock(lock);
-+ return NULL;
-+ }
-+ pt->v = kmap_atomic(pt->p, KM_USER0);
-+ return pt;
-+}
-+
-+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
-+{
-+ struct psb_mmu_pd *pd = pt->pd;
-+ uint32_t *v;
-+
-+ kunmap_atomic(pt->v, KM_USER0);
-+ if (pt->count == 0) {
-+ v = kmap_atomic(pd->p, KM_USER0);
-+ v[pt->index] = pd->invalid_pde;
-+ pd->tables[pt->index] = NULL;
-+
-+ if (pd->hw_context != -1) {
-+ psb_mmu_clflush(pd->driver,
-+ (void *) &v[pt->index]);
-+ atomic_set(&pd->driver->needs_tlbflush, 1);
-+ }
-+ /* release the mapping of the directory page, pt->v is
-+ * already unmapped above */
-+ kunmap_atomic(v, KM_USER0);
-+ spin_unlock(&pd->driver->lock);
-+ psb_mmu_free_pt(pt);
-+ return;
-+ }
-+ spin_unlock(&pd->driver->lock);
-+}
-+
-+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
-+ unsigned long addr, uint32_t pte)
-+{
-+ pt->v[psb_mmu_pt_index(addr)] = pte;
-+}
-+
-+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
-+ unsigned long addr)
-+{
-+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
-+}
-+
-+#if 0
-+static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
-+ uint32_t
mmu_offset) -+{ -+ uint32_t *v; -+ uint32_t pfn; -+ -+ v = kmap_atomic(pd->p, KM_USER0); -+ if (!v) { -+ printk(KERN_INFO "Could not kmap pde page.\n"); -+ return 0; -+ } -+ pfn = v[psb_mmu_pd_index(mmu_offset)]; -+ /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */ -+ kunmap_atomic(v, KM_USER0); -+ if (((pfn & 0x0F) != PSB_PTE_VALID)) { -+ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n", -+ mmu_offset, pfn); -+ } -+ v = ioremap(pfn & 0xFFFFF000, 4096); -+ if (!v) { -+ printk(KERN_INFO "Could not kmap pte page.\n"); -+ return 0; -+ } -+ pfn = v[psb_mmu_pt_index(mmu_offset)]; -+ /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */ -+ iounmap(v); -+ if (((pfn & 0x0F) != PSB_PTE_VALID)) { -+ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n", -+ mmu_offset, pfn); -+ } -+ return pfn >> PAGE_SHIFT; -+} -+ -+static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd, -+ uint32_t mmu_offset, -+ uint32_t gtt_pages) -+{ -+ uint32_t start; -+ uint32_t next; -+ -+ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n", -+ mmu_offset, gtt_pages); -+ down_read(&pd->driver->sem); -+ start = psb_mmu_check_pte_locked(pd, mmu_offset); -+ mmu_offset += PAGE_SIZE; -+ gtt_pages -= 1; -+ while (gtt_pages--) { -+ next = psb_mmu_check_pte_locked(pd, mmu_offset); -+ if (next != start + 1) { -+ printk(KERN_INFO -+ "Ptes out of order: 0x%08x, 0x%08x.\n", -+ start, next); -+ } -+ start = next; -+ mmu_offset += PAGE_SIZE; -+ } -+ up_read(&pd->driver->sem); -+} -+ -+#endif -+ -+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, -+ uint32_t mmu_offset, uint32_t gtt_start, -+ uint32_t gtt_pages) -+{ -+ uint32_t *v; -+ uint32_t start = psb_mmu_pd_index(mmu_offset); -+ struct psb_mmu_driver *driver = pd->driver; -+ int num_pages = gtt_pages; -+ -+ down_read(&driver->sem); -+ spin_lock(&driver->lock); -+ -+ v = kmap_atomic(pd->p, KM_USER0); -+ v += start; -+ -+ while (gtt_pages--) { -+ *v++ = gtt_start | pd->pd_mask; -+ gtt_start += PAGE_SIZE; -+ } -+ -+ ttm_tt_cache_flush(&pd->p, num_pages); -+ kunmap_atomic(v, KM_USER0); -+ spin_unlock(&driver->lock); -+ -+ if (pd->hw_context != -1) -+ atomic_set(&pd->driver->needs_tlbflush, 1); -+ -+ up_read(&pd->driver->sem); -+ psb_mmu_flush_pd(pd->driver, 0); -+} -+ -+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver) -+{ -+ struct psb_mmu_pd *pd; -+ -+ down_read(&driver->sem); -+ pd = driver->default_pd; -+ up_read(&driver->sem); -+ -+ return pd; -+} -+ -+/* Returns the physical address of the PD shared by sgx/msvdx */ -+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver) -+{ -+ struct psb_mmu_pd *pd; -+ -+ pd = psb_mmu_get_default_pd(driver); -+ return page_to_pfn(pd->p) << PAGE_SHIFT; -+} -+ -+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver) -+{ -+ psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL); -+ psb_mmu_free_pagedir(driver->default_pd); -+ kfree(driver); -+} -+ -+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers, -+ int trap_pagefaults, -+ int invalid_type, -+ struct drm_psb_private *dev_priv) -+{ -+ struct psb_mmu_driver *driver; -+ -+ driver = kmalloc(sizeof(*driver), GFP_KERNEL); -+ -+ if (!driver) -+ return NULL; -+ driver->dev_priv = dev_priv; -+ -+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults, -+ invalid_type); -+ if (!driver->default_pd) -+ goto out_err1; -+ -+ spin_lock_init(&driver->lock); -+ init_rwsem(&driver->sem); -+ down_write(&driver->sem); -+ driver->register_map = registers; -+ atomic_set(&driver->needs_tlbflush, 1); -+ -+ driver->bif_ctrl = psb_ioread32(driver, 
PSB_CR_BIF_CTRL); -+ psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT, -+ PSB_CR_BIF_CTRL); -+ psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT, -+ PSB_CR_BIF_CTRL); -+ -+ driver->has_clflush = 0; -+ -+#if defined(CONFIG_X86) -+ if (boot_cpu_has(X86_FEATURE_CLFLSH)) { -+ uint32_t tfms, misc, cap0, cap4, clflush_size; -+ -+ /* -+ * clflush size is determined at kernel setup for x86_64 -+ * but not for i386. We have to do it here. -+ */ -+ -+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4); -+ clflush_size = ((misc >> 8) & 0xff) * 8; -+ driver->has_clflush = 1; -+ driver->clflush_add = -+ PAGE_SIZE * clflush_size / sizeof(uint32_t); -+ driver->clflush_mask = driver->clflush_add - 1; -+ driver->clflush_mask = ~driver->clflush_mask; -+ } -+#endif -+ -+ up_write(&driver->sem); -+ return driver; -+ -+out_err1: -+ kfree(driver); -+ return NULL; -+} -+ -+#if defined(CONFIG_X86) -+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, -+ unsigned long address, uint32_t num_pages, -+ uint32_t desired_tile_stride, -+ uint32_t hw_tile_stride) -+{ -+ struct psb_mmu_pt *pt; -+ uint32_t rows = 1; -+ uint32_t i; -+ unsigned long addr; -+ unsigned long end; -+ unsigned long next; -+ unsigned long add; -+ unsigned long row_add; -+ unsigned long clflush_add = pd->driver->clflush_add; -+ unsigned long clflush_mask = pd->driver->clflush_mask; -+ -+ if (!pd->driver->has_clflush) { -+ ttm_tt_cache_flush(&pd->p, num_pages); -+ return; -+ } -+ -+ if (hw_tile_stride) -+ rows = num_pages / desired_tile_stride; -+ else -+ desired_tile_stride = num_pages; -+ -+ add = desired_tile_stride << PAGE_SHIFT; -+ row_add = hw_tile_stride << PAGE_SHIFT; -+ mb(); -+ for (i = 0; i < rows; ++i) { -+ -+ addr = address; -+ end = addr + add; -+ -+ do { -+ next = psb_pd_addr_end(addr, end); -+ pt = psb_mmu_pt_map_lock(pd, addr); -+ if (!pt) -+ continue; -+ do { -+ psb_clflush(&pt->v -+ [psb_mmu_pt_index(addr)]); -+ } while (addr += -+ clflush_add, -+ (addr & clflush_mask) < next); -+ -+ psb_mmu_pt_unmap_unlock(pt); -+ } while (addr = next, next != end); -+ address += row_add; -+ } -+ mb(); -+} -+#else -+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, -+ unsigned long address, uint32_t num_pages, -+ uint32_t desired_tile_stride, -+ uint32_t hw_tile_stride) -+{ -+ drm_ttm_cache_flush(&pd->p, num_pages); -+} -+#endif -+ -+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, -+ unsigned long address, uint32_t num_pages) -+{ -+ struct psb_mmu_pt *pt; -+ unsigned long addr; -+ unsigned long end; -+ unsigned long next; -+ unsigned long f_address = address; -+ -+ down_read(&pd->driver->sem); -+ -+ addr = address; -+ end = addr + (num_pages << PAGE_SHIFT); -+ -+ do { -+ next = psb_pd_addr_end(addr, end); -+ pt = psb_mmu_pt_alloc_map_lock(pd, addr); -+ if (!pt) -+ goto out; -+ do { -+ psb_mmu_invalidate_pte(pt, addr); -+ --pt->count; -+ } while (addr += PAGE_SIZE, addr < next); -+ psb_mmu_pt_unmap_unlock(pt); -+ -+ } while (addr = next, next != end); -+ -+out: -+ if (pd->hw_context != -1) -+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); -+ -+ up_read(&pd->driver->sem); -+ -+ if (pd->hw_context != -1) -+ psb_mmu_flush(pd->driver); -+ -+ return; -+} -+ -+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address, -+ uint32_t num_pages, uint32_t desired_tile_stride, -+ uint32_t hw_tile_stride) -+{ -+ struct psb_mmu_pt *pt; -+ uint32_t rows = 1; -+ uint32_t i; -+ unsigned long addr; -+ unsigned long end; -+ unsigned long next; -+ unsigned long add; -+ unsigned long row_add; -+ unsigned long 
f_address = address; -+ -+ if (hw_tile_stride) -+ rows = num_pages / desired_tile_stride; -+ else -+ desired_tile_stride = num_pages; -+ -+ add = desired_tile_stride << PAGE_SHIFT; -+ row_add = hw_tile_stride << PAGE_SHIFT; -+ -+ down_read(&pd->driver->sem); -+ -+ /* Make sure we only need to flush this processor's cache */ -+ -+ for (i = 0; i < rows; ++i) { -+ -+ addr = address; -+ end = addr + add; -+ -+ do { -+ next = psb_pd_addr_end(addr, end); -+ pt = psb_mmu_pt_map_lock(pd, addr); -+ if (!pt) -+ continue; -+ do { -+ psb_mmu_invalidate_pte(pt, addr); -+ --pt->count; -+ -+ } while (addr += PAGE_SIZE, addr < next); -+ psb_mmu_pt_unmap_unlock(pt); -+ -+ } while (addr = next, next != end); -+ address += row_add; -+ } -+ if (pd->hw_context != -1) -+ psb_mmu_flush_ptes(pd, f_address, num_pages, -+ desired_tile_stride, hw_tile_stride); -+ -+ up_read(&pd->driver->sem); -+ -+ if (pd->hw_context != -1) -+ psb_mmu_flush(pd->driver); -+} -+ -+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, -+ unsigned long address, uint32_t num_pages, -+ int type) -+{ -+ struct psb_mmu_pt *pt; -+ uint32_t pte; -+ unsigned long addr; -+ unsigned long end; -+ unsigned long next; -+ unsigned long f_address = address; -+ int ret = 0; -+ -+ down_read(&pd->driver->sem); -+ -+ addr = address; -+ end = addr + (num_pages << PAGE_SHIFT); -+ -+ do { -+ next = psb_pd_addr_end(addr, end); -+ pt = psb_mmu_pt_alloc_map_lock(pd, addr); -+ if (!pt) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ do { -+ pte = psb_mmu_mask_pte(start_pfn++, type); -+ psb_mmu_set_pte(pt, addr, pte); -+ pt->count++; -+ } while (addr += PAGE_SIZE, addr < next); -+ psb_mmu_pt_unmap_unlock(pt); -+ -+ } while (addr = next, next != end); -+ -+out: -+ if (pd->hw_context != -1) -+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); -+ -+ up_read(&pd->driver->sem); -+ -+ if (pd->hw_context != -1) -+ psb_mmu_flush(pd->driver); -+ -+ return ret; -+} -+ -+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, -+ unsigned long address, uint32_t num_pages, -+ uint32_t desired_tile_stride, -+ uint32_t hw_tile_stride, int type) -+{ -+ struct psb_mmu_pt *pt; -+ uint32_t rows = 1; -+ uint32_t i; -+ uint32_t pte; -+ unsigned long addr; -+ unsigned long end; -+ unsigned long next; -+ unsigned long add; -+ unsigned long row_add; -+ unsigned long f_address = address; -+ int ret = 0; -+ -+ if (hw_tile_stride) { -+ if (num_pages % desired_tile_stride != 0) -+ return -EINVAL; -+ rows = num_pages / desired_tile_stride; -+ } else { -+ desired_tile_stride = num_pages; -+ } -+ -+ add = desired_tile_stride << PAGE_SHIFT; -+ row_add = hw_tile_stride << PAGE_SHIFT; -+ -+ down_read(&pd->driver->sem); -+ -+ for (i = 0; i < rows; ++i) { -+ -+ addr = address; -+ end = addr + add; -+ -+ do { -+ next = psb_pd_addr_end(addr, end); -+ pt = psb_mmu_pt_alloc_map_lock(pd, addr); -+ if (!pt) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ do { -+ pte = -+ psb_mmu_mask_pte(page_to_pfn(*pages++), -+ type); -+ psb_mmu_set_pte(pt, addr, pte); -+ pt->count++; -+ } while (addr += PAGE_SIZE, addr < next); -+ psb_mmu_pt_unmap_unlock(pt); -+ -+ } while (addr = next, next != end); -+ -+ address += row_add; -+ } -+out: -+ if (pd->hw_context != -1) -+ psb_mmu_flush_ptes(pd, f_address, num_pages, -+ desired_tile_stride, hw_tile_stride); -+ -+ up_read(&pd->driver->sem); -+ -+ if (pd->hw_context != -1) -+ psb_mmu_flush(pd->driver); -+ -+ return ret; -+} -+ -+void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask) -+{ -+ mask &= _PSB_MMU_ER_MASK; -+ 
psb_iowrite32(driver, -+ psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask, -+ PSB_CR_BIF_CTRL); -+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL); -+} -+ -+void psb_mmu_disable_requestor(struct psb_mmu_driver *driver, -+ uint32_t mask) -+{ -+ mask &= _PSB_MMU_ER_MASK; -+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask, -+ PSB_CR_BIF_CTRL); -+ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL); -+} -+ -+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, -+ unsigned long *pfn) -+{ -+ int ret; -+ struct psb_mmu_pt *pt; -+ uint32_t tmp; -+ spinlock_t *lock = &pd->driver->lock; -+ -+ down_read(&pd->driver->sem); -+ pt = psb_mmu_pt_map_lock(pd, virtual); -+ if (!pt) { -+ uint32_t *v; -+ -+ spin_lock(lock); -+ v = kmap_atomic(pd->p, KM_USER0); -+ tmp = v[psb_mmu_pd_index(virtual)]; -+ kunmap_atomic(v, KM_USER0); -+ spin_unlock(lock); -+ -+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || -+ !(pd->invalid_pte & PSB_PTE_VALID)) { -+ ret = -EINVAL; -+ goto out; -+ } -+ ret = 0; -+ *pfn = pd->invalid_pte >> PAGE_SHIFT; -+ goto out; -+ } -+ tmp = pt->v[psb_mmu_pt_index(virtual)]; -+ if (!(tmp & PSB_PTE_VALID)) { -+ ret = -EINVAL; -+ } else { -+ ret = 0; -+ *pfn = tmp >> PAGE_SHIFT; -+ } -+ psb_mmu_pt_unmap_unlock(pt); -+out: -+ up_read(&pd->driver->sem); -+ return ret; -+} -+ -+void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset) -+{ -+ struct page *p; -+ unsigned long pfn; -+ int ret = 0; -+ struct psb_mmu_pd *pd; -+ uint32_t *v; -+ uint32_t *vmmu; -+ -+ pd = driver->default_pd; -+ if (!pd) -+ printk(KERN_WARNING "Could not get default pd\n"); -+ -+ -+ p = alloc_page(GFP_DMA32); -+ -+ if (!p) { -+ printk(KERN_WARNING "Failed allocating page\n"); -+ return; -+ } -+ -+ v = kmap(p); -+ memset(v, 0x67, PAGE_SIZE); -+ -+ pfn = (offset >> PAGE_SHIFT); -+ -+ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0); -+ if (ret) { -+ printk(KERN_WARNING "Failed inserting mmu page\n"); -+ goto out_err1; -+ } -+ -+ /* Ioremap the page through the GART aperture */ -+ -+ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); -+ if (!vmmu) { -+ printk(KERN_WARNING "Failed ioremapping page\n"); -+ goto out_err2; -+ } -+ -+ /* Read from the page with mmu disabled. */ -+ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu)); -+ -+ /* Enable the mmu for host accesses and read again. 
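-+ * If the PTE inserted above is correct, this second read goes through
-+ * the SGX MMU and should return the 0x67676767 pattern that was
-+ * written through the CPU mapping.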
*/ -+ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST); -+ -+ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n", -+ ioread32(vmmu)); -+ *v = 0x15243705; -+ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n", -+ ioread32(vmmu)); -+ iowrite32(0x16243355, vmmu); -+ (void) ioread32(vmmu); -+ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v); -+ -+ printk(KERN_INFO "Int stat is 0x%08x\n", -+ psb_ioread32(driver, PSB_CR_BIF_INT_STAT)); -+ printk(KERN_INFO "Fault is 0x%08x\n", -+ psb_ioread32(driver, PSB_CR_BIF_FAULT)); -+ -+ /* Disable MMU for host accesses and clear page fault register */ -+ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST); -+ iounmap(vmmu); -+out_err2: -+ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0); -+out_err1: -+ kunmap(p); -+ __free_page(p); -+} -diff -uNr a/drivers/gpu/drm/psb/psb_msvdx.c b/drivers/gpu/drm/psb/psb_msvdx.c ---- a/drivers/gpu/drm/psb/psb_msvdx.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_msvdx.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,681 @@ -+/** -+ * file psb_msvdx.c -+ * MSVDX I/O operations and IRQ handling -+ * -+ */ -+ -+/************************************************************************** -+ * -+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA -+ * Copyright (c) Imagination Technologies Limited, UK -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ *
-+ **************************************************************************/
-+
-+#include <drm/drmP.h>
-+#include <drm/drm_os_linux.h>
-+#include "psb_drv.h"
-+#include "psb_drm.h"
-+#include "psb_msvdx.h"
-+
-+#include <linux/io.h>
-+#include <linux/delay.h>
-+
-+#ifndef list_first_entry
-+#define list_first_entry(ptr, type, member) \
-+ list_entry((ptr)->next, type, member)
-+#endif
-+
-+
-+static int psb_msvdx_send(struct drm_device *dev, void *cmd,
-+ unsigned long cmd_size);
-+
-+int psb_msvdx_dequeue_send(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
-+ int ret = 0;
-+
-+ if (list_empty(&dev_priv->msvdx_queue)) {
-+ PSB_DEBUG_GENERAL("MSVDXQUE: msvdx list empty.\n");
-+ dev_priv->msvdx_busy = 0;
-+ return -EINVAL;
-+ }
-+ msvdx_cmd = list_first_entry(&dev_priv->msvdx_queue,
-+ struct psb_msvdx_cmd_queue, head);
-+ PSB_DEBUG_GENERAL("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
-+ ret = psb_msvdx_send(dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
-+ if (ret) {
-+ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
-+ ret = -EINVAL;
-+ }
-+ list_del(&msvdx_cmd->head);
-+ kfree(msvdx_cmd->cmd);
-+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
-+
-+ return ret;
-+}
-+
-+int psb_msvdx_map_command(struct drm_device *dev,
-+ struct ttm_buffer_object *cmd_buffer,
-+ unsigned long cmd_offset, unsigned long cmd_size,
-+ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ int ret = 0;
-+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
-+ unsigned long cmd_size_remaining;
-+ struct ttm_bo_kmap_obj cmd_kmap;
-+ void *cmd, *tmp, *cmd_start;
-+ bool is_iomem;
-+
-+ /* command buffers may not exceed page boundary */
-+ if (cmd_size + cmd_page_offset > PAGE_SIZE)
-+ return -EINVAL;
-+
-+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2, &cmd_kmap);
-+ if (ret) {
-+ DRM_ERROR("MSVDXQUE:ret:%d\n", ret);
-+ return ret;
-+ }
-+
-+ cmd_start = (void *)ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem)
-+ + cmd_page_offset;
-+ cmd = cmd_start;
-+ cmd_size_remaining = cmd_size;
-+
-+ while (cmd_size_remaining > 0) {
-+ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
-+ uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
-+ uint32_t mmu_ptd = 0, tmp = 0;
-+
-+ PSB_DEBUG_GENERAL("cmd start at %08x cur_cmd_size = %d"
-+ " cur_cmd_id = %02x fence = %08x\n",
-+ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
-+ if ((cur_cmd_size % sizeof(uint32_t))
-+ || (cur_cmd_size > cmd_size_remaining)) {
-+ ret = -EINVAL;
-+ DRM_ERROR("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+
-+ switch (cur_cmd_id) {
-+ case VA_MSGID_RENDER:
-+ /* Fence ID */
-+ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_FENCE_VALUE,
-+ sequence);
-+ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
-+ tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
-+ 1, 0);
-+ if (tmp == 1) {
-+ mmu_ptd |= 1;
-+ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
-+ }
-+
-+ /* PTD */
-+ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
-+ break;
-+
-+ default:
-+ /* Msg not supported */
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+
-+ cmd += cur_cmd_size;
-+ cmd_size_remaining -= cur_cmd_size;
-+ }
-+
-+ if (copy_cmd) {
-+ PSB_DEBUG_GENERAL("MSVDXQUE:copying command\n");
-+
-+ tmp = drm_calloc(1, cmd_size, DRM_MEM_DRIVER);
-+ if (tmp == NULL) {
-+ ret = -ENOMEM;
-+ DRM_ERROR("MSVDX: failed to calloc, ret:%d\n",
ret); -+ goto out; -+ } -+ memcpy(tmp, cmd_start, cmd_size); -+ *msvdx_cmd = tmp; -+ } else { -+ PSB_DEBUG_GENERAL("MSVDXQUE:did NOT copy command\n"); -+ ret = psb_msvdx_send(dev, cmd_start, cmd_size); -+ if (ret) { -+ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n"); -+ ret = -EINVAL; -+ } -+ } -+ -+out: -+ ttm_bo_kunmap(&cmd_kmap); -+ -+ return ret; -+} -+ -+int psb_submit_video_cmdbuf(struct drm_device *dev, -+ struct ttm_buffer_object *cmd_buffer, -+ unsigned long cmd_offset, unsigned long cmd_size, -+ struct ttm_fence_object *fence) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ uint32_t sequence = dev_priv->sequence[PSB_ENGINE_VIDEO]; -+ unsigned long irq_flags; -+ int ret = 0; -+ -+ mutex_lock(&dev_priv->msvdx_mutex); -+ -+ psb_schedule_watchdog(dev_priv); -+ -+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags); -+ if (dev_priv->msvdx_needs_reset) { -+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags); -+ PSB_DEBUG_GENERAL("MSVDX: will reset msvdx\n"); -+ if (psb_msvdx_reset(dev_priv)) { -+ mutex_unlock(&dev_priv->msvdx_mutex); -+ ret = -EBUSY; -+ DRM_ERROR("MSVDX: Reset failed\n"); -+ return ret; -+ } -+ dev_priv->msvdx_needs_reset = 0; -+ dev_priv->msvdx_busy = 0; -+ -+ psb_msvdx_init(dev); -+ psb_msvdx_irq_preinstall(dev_priv); -+ psb_msvdx_irq_postinstall(dev_priv); -+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags); -+ } -+ -+ if (!dev_priv->msvdx_fw_loaded) { -+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags); -+ PSB_DEBUG_GENERAL("MSVDX:load /lib/firmware/msvdx_fw.bin" -+ " by udevd\n"); -+ -+ ret = psb_setup_fw(dev); -+ if (ret) { -+ mutex_unlock(&dev_priv->msvdx_mutex); -+ -+ DRM_ERROR("MSVDX:is there a /lib/firmware/msvdx_fw.bin," -+ "and udevd is configured correctly?\n"); -+ -+ /* FIXME: find a proper return value */ -+ return -EFAULT; -+ } -+ dev_priv->msvdx_fw_loaded = 1; -+ -+ psb_msvdx_irq_preinstall(dev_priv); -+ psb_msvdx_irq_postinstall(dev_priv); -+ PSB_DEBUG_GENERAL("MSVDX: load firmware successfully\n"); -+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags); -+ } -+ -+ -+ if (!dev_priv->msvdx_busy) { -+ dev_priv->msvdx_busy = 1; -+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags); -+ PSB_DEBUG_GENERAL("MSVDX: commit command to HW,seq=0x%08x\n", -+ sequence); -+ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset, -+ cmd_size, NULL, sequence, 0); -+ if (ret) { -+ mutex_unlock(&dev_priv->msvdx_mutex); -+ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n"); -+ return ret; -+ } -+ } else { -+ struct psb_msvdx_cmd_queue *msvdx_cmd; -+ void *cmd = NULL; -+ -+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags); -+ /* queue the command to be sent when the h/w is ready */ -+ PSB_DEBUG_GENERAL("MSVDXQUE: queueing sequence:%08x..\n", -+ sequence); -+ msvdx_cmd = drm_calloc(1, sizeof(struct psb_msvdx_cmd_queue), -+ DRM_MEM_DRIVER); -+ if (msvdx_cmd == NULL) { -+ mutex_unlock(&dev_priv->msvdx_mutex); -+ DRM_ERROR("MSVDXQUE: Out of memory...\n"); -+ return -ENOMEM; -+ } -+ -+ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset, -+ cmd_size, &cmd, sequence, 1); -+ if (ret) { -+ mutex_unlock(&dev_priv->msvdx_mutex); -+ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n"); -+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue), -+ DRM_MEM_DRIVER); -+ return ret; -+ } -+ msvdx_cmd->cmd = cmd; -+ msvdx_cmd->cmd_size = cmd_size; -+ msvdx_cmd->sequence = sequence; -+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags); -+ list_add_tail(&msvdx_cmd->head, &dev_priv->msvdx_queue); -+ if (!dev_priv->msvdx_busy) { -+ 
-+ dev_priv->msvdx_busy = 1;
-+ PSB_DEBUG_GENERAL("MSVDXQUE: Need immediate dequeue\n");
-+ psb_msvdx_dequeue_send(dev);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
-+ }
-+ mutex_unlock(&dev_priv->msvdx_mutex);
-+ return ret;
-+}
-+
-+int psb_msvdx_send(struct drm_device *dev, void *cmd, unsigned long cmd_size)
-+{
-+ int ret = 0;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+
-+ while (cmd_size > 0) {
-+ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
-+ if (cur_cmd_size > cmd_size) {
-+ ret = -EINVAL;
-+ DRM_ERROR("MSVDX:cmd_size %lu cur_cmd_size %lu\n",
-+ cmd_size, (unsigned long)cur_cmd_size);
-+ goto out;
-+ }
-+ /* Send the message to h/w */
-+ ret = psb_mtx_send(dev_priv, cmd);
-+ if (ret) {
-+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+ cmd += cur_cmd_size;
-+ cmd_size -= cur_cmd_size;
-+ }
-+
-+out:
-+ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
-+ return ret;
-+}
-+
-+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *msg)
-+{
-+ static uint32_t pad_msg[FWRK_PADMSG_SIZE];
-+ const uint32_t *p_msg = (uint32_t *) msg;
-+ uint32_t msg_num, words_free, ridx, widx;
-+ int ret = 0;
-+
-+ PSB_DEBUG_GENERAL("MSVDX: psb_mtx_send\n");
-+
-+ /* we need clocks enabled before we touch VEC local ram */
-+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+ msg_num = (MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) + 3) / 4;
-+
-+ if (msg_num > NUM_WORDS_MTX_BUF) {
-+ ret = -EINVAL;
-+ DRM_ERROR("MSVDX: message exceeds maximum, ret:%d\n", ret);
-+ goto out;
-+ }
-+
-+ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
-+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+
-+ /* message would wrap, need to send a pad message */
-+ if (widx + msg_num > NUM_WORDS_MTX_BUF) {
-+ /* Shouldn't happen for a PAD message itself */
-+ BUG_ON(MEMIO_READ_FIELD(msg, FWRK_GENMSG_ID)
-+ == FWRK_MSGID_PADDING);
-+
-+ /* if the read pointer is at zero then we must wait for it to
-+ * change, otherwise the write pointer will equal the read
-+ * pointer, which should only happen when the buffer is empty
-+ *
-+ * This will only happen if we try to overfill the queue,
-+ * queue management should make
-+ * sure this never happens in the first place.
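-+ *
-+ * For example, with the write index four words short of
-+ * NUM_WORDS_MTX_BUF, the pad message below goes out with
-+ * FWRK_GENMSG_SIZE = 4 << 2 = 16 bytes, which advances widx to
-+ * zero before the real message is written.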
-+ */
-+ BUG_ON(0 == ridx);
-+ if (0 == ridx) {
-+ ret = -EINVAL;
-+ DRM_ERROR("MSVDX: RIndex=0, ret:%d\n", ret);
-+ goto out;
-+ }
-+ /* Send a pad message */
-+ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_SIZE,
-+ (NUM_WORDS_MTX_BUF - widx) << 2);
-+ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_ID,
-+ FWRK_MSGID_PADDING);
-+ psb_mtx_send(dev_priv, pad_msg);
-+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+ }
-+
-+ if (widx >= ridx)
-+ words_free = NUM_WORDS_MTX_BUF - (widx - ridx);
-+ else
-+ words_free = ridx - widx;
-+
-+ BUG_ON(msg_num > words_free);
-+ if (msg_num > words_free) {
-+ ret = -EINVAL;
-+ DRM_ERROR("MSVDX: msg_num > words_free, ret:%d\n", ret);
-+ goto out;
-+ }
-+
-+ while (msg_num > 0) {
-+ PSB_WMSVDX32(*p_msg++, MSVDX_COMMS_TO_MTX_BUF + (widx << 2));
-+ msg_num--;
-+ widx++;
-+ if (NUM_WORDS_MTX_BUF == widx)
-+ widx = 0;
-+ }
-+ PSB_WMSVDX32(widx, MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+
-+ /* Make sure clocks are enabled before we kick */
-+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+ /* signal an interrupt to let the mtx know there is a new message */
-+ PSB_WMSVDX32(1, MSVDX_MTX_KICKI);
-+
-+out:
-+ return ret;
-+}
-+
-+/*
-+ * MSVDX MTX interrupt
-+ */
-+void psb_msvdx_mtx_interrupt(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ static uint32_t buf[128]; /* message buffer */
-+ uint32_t ridx, widx;
-+ uint32_t num, ofs; /* message num and offset */
-+
-+ PSB_DEBUG_GENERAL("MSVDX:Got a MSVDX MTX interrupt\n");
-+
-+ /* Are clocks enabled - If not enable before
-+ * attempting to read from VLR
-+ */
-+ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all)) {
-+ PSB_DEBUG_GENERAL("MSVDX:Clocks disabled when Interrupt set\n");
-+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+ }
-+
-+loop: /* just for coding style check */
-+ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_RD_INDEX);
-+ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_WRT_INDEX);
-+
-+ /* Get out of here if nothing */
-+ if (ridx == widx)
-+ goto done;
-+
-+ ofs = 0;
-+ buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2));
-+
-+ /* round to nearest word */
-+ num = (MEMIO_READ_FIELD(buf, FWRK_GENMSG_SIZE) + 3) / 4;
-+
-+ /* ASSERT(num <= sizeof(buf) / sizeof(uint32_t)); */
-+
-+ if (++ridx >= NUM_WORDS_HOST_BUF)
-+ ridx = 0;
-+
-+ for (ofs++; ofs < num; ofs++) {
-+ buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2));
-+
-+ if (++ridx >= NUM_WORDS_HOST_BUF)
-+ ridx = 0;
-+ }
-+
-+ /* Update the Read index */
-+ PSB_WMSVDX32(ridx, MSVDX_COMMS_TO_HOST_RD_INDEX);
-+
-+ if (dev_priv->msvdx_needs_reset)
-+ goto loop;
-+
-+ switch (MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID)) {
-+ case VA_MSGID_CMD_HW_PANIC:
-+ case VA_MSGID_CMD_FAILED: {
-+ uint32_t fence = MEMIO_READ_FIELD(buf,
-+ FW_VA_CMD_FAILED_FENCE_VALUE);
-+ uint32_t fault = MEMIO_READ_FIELD(buf,
-+ FW_VA_CMD_FAILED_IRQSTATUS);
-+ uint32_t msg_id = MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID);
-+ uint32_t diff = 0;
-+
-+ if (msg_id == VA_MSGID_CMD_HW_PANIC)
-+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_HW_PANIC:"
-+ "Fault detected"
-+ " - Fence: %08x, Status: %08x"
-+ " - resetting and ignoring error\n",
-+ fence, fault);
-+ else
-+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_FAILED:"
-+ "Fault detected"
-+ " - Fence: %08x, Status: %08x"
-+ " - resetting and ignoring error\n",
-+ fence, fault);
-+
-+ dev_priv->msvdx_needs_reset = 1;
-+
-+ if (msg_id == VA_MSGID_CMD_HW_PANIC) {
-+ diff = dev_priv->msvdx_current_sequence
-+ -
-+ dev_priv->sequence[PSB_ENGINE_VIDEO];
-+
-+ if (diff > 0x0FFFFFFF)
-+ dev_priv->msvdx_current_sequence++;
-+
-+ PSB_DEBUG_GENERAL("MSVDX: Fence ID missing, "
-+ "assuming %08x\n",
-+ dev_priv->msvdx_current_sequence);
-+ } else {
-+ dev_priv->msvdx_current_sequence = fence;
-+ }
-+
-+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
-+ dev_priv->msvdx_current_sequence,
-+ _PSB_FENCE_TYPE_EXE, DRM_CMD_FAILED);
-+
-+ /* Flush the command queue */
-+ psb_msvdx_flush_cmd_queue(dev);
-+
-+ goto done;
-+ }
-+ case VA_MSGID_CMD_COMPLETED: {
-+ uint32_t fence = MEMIO_READ_FIELD(buf,
-+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
-+ uint32_t flags = MEMIO_READ_FIELD(buf,
-+ FW_VA_CMD_COMPLETED_FLAGS);
-+
-+ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED: "
-+ "FenceID: %08x, flags: 0x%x\n",
-+ fence, flags);
-+
-+ dev_priv->msvdx_current_sequence = fence;
-+
-+ psb_fence_handler(dev, PSB_ENGINE_VIDEO);
-+
-+ if (flags & FW_VA_RENDER_HOST_INT) {
-+ /* Now send the next command from the msvdx cmd queue */
-+ psb_msvdx_dequeue_send(dev);
-+ goto done;
-+ }
-+
-+ break;
-+ }
-+ case VA_MSGID_CMD_COMPLETED_BATCH: {
-+ uint32_t fence = MEMIO_READ_FIELD(buf,
-+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
-+ uint32_t tickcnt = MEMIO_READ_FIELD(buf,
-+ FW_VA_CMD_COMPLETED_NO_TICKS);
-+
-+ /* we have the fence value in the message */
-+ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED_BATCH:"
-+ " FenceID: %08x, TickCount: %08x\n",
-+ fence, tickcnt);
-+ dev_priv->msvdx_current_sequence = fence;
-+
-+ break;
-+ }
-+ case VA_MSGID_ACK:
-+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_ACK\n");
-+ break;
-+
-+ case VA_MSGID_TEST1:
-+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST1\n");
-+ break;
-+
-+ case VA_MSGID_TEST2:
-+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST2\n");
-+ break;
-+ /* Don't need to do anything with these messages */
-+
-+ case VA_MSGID_DEBLOCK_REQUIRED: {
-+ uint32_t ctxid = MEMIO_READ_FIELD(buf,
-+ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
-+
-+ /* The BE will now be locked. */
-+ /* Unblock rendec by reading the mtx2mtx end of slice */
-+ (void) PSB_RMSVDX32(MSVDX_RENDEC_READ_DATA);
-+
-+ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_DEBLOCK_REQUIRED"
-+ " Context=%08x\n", ctxid);
-+ goto done;
-+ }
-+ default:
-+ DRM_ERROR("ERROR: msvdx Unknown message from MTX\n");
-+ goto done;
-+ }
-+
-+done:
-+
-+#if 1
-+ if (!dev_priv->msvdx_busy) {
-+ /* If the firmware says the hardware is idle
-+ * and the CCB is empty then we can power down
-+ */
-+ uint32_t fs_status = PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS);
-+ uint32_t ccb_roff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
-+ uint32_t ccb_woff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+
-+ /* check that clocks are enabled before reading VLR */
-+ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
-+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+ if ((fs_status & MSVDX_FW_STATUS_HW_IDLE) &&
-+ (ccb_roff == ccb_woff)) {
-+ PSB_DEBUG_GENERAL("MSVDX: Setting clock to minimal\n");
-+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
-+ }
-+ }
-+#endif
-+ DRM_MEMORYBARRIER(); /* TBD check this...
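-+ * presumably so the MSVDX_COMMS_TO_HOST_RD_INDEX update above
-+ * is visible to the MTX before we return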
-+
-+void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
-+		int *msvdx_lockup, int *msvdx_idle)
-+{
-+	int tmp;
-+	*msvdx_lockup = 0;
-+	*msvdx_idle = 1;
-+
-+	if (!dev_priv->has_msvdx)
-+		return;
-+#if 0
-+	PSB_DEBUG_GENERAL("MSVDXTimer: current_sequence:%d "
-+		"last_sequence:%d and last_submitted_sequence:%d\n",
-+		dev_priv->msvdx_current_sequence,
-+		dev_priv->msvdx_last_sequence,
-+		dev_priv->sequence[PSB_ENGINE_VIDEO]);
-+#endif
-+
-+	tmp = dev_priv->msvdx_current_sequence -
-+		dev_priv->sequence[PSB_ENGINE_VIDEO];
-+
-+	if (tmp > 0x0FFFFFFF) {
-+		if (dev_priv->msvdx_current_sequence ==
-+			dev_priv->msvdx_last_sequence) {
-+			DRM_ERROR("MSVDXTimer: locked up for sequence:%d\n",
-+				dev_priv->msvdx_current_sequence);
-+			*msvdx_lockup = 1;
-+		} else {
-+			PSB_DEBUG_GENERAL("MSVDXTimer: "
-+				"msvdx responded fine so far\n");
-+			dev_priv->msvdx_last_sequence =
-+				dev_priv->msvdx_current_sequence;
-+			*msvdx_idle = 0;
-+		}
-+	}
-+}
-+
-+/* power up msvdx, OSPM function */
-+int psb_power_up_msvdx(struct drm_device *dev)
-+{
-+	struct drm_psb_private *dev_priv =
-+		(struct drm_psb_private *)dev->dev_private;
-+	int ret;
-+
-+	if ((dev_priv->msvdx_state & PSB_PWR_STATE_MASK) != PSB_PWR_STATE_D0i3)
-+		return -EINVAL;
-+
-+	PSB_DEBUG_TMP("power up msvdx\n");
-+	dump_stack();
-+
-+	psb_up_island_power(dev, PSB_VIDEO_DEC_ISLAND);
-+
-+	ret = psb_msvdx_init(dev);
-+	if (ret) {
-+		DRM_ERROR("failed to init msvdx when powering it up\n");
-+		goto err;
-+	}
-+	PSB_WMSVDX32(dev_priv->msvdx_clk_state, MSVDX_MAN_CLK_ENABLE);
-+
-+	PSB_DEBUG_GENERAL("FIXME: restore registers or init msvdx\n");
-+
-+	PSB_DEBUG_GENERAL("FIXME: set up MSVDX MMU\n");
-+
-+	dev_priv->msvdx_state = PSB_PWR_STATE_D0i0;
-+	return 0;
-+
-+err:
-+	return -1;
-+}
-+
-+int psb_power_down_msvdx(struct drm_device *dev)
-+{
-+	struct drm_psb_private *dev_priv =
-+		(struct drm_psb_private *)dev->dev_private;
-+
-+	if ((dev_priv->msvdx_state & PSB_PWR_STATE_MASK) != PSB_PWR_STATE_D0i0)
-+		return -EINVAL;
-+	if (dev_priv->msvdx_busy) {
-+		PSB_DEBUG_GENERAL("FIXME: MSVDX is busy, should wait for it\n");
-+		return -EBUSY;
-+	}
-+
-+	dev_priv->msvdx_clk_state = PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE);
-+	PSB_DEBUG_GENERAL("FIXME: save MSVDX registers\n");
-+
-+	PSB_DEBUG_GENERAL("FIXME: save MSVDX context\n");
-+	psb_down_island_power(dev, PSB_VIDEO_DEC_ISLAND);
-+
-+	dev_priv->msvdx_state = PSB_PWR_STATE_D0i3;
-+
-+	return 0;
-+}
-diff -uNr a/drivers/gpu/drm/psb/psb_msvdx.h b/drivers/gpu/drm/psb/psb_msvdx.h
---- a/drivers/gpu/drm/psb/psb_msvdx.h	1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/psb_msvdx.h	2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,442 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
-+ * Copyright (c) Imagination Technologies Limited, UK
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+ -+#ifndef _PSB_MSVDX_H_ -+#define _PSB_MSVDX_H_ -+ -+#include "psb_drv.h" -+ -+void psb_msvdx_mtx_interrupt(struct drm_device *dev); -+int psb_msvdx_init(struct drm_device *dev); -+int psb_msvdx_uninit(struct drm_device *dev); -+int psb_msvdx_reset(struct drm_psb_private *dev_priv); -+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver); -+int psb_mtx_send(struct drm_psb_private *dev_priv, const void *pvMsg); -+void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv); -+void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv); -+void psb_msvdx_flush_cmd_queue(struct drm_device *dev); -+extern void psb_msvdx_lockup(struct drm_psb_private *dev_priv, -+ int *msvdx_lockup, int *msvdx_idle); -+int psb_setup_fw(struct drm_device *dev); -+int psb_power_up_msvdx(struct drm_device *dev); -+int psb_power_down_msvdx(struct drm_device *dev); -+ -+/* Non-Optimal Invalidation is not default */ -+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2 -+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100) -+ -+#define FW_VA_RENDER_HOST_INT 0x00004000 -+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020 -+ -+/* There is no work currently underway on the hardware */ -+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001 -+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200 -+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 \ -+ (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | \ -+ MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \ -+ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE) -+ -+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 \ -+ (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \ -+ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE) -+ -+#define POULSBO_D0 0x5 -+#define POULSBO_D1 0x6 -+#define PSB_REVID_OFFSET 0x8 -+ -+#define MTX_CODE_BASE (0x80900000) -+#define MTX_DATA_BASE (0x82880000) -+#define PC_START_ADDRESS (0x80900000) -+ -+#define MTX_CORE_CODE_MEM (0x10) -+#define MTX_CORE_DATA_MEM (0x18) -+ -+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100) -+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8) -+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK \ -+ (0x00010000) -+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK \ -+ (0x00100000) -+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK \ -+ (0x01000000) -+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK \ -+ (0x10000000) -+ -+#define clk_enable_all \ -+(MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \ 
-+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK) -+ -+#define clk_enable_minimal \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK -+ -+#define clk_enable_auto \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK -+ -+#define msvdx_sw_reset_all \ -+(MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \ -+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \ -+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \ -+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \ -+MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK) -+ -+#define MTX_INTERNAL_REG(R_SPECIFIER , U_SPECIFIER) \ -+ (((R_SPECIFIER)<<4) | (U_SPECIFIER)) -+#define MTX_PC MTX_INTERNAL_REG(0, 5) -+ -+#define RENDEC_A_SIZE (1024 * 1024) -+#define RENDEC_B_SIZE (1024 * 1024) -+ -+#define MEMIO_READ_FIELD(vpMem, field) \ -+ ((uint32_t)(((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \ -+ & field##_MASK) >> field##_SHIFT)) -+ -+#define MEMIO_WRITE_FIELD(vpMem, field, value) \ -+ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \ -+ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \ -+ & (field##_TYPE)~field##_MASK) | \ -+ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT) & field##_MASK); -+ -+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, value) \ -+ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \ -+ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) | \ -+ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT))); -+ -+#define REGIO_READ_FIELD(reg_val, reg, field) \ -+ ((reg_val & reg##_##field##_MASK) >> reg##_##field##_SHIFT) -+ -+#define REGIO_WRITE_FIELD(reg_val, reg, field, value) \ -+ (reg_val) = \ -+ ((reg_val) & ~(reg##_##field##_MASK)) | \ -+ (((value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK)); -+ -+#define REGIO_WRITE_FIELD_LITE(reg_val, reg, field, value) \ -+ (reg_val) = \ -+ ((reg_val) | ((value) << (reg##_##field##_SHIFT))); -+ -+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK \ -+ (0x00000001) -+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK \ -+ (0x00000002) -+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK \ -+ (0x00000004) -+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK \ -+ (0x00000008) -+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK \ -+ (0x00000010) -+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK \ -+ (0x00000020) -+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK \ -+ (0x00000040) -+ -+#define clk_enable_all \ -+ (MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \ 
-+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \ -+MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK) -+ -+#define clk_enable_minimal \ -+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \ -+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK -+ -+/* MTX registers */ -+#define MSVDX_MTX_ENABLE (0x0000) -+#define MSVDX_MTX_KICKI (0x0088) -+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC) -+#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8) -+#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104) -+#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108) -+#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C) -+#define MSVDX_MTX_SOFT_RESET (0x0200) -+ -+/* MSVDX registers */ -+#define MSVDX_CONTROL (0x0600) -+#define MSVDX_INTERRUPT_CLEAR (0x060C) -+#define MSVDX_INTERRUPT_STATUS (0x0608) -+#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610) -+#define MSVDX_MMU_CONTROL0 (0x0680) -+#define MSVDX_MTX_RAM_BANK (0x06F0) -+#define MSVDX_MAN_CLK_ENABLE (0x0620) -+ -+/* RENDEC registers */ -+#define MSVDX_RENDEC_CONTROL0 (0x0868) -+#define MSVDX_RENDEC_CONTROL1 (0x086C) -+#define MSVDX_RENDEC_BUFFER_SIZE (0x0870) -+#define MSVDX_RENDEC_BASE_ADDR0 (0x0874) -+#define MSVDX_RENDEC_BASE_ADDR1 (0x0878) -+#define MSVDX_RENDEC_READ_DATA (0x0898) -+#define MSVDX_RENDEC_CONTEXT0 (0x0950) -+#define MSVDX_RENDEC_CONTEXT1 (0x0954) -+#define MSVDX_RENDEC_CONTEXT2 (0x0958) -+#define MSVDX_RENDEC_CONTEXT3 (0x095C) -+#define MSVDX_RENDEC_CONTEXT4 (0x0960) -+#define MSVDX_RENDEC_CONTEXT5 (0x0964) -+ -+/* -+ * This defines the MSVDX communication buffer -+ */ -+#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */ -+/*!< Host buffer size (in 32-bit words) */ -+#define NUM_WORDS_HOST_BUF (100) -+/*!< MTX buffer size (in 32-bit words) */ -+#define NUM_WORDS_MTX_BUF (100) -+ -+/* There is no work currently underway on the hardware */ -+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001 -+ -+#define MSVDX_COMMS_AREA_ADDR (0x02cc0) -+ -+#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18) -+#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04) -+#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10) -+#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00) -+#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04) -+#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08) -+#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C) -+#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10) -+#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14) -+#define MSVDX_COMMS_TO_MTX_CB_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x18) -+#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C) -+#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20) -+#define MSVDX_COMMS_TO_MTX_BUF \ -+ (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2)) -+ -+#define MSVDX_COMMS_AREA_END \ -+ (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2)) -+ -+#if (MSVDX_COMMS_AREA_END != 0x03000) -+#error -+#endif -+ -+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000) -+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31) -+ -+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000) -+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16) -+ -+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000) -+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20) -+ -+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC) 
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2) -+ -+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002) -+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1) -+ -+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001) -+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0) -+ -+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001) -+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0) -+ -+#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001) -+#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0) -+ -+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100) -+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8) -+ -+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00) -+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8) -+ -+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000) -+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14) -+ -+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002) -+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1) -+ -+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000) -+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16) -+ -+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF) -+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0) -+ -+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000) -+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16) -+ -+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF) -+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0) -+ -+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000) -+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18) -+ -+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000) -+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16) -+ -+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000) -+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24) -+ -+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001) -+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0) -+ -+/* Start of parser specific Host->MTX messages. */ -+#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80) -+ -+/* Start of parser specific MTX->Host messages. */ -+#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0) -+ -+#define FWRK_MSGID_PADDING (0) -+ -+#define FWRK_GENMSG_SIZE_TYPE uint8_t -+#define FWRK_GENMSG_SIZE_MASK (0xFF) -+#define FWRK_GENMSG_SIZE_SHIFT (0) -+#define FWRK_GENMSG_SIZE_OFFSET (0x0000) -+#define FWRK_GENMSG_ID_TYPE uint8_t -+#define FWRK_GENMSG_ID_MASK (0xFF) -+#define FWRK_GENMSG_ID_SHIFT (0) -+#define FWRK_GENMSG_ID_OFFSET (0x0001) -+#define FWRK_PADMSG_SIZE (2) -+ -+/* This type defines the framework specified message ids */ -+enum { -+ /* ! Sent by the DXVA driver on the host to the mtx firmware. -+ */ -+ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG, -+ VA_MSGID_RENDER, -+ VA_MSGID_DEBLOCK, -+ VA_MSGID_BUBBLE, -+ -+ /* Test Messages */ -+ VA_MSGID_TEST1, -+ VA_MSGID_TEST2, -+ -+ /*! Sent by the mtx firmware to itself. -+ */ -+ VA_MSGID_RENDER_MC_INTERRUPT, -+ -+ /*! Sent by the DXVA firmware on the MTX to the host. 
-+ */ -+ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG, -+ VA_MSGID_CMD_COMPLETED_BATCH, -+ VA_MSGID_DEBLOCK_REQUIRED, -+ VA_MSGID_TEST_RESPONCE, -+ VA_MSGID_ACK, -+ -+ VA_MSGID_CMD_FAILED, -+ VA_MSGID_CMD_UNSUPPORTED, -+ VA_MSGID_CMD_HW_PANIC, -+}; -+ -+/* MSVDX Firmware interface */ -+#define FW_VA_INIT_SIZE (8) -+#define FW_VA_DEBUG_TEST2_SIZE (4) -+ -+/* FW_VA_DEBUG_TEST2 MSG_SIZE */ -+#define FW_VA_DEBUG_TEST2_MSG_SIZE_TYPE uint8_t -+#define FW_VA_DEBUG_TEST2_MSG_SIZE_MASK (0xFF) -+#define FW_VA_DEBUG_TEST2_MSG_SIZE_OFFSET (0x0000) -+#define FW_VA_DEBUG_TEST2_MSG_SIZE_SHIFT (0) -+ -+/* FW_VA_DEBUG_TEST2 ID */ -+#define FW_VA_DEBUG_TEST2_ID_TYPE uint8_t -+#define FW_VA_DEBUG_TEST2_ID_MASK (0xFF) -+#define FW_VA_DEBUG_TEST2_ID_OFFSET (0x0001) -+#define FW_VA_DEBUG_TEST2_ID_SHIFT (0) -+ -+/* FW_VA_CMD_FAILED FENCE_VALUE */ -+#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t -+#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF) -+#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004) -+#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0) -+ -+/* FW_VA_CMD_FAILED IRQSTATUS */ -+#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t -+#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF) -+#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008) -+#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0) -+ -+/* FW_VA_CMD_COMPLETED FENCE_VALUE */ -+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t -+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF) -+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004) -+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0) -+ -+/* FW_VA_CMD_COMPLETED FLAGS */ -+#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4) -+#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t -+#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF) -+#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF) -+#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008) -+#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0) -+ -+/* FW_VA_CMD_COMPLETED NO_TICKS */ -+#define FW_VA_CMD_COMPLETED_NO_TICKS_TYPE uint16_t -+#define FW_VA_CMD_COMPLETED_NO_TICKS_MASK (0xFFFF) -+#define FW_VA_CMD_COMPLETED_NO_TICKS_OFFSET (0x0002) -+#define FW_VA_CMD_COMPLETED_NO_TICKS_SHIFT (0) -+ -+/* FW_VA_DEBLOCK_REQUIRED CONTEXT */ -+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t -+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF) -+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004) -+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0) -+ -+/* FW_VA_INIT GLOBAL_PTD */ -+#define FW_VA_INIT_GLOBAL_PTD_TYPE uint32_t -+#define FW_VA_INIT_GLOBAL_PTD_MASK (0xFFFFFFFF) -+#define FW_VA_INIT_GLOBAL_PTD_OFFSET (0x0004) -+#define FW_VA_INIT_GLOBAL_PTD_SHIFT (0) -+ -+/* FW_VA_RENDER FENCE_VALUE */ -+#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t -+#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF) -+#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010) -+#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0) -+ -+/* FW_VA_RENDER MMUPTD */ -+#define FW_VA_RENDER_MMUPTD_TYPE uint32_t -+#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF) -+#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004) -+#define FW_VA_RENDER_MMUPTD_SHIFT (0) -+ -+/* FW_VA_RENDER BUFFER_ADDRESS */ -+#define FW_VA_RENDER_BUFFER_ADDRESS_TYPE uint32_t -+#define FW_VA_RENDER_BUFFER_ADDRESS_MASK (0xFFFFFFFF) -+#define FW_VA_RENDER_BUFFER_ADDRESS_OFFSET (0x0008) -+#define FW_VA_RENDER_BUFFER_ADDRESS_SHIFT (0) -+ -+/* FW_VA_RENDER BUFFER_SIZE */ -+#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t -+#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF) -+#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002) -+#define 
FW_VA_RENDER_BUFFER_SIZE_SHIFT (0) -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/psb_msvdxinit.c b/drivers/gpu/drm/psb/psb_msvdxinit.c ---- a/drivers/gpu/drm/psb/psb_msvdxinit.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_msvdxinit.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,668 @@ -+/** -+ * file psb_msvdxinit.c -+ * MSVDX initialization and mtx-firmware upload -+ * -+ */ -+ -+/************************************************************************** -+ * -+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA -+ * Copyright (c) Imagination Technologies Limited, UK -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ *
-+ **************************************************************************/
-+
-+#include <drm/drmP.h>
-+#include <drm/drm.h>
-+#include "psb_drv.h"
-+#include "psb_msvdx.h"
-+#include <linux/firmware.h>
-+
-+#define MSVDX_REG (dev_priv->msvdx_reg)
-+uint8_t psb_rev_id;
-+/* MSVDX FW header */
-+struct msvdx_fw {
-+	uint32_t ver;
-+	uint32_t text_size;
-+	uint32_t data_size;
-+	uint32_t data_location;
-+};
-+
-+int psb_wait_for_register(struct drm_psb_private *dev_priv,
-+			uint32_t offset, uint32_t value, uint32_t enable)
-+{
-+	uint32_t tmp;
-+	uint32_t poll_cnt = 10000;
-+	while (poll_cnt) {
-+		tmp = PSB_RMSVDX32(offset);
-+		if (value == (tmp & enable))	/* All the bits are reset */
-+			return 0;	/* So exit */
-+
-+		/* Wait a bit */
-+		DRM_UDELAY(1000);
-+		poll_cnt--;
-+	}
-+	DRM_ERROR("MSVDX: Timeout while waiting for register %08x:"
-+		" expecting %08x (mask %08x), got %08x\n",
-+		offset, value, enable, tmp);
-+
-+	return 1;
-+}
-+
-+int psb_poll_mtx_irq(struct drm_psb_private *dev_priv)
-+{
-+	int ret = 0;
-+	uint32_t mtx_int = 0;
-+
-+	REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
-+			1);
-+
-+	ret = psb_wait_for_register(dev_priv, MSVDX_INTERRUPT_STATUS,
-+				/* Required value */
-+				mtx_int,
-+				/* Enabled bits */
-+				mtx_int);
-+
-+	if (ret) {
-+		DRM_ERROR("MSVDX: Error: MTX did not return"
-+			" an int within a reasonable time\n");
-+		return ret;
-+	}
-+
-+	PSB_DEBUG_IRQ("MSVDX: Got MTX Int\n");
-+
-+	/* Got it so clear the bit */
-+	PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
-+
-+	return ret;
-+}
-+
-+void psb_write_mtx_core_reg(struct drm_psb_private *dev_priv,
-+			const uint32_t core_reg, const uint32_t val)
-+{
-+	uint32_t reg = 0;
-+
-+	/* Put data in MTX_RW_DATA */
-+	PSB_WMSVDX32(val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
-+
-+	/* DREADY is set to 0 and request a write */
-+	reg = core_reg;
-+	REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
-+			MTX_RNW, 0);
-+	REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
-+			MTX_DREADY, 0);
-+	PSB_WMSVDX32(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
-+
-+	psb_wait_for_register(dev_priv,
-+		MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
-+		MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
-+		MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
-+}
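The REGIO_WRITE_FIELD* helpers used throughout these functions implement the usual _MASK/_SHIFT field convention declared in psb_msvdx.h. A standalone sketch of what a masked field write does, in ordinary user-space C; write_field() is this example's own name, while the MTX_MCMID mask and shift match the header above:

    #include <stdint.h>
    #include <stdio.h>

    /* Field layout copied from psb_msvdx.h: MCMID occupies bits 27:20. */
    #define MTX_MCMID_MASK  (0x0FF00000)
    #define MTX_MCMID_SHIFT (20)

    /* What REGIO_WRITE_FIELD expands to: clear the field's bits, then
     * OR in the shifted value, clipped to the field's mask. */
    static uint32_t write_field(uint32_t reg, uint32_t mask, int shift,
                                uint32_t value)
    {
            return (reg & ~mask) | ((value << shift) & mask);
    }

    int main(void)
    {
            uint32_t ctrl = 0xFFFFFFFF;

            ctrl = write_field(ctrl, MTX_MCMID_MASK, MTX_MCMID_SHIFT, 0x18);
            /* prints ctrl = 0xf18fffff: only bits 27:20 changed */
            printf("ctrl = 0x%08x\n", ctrl);
            return 0;
    }

The _LITE variants in the driver skip the clearing step and only OR in the new value, which is safe when the destination register value starts out as zero.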
-+
-+void psb_upload_fw(struct drm_psb_private *dev_priv,
-+		const uint32_t data_mem, uint32_t ram_bank_size,
-+		uint32_t address, const unsigned int words,
-+		const uint32_t * const data)
-+{
-+	uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
-+	uint32_t access_ctrl;
-+
-+	/* Save the access control register... */
-+	access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+	/* Wait for MCMSTAT to become idle (1) */
-+	psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
-+			1,	/* Required Value */
-+			0xffffffff /* Enables */);
-+
-+	for (loop = 0; loop < words; loop++) {
-+		ram_id = data_mem + (address / ram_bank_size);
-+		if (ram_id != cur_bank) {
-+			addr = address >> 2;
-+			ctrl = 0;
-+			REGIO_WRITE_FIELD_LITE(ctrl,
-+					MSVDX_MTX_RAM_ACCESS_CONTROL,
-+					MTX_MCMID, ram_id);
-+			REGIO_WRITE_FIELD_LITE(ctrl,
-+					MSVDX_MTX_RAM_ACCESS_CONTROL,
-+					MTX_MCM_ADDR, addr);
-+			REGIO_WRITE_FIELD_LITE(ctrl,
-+					MSVDX_MTX_RAM_ACCESS_CONTROL,
-+					MTX_MCMAI, 1);
-+			PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
-+			cur_bank = ram_id;
-+		}
-+		address += 4;
-+
-+		PSB_WMSVDX32(data[loop],
-+			MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
-+
-+		/* Wait for MCMSTAT to become idle (1) */
-+		psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
-+				1,	/* Required Value */
-+				0xffffffff /* Enables */);
-+	}
-+	PSB_DEBUG_GENERAL("MSVDX: Upload done\n");
-+
-+	/* Restore the access control register... */
-+	PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
-+}
-+
-+static int psb_verify_fw(struct drm_psb_private *dev_priv,
-+			const uint32_t ram_bank_size,
-+			const uint32_t data_mem, uint32_t address,
-+			const uint32_t words, const uint32_t * const data)
-+{
-+	uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
-+	uint32_t access_ctrl;
-+	int ret = 0;
-+
-+	/* Save the access control register... */
-+	access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+	/* Wait for MCMSTAT to become idle (1) */
-+	psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
-+			1,	/* Required Value */
-+			0xffffffff /* Enables */);
-+
-+	for (loop = 0; loop < words; loop++) {
-+		uint32_t tmp;
-+		ram_id = data_mem + (address / ram_bank_size);
-+
-+		if (ram_id != cur_bank) {
-+			addr = address >> 2;
-+			ctrl = 0;
-+			REGIO_WRITE_FIELD_LITE(ctrl,
-+					MSVDX_MTX_RAM_ACCESS_CONTROL,
-+					MTX_MCMID, ram_id);
-+			REGIO_WRITE_FIELD_LITE(ctrl,
-+					MSVDX_MTX_RAM_ACCESS_CONTROL,
-+					MTX_MCM_ADDR, addr);
-+			REGIO_WRITE_FIELD_LITE(ctrl,
-+					MSVDX_MTX_RAM_ACCESS_CONTROL,
-+					MTX_MCMAI, 1);
-+			REGIO_WRITE_FIELD_LITE(ctrl,
-+					MSVDX_MTX_RAM_ACCESS_CONTROL,
-+					MTX_MCMR, 1);
-+
-+			PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+			cur_bank = ram_id;
-+		}
-+		address += 4;
-+
-+		/* Wait for MCMSTAT to become idle (1) */
-+		psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
-+				1,	/* Required Value */
-+				0xffffffff /* Enables */);
-+
-+		tmp = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
-+		if (data[loop] != tmp) {
-+			DRM_ERROR("psb: Firmware validation fails"
-+				" at index=%08x\n", loop);
-+			ret = 1;
-+			break;
-+		}
-+	}
-+
-+	/* Restore the access control register... */
-+	PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+	return ret;
-+}
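msvdx_get_fw() below cross-checks the firmware blob length against the sizes carried in the struct msvdx_fw header above: the file must be exactly the header plus text_size + data_size 32-bit words. A compile-ready sketch of just that layout rule; the type and function names here are invented for the example:

    #include <stdint.h>
    #include <stddef.h>

    /* Header layout as in struct msvdx_fw: version, then the number of
     * 32-bit text and data words that follow the header in the blob. */
    struct msvdx_fw_hdr {
            uint32_t ver;
            uint32_t text_size;
            uint32_t data_size;
            uint32_t data_location;
    };

    /* Returns nonzero iff the blob is exactly header + text + data words. */
    static int msvdx_fw_layout_ok(const void *blob, size_t size)
    {
            const struct msvdx_fw_hdr *hdr = blob;

            if (size < sizeof(*hdr))
                    return 0;       /* truncated header */

            return size == sizeof(*hdr) +
                    sizeof(uint32_t) * ((size_t)hdr->text_size +
                                        (size_t)hdr->data_size);
    }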
-+
-+static uint32_t *msvdx_get_fw(struct drm_device *dev,
-+			const struct firmware **raw, uint8_t *name)
-+{
-+	struct drm_psb_private *dev_priv = dev->dev_private;
-+	int rc, fw_size;
-+	int *ptr = NULL;
-+
-+	rc = request_firmware(raw, name, &dev->pdev->dev);
-+	if (rc < 0) {
-+		DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n",
-+			name, rc);
-+		return NULL;
-+	}
-+
-+	if ((*raw)->size < sizeof(struct msvdx_fw)) {
-+		DRM_ERROR("MSVDX: %s is not the correct size (%zd)\n",
-+			name, (*raw)->size);
-+		return NULL;
-+	}
-+
-+	ptr = (int *) (*raw)->data;
-+
-+	if (!ptr) {
-+		DRM_ERROR("MSVDX: Failed to load %s\n", name);
-+		return NULL;
-+	}
-+
-+	/* another sanity check... */
-+	fw_size = sizeof(struct msvdx_fw) +
-+		sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
-+		sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size;
-+	if ((*raw)->size != fw_size) {
-+		DRM_ERROR("MSVDX: %s is not the correct size (%zd)\n",
-+			name, (*raw)->size);
-+		return NULL;
-+	}
-+	dev_priv->msvdx_fw = drm_calloc(1, fw_size, DRM_MEM_DRIVER);
-+	if (dev_priv->msvdx_fw == NULL)
-+		DRM_ERROR("MSVDX: allocate FW buffer failed\n");
-+	else {
-+		memcpy(dev_priv->msvdx_fw, ptr, fw_size);
-+		dev_priv->msvdx_fw_size = fw_size;
-+	}
-+
-+	PSB_DEBUG_GENERAL("MSVDX: releasing firmware resources\n");
-+	release_firmware(*raw);
-+
-+	return dev_priv->msvdx_fw;
-+}
-+
-+int psb_setup_fw(struct drm_device *dev)
-+{
-+	struct drm_psb_private *dev_priv = dev->dev_private;
-+	int ret = 0;
-+
-+	uint32_t ram_bank_size;
-+	struct msvdx_fw *fw;
-+	uint32_t *fw_ptr = NULL;
-+	uint32_t *text_ptr = NULL;
-+	uint32_t *data_ptr = NULL;
-+	const struct firmware *raw = NULL;
-+	/* todo: Assert the clock is on - if not, turn it on to upload code */
-+
-+	PSB_DEBUG_GENERAL("MSVDX: psb_setup_fw\n");
-+	PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+	/* Reset MTX */
-+	PSB_WMSVDX32(MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK,
-+		MSVDX_MTX_SOFT_RESET);
-+
-+	/* Initialise the communication control area to 0 */
-+	if (psb_rev_id >= POULSBO_D1) {
-+		PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1"
-+			" or later revision.\n");
-+		PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1,
-+			MSVDX_COMMS_OFFSET_FLAGS);
-+	} else {
-+		PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0"
-+			" or earlier revision.\n");
-+		PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0,
-+			MSVDX_COMMS_OFFSET_FLAGS);
-+	}
-+
-+	PSB_WMSVDX32(0, MSVDX_COMMS_MSG_COUNTER);
-+	PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
-+	PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
-+	PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
-+	PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
-+	PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+	PSB_WMSVDX32(0, MSVDX_COMMS_FW_STATUS);
-+
-+	/* read register bank size */
-+	{
-+		uint32_t bank_size, reg;
-+		reg = PSB_RMSVDX32(MSVDX_MTX_RAM_BANK);
-+		bank_size =
-+			REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
-+				CR_MTX_RAM_BANK_SIZE);
-+		ram_bank_size = (uint32_t) (1 << (bank_size + 2));
-+	}
-+
-+	PSB_DEBUG_GENERAL("MSVDX: RAM bank size = %d bytes\n",
-+		ram_bank_size);
-+
-+	/* if FW already loaded from storage */
-+	if (dev_priv->msvdx_fw)
-+		fw_ptr = dev_priv->msvdx_fw;
-+	else
-+		fw_ptr = msvdx_get_fw(dev, &raw, "msvdx_fw.bin");
-+
-+	if (!fw_ptr) {
-+		DRM_ERROR("psb: No valid msvdx_fw.bin firmware found.\n");
-+		ret = 1;
-+		goto out;
-+	}
-+
-+	fw = (struct msvdx_fw *) fw_ptr;
-+	if (fw->ver != 0x02) {
-+		DRM_ERROR("psb: msvdx_fw.bin firmware version mismatch,"
-+			"got
version=%02x expected version=%02x\n", -+ fw->ver, 0x02); -+ ret = 1; -+ goto out; -+ } -+ -+ text_ptr = -+ (uint32_t *) ((uint8_t *) fw_ptr + sizeof(struct msvdx_fw)); -+ data_ptr = text_ptr + fw->text_size; -+ -+ PSB_DEBUG_GENERAL("MSVDX: Retrieved pointers for firmware\n"); -+ PSB_DEBUG_GENERAL("MSVDX: text_size: %d\n", fw->text_size); -+ PSB_DEBUG_GENERAL("MSVDX: data_size: %d\n", fw->data_size); -+ PSB_DEBUG_GENERAL("MSVDX: data_location: 0x%x\n", -+ fw->data_location); -+ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of text: 0x%x\n", -+ *text_ptr); -+ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of data: 0x%x\n", -+ *data_ptr); -+ -+ PSB_DEBUG_GENERAL("MSVDX: Uploading firmware\n"); -+ psb_upload_fw(dev_priv, MTX_CORE_CODE_MEM, ram_bank_size, -+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, -+ text_ptr); -+ psb_upload_fw(dev_priv, MTX_CORE_DATA_MEM, ram_bank_size, -+ fw->data_location - MTX_DATA_BASE, fw->data_size, -+ data_ptr); -+ -+#if 0 -+ /* todo : Verify code upload possibly only in debug */ -+ ret = psb_verify_fw(dev_priv, ram_bank_size, -+ MTX_CORE_CODE_MEM, -+ PC_START_ADDRESS - MTX_CODE_BASE, -+ fw->text_size, text_ptr); -+ if (ret) { -+ /* Firmware code upload failed */ -+ ret = 1; -+ goto out; -+ } -+ -+ ret = psb_verify_fw(dev_priv, ram_bank_size, MTX_CORE_DATA_MEM, -+ fw->data_location - MTX_DATA_BASE, -+ fw->data_size, data_ptr); -+ if (ret) { -+ /* Firmware data upload failed */ -+ ret = 1; -+ goto out; -+ } -+#else -+ (void)psb_verify_fw; -+#endif -+ /* -- Set starting PC address */ -+ psb_write_mtx_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS); -+ -+ /* -- Turn on the thread */ -+ PSB_WMSVDX32(MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE); -+ -+ /* Wait for the signature value to be written back */ -+ ret = psb_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE, -+ MSVDX_COMMS_SIGNATURE_VALUE, /*Required value*/ -+ 0xffffffff /* Enabled bits */); -+ if (ret) { -+ DRM_ERROR("MSVDX: firmware fails to initialize.\n"); -+ goto out; -+ } -+ -+ PSB_DEBUG_GENERAL("MSVDX: MTX Initial indications OK\n"); -+ PSB_DEBUG_GENERAL("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n", -+ MSVDX_COMMS_AREA_ADDR); -+#if 0 -+ -+ /* Send test message */ -+ { -+ uint32_t msg_buf[FW_VA_DEBUG_TEST2_SIZE >> 2]; -+ -+ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_MSG_SIZE, -+ FW_VA_DEBUG_TEST2_SIZE); -+ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_ID, -+ VA_MSGID_TEST2); -+ -+ ret = psb_mtx_send(dev_priv, msg_buf); -+ if (ret) { -+ DRM_ERROR("psb: MSVDX sending fails.\n"); -+ goto out; -+ } -+ -+ /* Wait for Mtx to ack this message */ -+ psb_poll_mtx_irq(dev_priv); -+ -+ } -+#endif -+out: -+ -+ return ret; -+} -+ -+ -+static void psb_free_ccb(struct ttm_buffer_object **ccb) -+{ -+ ttm_bo_unref(ccb); -+ *ccb = NULL; -+} -+ -+/** -+ * Reset chip and disable interrupts. 
-+ * Return 0 success, 1 failure -+ */ -+int psb_msvdx_reset(struct drm_psb_private *dev_priv) -+{ -+ int ret = 0; -+ -+ /* Issue software reset */ -+ PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL); -+ -+ ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL, 0, -+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK); -+ -+ if (!ret) { -+ /* Clear interrupt enabled flag */ -+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE); -+ -+ /* Clear any pending interrupt flags */ -+ PSB_WMSVDX32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR); -+ } -+ -+ /* mutex_destroy(&dev_priv->msvdx_mutex); */ -+ -+ return ret; -+} -+ -+static int psb_allocate_ccb(struct drm_device *dev, -+ struct ttm_buffer_object **ccb, -+ uint32_t *base_addr, int size) -+{ -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ struct ttm_bo_device *bdev = &dev_priv->bdev; -+ int ret; -+ struct ttm_bo_kmap_obj tmp_kmap; -+ bool is_iomem; -+ -+ PSB_DEBUG_INIT("MSVDX: allocate CCB\n"); -+ -+ ret = ttm_buffer_object_create(bdev, size, -+ ttm_bo_type_kernel, -+ DRM_PSB_FLAG_MEM_KERNEL | -+ TTM_PL_FLAG_NO_EVICT, 0, 0, 0, -+ NULL, ccb); -+ if (ret) { -+ DRM_ERROR("MSVDX:failed to allocate CCB.\n"); -+ *ccb = NULL; -+ return 1; -+ } -+ -+ ret = ttm_bo_kmap(*ccb, 0, (*ccb)->num_pages, &tmp_kmap); -+ if (ret) { -+ PSB_DEBUG_GENERAL("ttm_bo_kmap failed ret: %d\n", ret); -+ ttm_bo_unref(ccb); -+ *ccb = NULL; -+ return 1; -+ } -+ -+ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0, -+ RENDEC_A_SIZE); -+ ttm_bo_kunmap(&tmp_kmap); -+ -+ *base_addr = (*ccb)->offset; -+ return 0; -+} -+ -+int psb_msvdx_init(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ uint32_t cmd; -+ /* uint32_t clk_gate_ctrl = clk_enable_all; */ -+ int ret; -+ -+ if (!dev_priv->ccb0) { /* one for the first time */ -+ /* Initialize comand msvdx queueing */ -+ INIT_LIST_HEAD(&dev_priv->msvdx_queue); -+ mutex_init(&dev_priv->msvdx_mutex); -+ spin_lock_init(&dev_priv->msvdx_lock); -+ /*figure out the stepping */ -+ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &psb_rev_id); -+ } -+ -+ dev_priv->msvdx_busy = 0; -+ -+ /* Enable Clocks */ -+ PSB_DEBUG_GENERAL("Enabling clocks\n"); -+ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE); -+ -+ /* Enable MMU by removing all bypass bits */ -+ PSB_WMSVDX32(0, MSVDX_MMU_CONTROL0); -+ -+ /* move firmware loading to the place receiving first command buffer */ -+ -+ PSB_DEBUG_GENERAL("MSVDX: Setting up RENDEC,allocate CCB 0/1\n"); -+ /* Allocate device virtual memory as required by rendec.... 
*/ -+ if (!dev_priv->ccb0) { -+ ret = psb_allocate_ccb(dev, &dev_priv->ccb0, -+ &dev_priv->base_addr0, -+ RENDEC_A_SIZE); -+ if (ret) -+ goto err_exit; -+ } -+ -+ if (!dev_priv->ccb1) { -+ ret = psb_allocate_ccb(dev, &dev_priv->ccb1, -+ &dev_priv->base_addr1, -+ RENDEC_B_SIZE); -+ if (ret) -+ goto err_exit; -+ } -+ -+ -+ PSB_DEBUG_GENERAL("MSVDX: RENDEC A: %08x RENDEC B: %08x\n", -+ dev_priv->base_addr0, dev_priv->base_addr1); -+ -+ PSB_WMSVDX32(dev_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0); -+ PSB_WMSVDX32(dev_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1); -+ -+ cmd = 0; -+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE, -+ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096); -+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE, -+ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096); -+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_BUFFER_SIZE); -+ -+ cmd = 0; -+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1, -+ RENDEC_DECODE_START_SIZE, 0); -+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1, -+ RENDEC_BURST_SIZE_W, 1); -+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1, -+ RENDEC_BURST_SIZE_R, 1); -+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1, -+ RENDEC_EXTERNAL_MEMORY, 1); -+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL1); -+ -+ cmd = 0x00101010; -+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT0); -+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT1); -+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT2); -+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT3); -+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT4); -+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT5); -+ -+ cmd = 0; -+ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE, -+ 1); -+ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL0); -+ -+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); -+ PSB_DEBUG_INIT("MSVDX:defer firmware loading to the" -+ " place when receiving user space commands\n"); -+ -+ dev_priv->msvdx_fw_loaded = 0; /* need to load firware */ -+ -+ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); -+ -+#if 0 -+ ret = psb_setup_fw(dev); -+ if (ret) -+ goto err_exit; -+ /* Send Initialisation message to firmware */ -+ if (0) { -+ uint32_t msg_init[FW_VA_INIT_SIZE >> 2]; -+ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_SIZE, -+ FW_VA_INIT_SIZE); -+ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_ID, VA_MSGID_INIT); -+ -+ /* Need to set this for all but A0 */ -+ MEMIO_WRITE_FIELD(msg_init, FW_VA_INIT_GLOBAL_PTD, -+ psb_get_default_pd_addr(dev_priv->mmu)); -+ -+ ret = psb_mtx_send(dev_priv, msg_init); -+ if (ret) -+ goto err_exit; -+ -+ psb_poll_mtx_irq(dev_priv); -+ } -+#endif -+ -+ return 0; -+ -+err_exit: -+ DRM_ERROR("MSVDX: initialization failed\n"); -+ if (dev_priv->ccb0) -+ psb_free_ccb(&dev_priv->ccb0); -+ if (dev_priv->ccb1) -+ psb_free_ccb(&dev_priv->ccb1); -+ -+ return 1; -+} -+ -+int psb_msvdx_uninit(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ -+ /* Reset MSVDX chip */ -+ psb_msvdx_reset(dev_priv); -+ -+ /* PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */ -+ PSB_DEBUG_INIT("MSVDX:set the msvdx clock to 0\n"); -+ PSB_WMSVDX32(0, MSVDX_MAN_CLK_ENABLE); -+ -+ if (dev_priv->ccb0) -+ psb_free_ccb(&dev_priv->ccb0); -+ if (dev_priv->ccb1) -+ psb_free_ccb(&dev_priv->ccb1); -+ if (dev_priv->msvdx_fw) -+ drm_free(dev_priv->msvdx_fw, dev_priv->msvdx_fw_size, -+ DRM_MEM_DRIVER); -+ -+ return 0; -+} -diff -uNr a/drivers/gpu/drm/psb/psb_reg.h b/drivers/gpu/drm/psb/psb_reg.h ---- a/drivers/gpu/drm/psb/psb_reg.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_reg.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,569 @@ 
-+/************************************************************************** -+ * -+ * Copyright (c) (2005-2007) Imagination Technologies Limited. -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. -+ * -+ **************************************************************************/ -+/* -+ */ -+#ifndef _PSB_REG_H_ -+#define _PSB_REG_H_ -+ -+#define PSB_CR_CLKGATECTL 0x0000 -+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24) -+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20) -+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20) -+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16) -+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16) -+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12) -+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12) -+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8) -+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8) -+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4) -+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4) -+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0) -+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0) -+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0) -+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1) -+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2) -+ -+#define PSB_CR_CORE_ID 0x0010 -+#define _PSB_CC_ID_ID_SHIFT (16) -+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16) -+#define _PSB_CC_ID_CONFIG_SHIFT (0) -+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0) -+ -+#define PSB_CR_CORE_REVISION 0x0014 -+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24) -+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24) -+#define _PSB_CC_REVISION_MAJOR_SHIFT (16) -+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16) -+#define _PSB_CC_REVISION_MINOR_SHIFT (8) -+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8) -+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0) -+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0) -+ -+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018 -+ -+#define PSB_CR_SOFT_RESET 0x0080 -+#define _PSB_CS_RESET_TSP_RESET (1 << 6) -+#define _PSB_CS_RESET_ISP_RESET (1 << 5) -+#define _PSB_CS_RESET_USE_RESET (1 << 4) -+#define _PSB_CS_RESET_TA_RESET (1 << 3) -+#define _PSB_CS_RESET_DPM_RESET (1 << 2) -+#define _PSB_CS_RESET_TWOD_RESET (1 << 1) -+#define _PSB_CS_RESET_BIF_RESET (1 << 0) -+ -+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C -+ -+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110 -+ -+#define PSB_CR_EVENT_STATUS2 0x0118 -+ -+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114 -+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4) -+ -+#define PSB_CR_EVENT_STATUS 0x012C -+ -+#define PSB_CR_EVENT_HOST_ENABLE 0x0130 -+ -+#define PSB_CR_EVENT_HOST_CLEAR 0x0134 -+#define _PSB_CE_MASTER_INTERRUPT (1 << 31) -+#define _PSB_CE_TA_DPM_FAULT (1 << 28) -+#define _PSB_CE_TWOD_COMPLETE (1 << 27) -+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25) -+#define 
_PSB_CE_DPM_TA_MEM_FREE (1 << 24) -+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18) -+#define _PSB_CE_SW_EVENT (1 << 14) -+#define _PSB_CE_TA_FINISHED (1 << 13) -+#define _PSB_CE_TA_TERMINATE (1 << 12) -+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3) -+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2) -+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1) -+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0) -+ -+ -+#define PSB_USE_OFFSET_MASK 0x0007FFFF -+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1) -+#define PSB_CR_USE_CODE_BASE0 0x0A0C -+#define PSB_CR_USE_CODE_BASE1 0x0A10 -+#define PSB_CR_USE_CODE_BASE2 0x0A14 -+#define PSB_CR_USE_CODE_BASE3 0x0A18 -+#define PSB_CR_USE_CODE_BASE4 0x0A1C -+#define PSB_CR_USE_CODE_BASE5 0x0A20 -+#define PSB_CR_USE_CODE_BASE6 0x0A24 -+#define PSB_CR_USE_CODE_BASE7 0x0A28 -+#define PSB_CR_USE_CODE_BASE8 0x0A2C -+#define PSB_CR_USE_CODE_BASE9 0x0A30 -+#define PSB_CR_USE_CODE_BASE10 0x0A34 -+#define PSB_CR_USE_CODE_BASE11 0x0A38 -+#define PSB_CR_USE_CODE_BASE12 0x0A3C -+#define PSB_CR_USE_CODE_BASE13 0x0A40 -+#define PSB_CR_USE_CODE_BASE14 0x0A44 -+#define PSB_CR_USE_CODE_BASE15 0x0A48 -+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2)) -+#define _PSB_CUC_BASE_DM_SHIFT (25) -+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25) -+#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */ -+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7) -+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0) -+#define _PSB_CUC_DM_VERTEX (0) -+#define _PSB_CUC_DM_PIXEL (1) -+#define _PSB_CUC_DM_RESERVED (2) -+#define _PSB_CUC_DM_EDM (3) -+ -+#define PSB_CR_PDS_EXEC_BASE 0x0AB8 -+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */ -+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20) -+ -+#define PSB_CR_EVENT_KICKER 0x0AC4 -+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */ -+ -+#define PSB_CR_EVENT_KICK 0x0AC8 -+#define _PSB_CE_KICK_NOW (1 << 0) -+ -+ -+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38 -+ -+#define PSB_CR_BIF_CTRL 0x0C00 -+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4) -+#define _PSB_CB_CTRL_INVALDC (1 << 3) -+#define _PSB_CB_CTRL_FLUSH (1 << 2) -+ -+#define PSB_CR_BIF_INT_STAT 0x0C04 -+ -+#define PSB_CR_BIF_FAULT 0x0C08 -+#define _PSB_CBI_STAT_PF_N_RW (1 << 14) -+#define _PSB_CBI_STAT_FAULT_SHIFT (0) -+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0) -+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1) -+#define _PSB_CBI_STAT_FAULT_TA (1 << 2) -+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3) -+#define _PSB_CBI_STAT_FAULT_2D (1 << 4) -+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5) -+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6) -+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7) -+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8) -+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9) -+ -+#define PSB_CR_BIF_BANK0 0x0C78 -+ -+#define PSB_CR_BIF_BANK1 0x0C7C -+ -+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84 -+ -+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88 -+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC -+ -+#define PSB_CR_2D_SOCIF 0x0E18 -+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0) -+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0) -+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0) -+ -+#define PSB_CR_2D_BLIT_STATUS 0x0E04 -+#define _PSB_C2B_STATUS_BUSY (1 << 24) -+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0) -+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0) -+ -+/* -+ * 2D defs. 
-+ */ -+ -+/* -+ * 2D Slave Port Data : Block Header's Object Type -+ */ -+ -+#define PSB_2D_CLIP_BH (0x00000000) -+#define PSB_2D_PAT_BH (0x10000000) -+#define PSB_2D_CTRL_BH (0x20000000) -+#define PSB_2D_SRC_OFF_BH (0x30000000) -+#define PSB_2D_MASK_OFF_BH (0x40000000) -+#define PSB_2D_RESERVED1_BH (0x50000000) -+#define PSB_2D_RESERVED2_BH (0x60000000) -+#define PSB_2D_FENCE_BH (0x70000000) -+#define PSB_2D_BLIT_BH (0x80000000) -+#define PSB_2D_SRC_SURF_BH (0x90000000) -+#define PSB_2D_DST_SURF_BH (0xA0000000) -+#define PSB_2D_PAT_SURF_BH (0xB0000000) -+#define PSB_2D_SRC_PAL_BH (0xC0000000) -+#define PSB_2D_PAT_PAL_BH (0xD0000000) -+#define PSB_2D_MASK_SURF_BH (0xE0000000) -+#define PSB_2D_FLUSH_BH (0xF0000000) -+ -+/* -+ * Clip Definition block (PSB_2D_CLIP_BH) -+ */ -+#define PSB_2D_CLIPCOUNT_MAX (1) -+#define PSB_2D_CLIPCOUNT_MASK (0x00000000) -+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF) -+#define PSB_2D_CLIPCOUNT_SHIFT (0) -+/* clip rectangle min & max */ -+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000) -+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF) -+#define PSB_2D_CLIP_XMAX_SHIFT (12) -+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF) -+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000) -+#define PSB_2D_CLIP_XMIN_SHIFT (0) -+/* clip rectangle offset */ -+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000) -+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF) -+#define PSB_2D_CLIP_YMAX_SHIFT (12) -+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF) -+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000) -+#define PSB_2D_CLIP_YMIN_SHIFT (0) -+ -+/* -+ * Pattern Control (PSB_2D_PAT_BH) -+ */ -+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F) -+#define PSB_2D_PAT_HEIGHT_SHIFT (0) -+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0) -+#define PSB_2D_PAT_WIDTH_SHIFT (5) -+#define PSB_2D_PAT_YSTART_MASK (0x00007C00) -+#define PSB_2D_PAT_YSTART_SHIFT (10) -+#define PSB_2D_PAT_XSTART_MASK (0x000F8000) -+#define PSB_2D_PAT_XSTART_SHIFT (15) -+ -+/* -+ * 2D Control block (PSB_2D_CTRL_BH) -+ */ -+/* Present Flags */ -+#define PSB_2D_SRCCK_CTRL (0x00000001) -+#define PSB_2D_DSTCK_CTRL (0x00000002) -+#define PSB_2D_ALPHA_CTRL (0x00000004) -+/* Colour Key Colour (SRC/DST)*/ -+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF) -+#define PSB_2D_CK_COL_CLRMASK (0x00000000) -+#define PSB_2D_CK_COL_SHIFT (0) -+/* Colour Key Mask (SRC/DST)*/ -+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF) -+#define PSB_2D_CK_MASK_CLRMASK (0x00000000) -+#define PSB_2D_CK_MASK_SHIFT (0) -+/* Alpha Control (Alpha/RGB)*/ -+#define PSB_2D_GBLALPHA_MASK (0x000FF000) -+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF) -+#define PSB_2D_GBLALPHA_SHIFT (12) -+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000) -+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF) -+#define PSB_2D_SRCALPHA_OP_SHIFT (20) -+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000) -+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000) -+#define PSB_2D_SRCALPHA_OP_DST (0x00200000) -+#define PSB_2D_SRCALPHA_OP_SG (0x00300000) -+#define PSB_2D_SRCALPHA_OP_DG (0x00400000) -+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000) -+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000) -+#define PSB_2D_SRCALPHA_INVERT (0x00800000) -+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF) -+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000) -+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF) -+#define PSB_2D_DSTALPHA_OP_SHIFT (24) -+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000) -+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000) -+#define PSB_2D_DSTALPHA_OP_DST (0x02000000) -+#define PSB_2D_DSTALPHA_OP_SG (0x03000000) -+#define PSB_2D_DSTALPHA_OP_DG (0x04000000) -+#define PSB_2D_DSTALPHA_OP_GBL 
(0x05000000) -+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000) -+#define PSB_2D_DSTALPHA_INVERT (0x08000000) -+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF) -+ -+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000) -+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF) -+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000) -+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF) -+ -+/* -+ *Source Offset (PSB_2D_SRC_OFF_BH) -+ */ -+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12) -+#define PSB_2D_SRCOFF_XSTART_SHIFT (12) -+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF) -+#define PSB_2D_SRCOFF_YSTART_SHIFT (0) -+ -+/* -+ * Mask Offset (PSB_2D_MASK_OFF_BH) -+ */ -+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12) -+#define PSB_2D_MASKOFF_XSTART_SHIFT (12) -+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF) -+#define PSB_2D_MASKOFF_YSTART_SHIFT (0) -+ -+/* -+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored -+ */ -+ -+/* -+ *Blit Rectangle (PSB_2D_BLIT_BH) -+ */ -+ -+#define PSB_2D_ROT_MASK (3<<25) -+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK) -+#define PSB_2D_ROT_NONE (0<<25) -+#define PSB_2D_ROT_90DEGS (1<<25) -+#define PSB_2D_ROT_180DEGS (2<<25) -+#define PSB_2D_ROT_270DEGS (3<<25) -+ -+#define PSB_2D_COPYORDER_MASK (3<<23) -+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK) -+#define PSB_2D_COPYORDER_TL2BR (0<<23) -+#define PSB_2D_COPYORDER_BR2TL (1<<23) -+#define PSB_2D_COPYORDER_TR2BL (2<<23) -+#define PSB_2D_COPYORDER_BL2TR (3<<23) -+ -+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF) -+#define PSB_2D_DSTCK_DISABLE (0x00000000) -+#define PSB_2D_DSTCK_PASS (0x00200000) -+#define PSB_2D_DSTCK_REJECT (0x00400000) -+ -+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF) -+#define PSB_2D_SRCCK_DISABLE (0x00000000) -+#define PSB_2D_SRCCK_PASS (0x00080000) -+#define PSB_2D_SRCCK_REJECT (0x00100000) -+ -+#define PSB_2D_CLIP_ENABLE (0x00040000) -+ -+#define PSB_2D_ALPHA_ENABLE (0x00020000) -+ -+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF) -+#define PSB_2D_PAT_MASK (0x00010000) -+#define PSB_2D_USE_PAT (0x00010000) -+#define PSB_2D_USE_FILL (0x00000000) -+/* -+ * Tungsten Graphics note on rop codes: If rop A and rop B are -+ * identical, the mask surface will not be read and need not be -+ * set up. 
-+ */ -+ -+#define PSB_2D_ROP3B_MASK (0x0000FF00) -+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF) -+#define PSB_2D_ROP3B_SHIFT (8) -+/* rop code A */ -+#define PSB_2D_ROP3A_MASK (0x000000FF) -+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00) -+#define PSB_2D_ROP3A_SHIFT (0) -+ -+#define PSB_2D_ROP4_MASK (0x0000FFFF) -+/* -+ * DWORD0: (Only pass if Pattern control == Use Fill Colour) -+ * Fill Colour RGBA8888 -+ */ -+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF) -+#define PSB_2D_FILLCOLOUR_SHIFT (0) -+/* -+ * DWORD1: (Always Present) -+ * X Start (Dest) -+ * Y Start (Dest) -+ */ -+#define PSB_2D_DST_XSTART_MASK (0x00FFF000) -+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF) -+#define PSB_2D_DST_XSTART_SHIFT (12) -+#define PSB_2D_DST_YSTART_MASK (0x00000FFF) -+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000) -+#define PSB_2D_DST_YSTART_SHIFT (0) -+/* -+ * DWORD2: (Always Present) -+ * X Size (Dest) -+ * Y Size (Dest) -+ */ -+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000) -+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF) -+#define PSB_2D_DST_XSIZE_SHIFT (12) -+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF) -+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000) -+#define PSB_2D_DST_YSIZE_SHIFT (0) -+ -+/* -+ * Source Surface (PSB_2D_SRC_SURF_BH) -+ */ -+/* -+ * WORD 0 -+ */ -+ -+#define PSB_2D_SRC_FORMAT_MASK (0x00078000) -+#define PSB_2D_SRC_1_PAL (0x00000000) -+#define PSB_2D_SRC_2_PAL (0x00008000) -+#define PSB_2D_SRC_4_PAL (0x00010000) -+#define PSB_2D_SRC_8_PAL (0x00018000) -+#define PSB_2D_SRC_8_ALPHA (0x00020000) -+#define PSB_2D_SRC_4_ALPHA (0x00028000) -+#define PSB_2D_SRC_332RGB (0x00030000) -+#define PSB_2D_SRC_4444ARGB (0x00038000) -+#define PSB_2D_SRC_555RGB (0x00040000) -+#define PSB_2D_SRC_1555ARGB (0x00048000) -+#define PSB_2D_SRC_565RGB (0x00050000) -+#define PSB_2D_SRC_0888ARGB (0x00058000) -+#define PSB_2D_SRC_8888ARGB (0x00060000) -+#define PSB_2D_SRC_8888UYVY (0x00068000) -+#define PSB_2D_SRC_RESERVED (0x00070000) -+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000) -+ -+ -+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF) -+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000) -+#define PSB_2D_SRC_STRIDE_SHIFT (0) -+/* -+ * WORD 1 - Base Address -+ */ -+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC) -+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003) -+#define PSB_2D_SRC_ADDR_SHIFT (2) -+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2) -+ -+/* -+ * Pattern Surface (PSB_2D_PAT_SURF_BH) -+ */ -+/* -+ * WORD 0 -+ */ -+ -+#define PSB_2D_PAT_FORMAT_MASK (0x00078000) -+#define PSB_2D_PAT_1_PAL (0x00000000) -+#define PSB_2D_PAT_2_PAL (0x00008000) -+#define PSB_2D_PAT_4_PAL (0x00010000) -+#define PSB_2D_PAT_8_PAL (0x00018000) -+#define PSB_2D_PAT_8_ALPHA (0x00020000) -+#define PSB_2D_PAT_4_ALPHA (0x00028000) -+#define PSB_2D_PAT_332RGB (0x00030000) -+#define PSB_2D_PAT_4444ARGB (0x00038000) -+#define PSB_2D_PAT_555RGB (0x00040000) -+#define PSB_2D_PAT_1555ARGB (0x00048000) -+#define PSB_2D_PAT_565RGB (0x00050000) -+#define PSB_2D_PAT_0888ARGB (0x00058000) -+#define PSB_2D_PAT_8888ARGB (0x00060000) -+ -+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF) -+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000) -+#define PSB_2D_PAT_STRIDE_SHIFT (0) -+/* -+ * WORD 1 - Base Address -+ */ -+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC) -+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003) -+#define PSB_2D_PAT_ADDR_SHIFT (2) -+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2) -+ -+/* -+ * Destination Surface (PSB_2D_DST_SURF_BH) -+ */ -+/* -+ * WORD 0 -+ */ -+ -+#define PSB_2D_DST_FORMAT_MASK (0x00078000) -+#define PSB_2D_DST_332RGB (0x00030000) -+#define PSB_2D_DST_4444ARGB (0x00038000) 
-+#define PSB_2D_DST_555RGB (0x00040000) -+#define PSB_2D_DST_1555ARGB (0x00048000) -+#define PSB_2D_DST_565RGB (0x00050000) -+#define PSB_2D_DST_0888ARGB (0x00058000) -+#define PSB_2D_DST_8888ARGB (0x00060000) -+#define PSB_2D_DST_8888AYUV (0x00070000) -+ -+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF) -+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000) -+#define PSB_2D_DST_STRIDE_SHIFT (0) -+/* -+ * WORD 1 - Base Address -+ */ -+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC) -+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003) -+#define PSB_2D_DST_ADDR_SHIFT (2) -+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2) -+ -+/* -+ * Mask Surface (PSB_2D_MASK_SURF_BH) -+ */ -+/* -+ * WORD 0 -+ */ -+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF) -+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000) -+#define PSB_2D_MASK_STRIDE_SHIFT (0) -+/* -+ * WORD 1 - Base Address -+ */ -+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC) -+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003) -+#define PSB_2D_MASK_ADDR_SHIFT (2) -+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2) -+ -+/* -+ * Source Palette (PSB_2D_SRC_PAL_BH) -+ */ -+ -+#define PSB_2D_SRCPAL_ADDR_SHIFT (0) -+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007) -+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8) -+#define PSB_2D_SRCPAL_BYTEALIGN (1024) -+ -+/* -+ * Pattern Palette (PSB_2D_PAT_PAL_BH) -+ */ -+ -+#define PSB_2D_PATPAL_ADDR_SHIFT (0) -+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007) -+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8) -+#define PSB_2D_PATPAL_BYTEALIGN (1024) -+ -+/* -+ * Rop3 Codes (2 LS bytes) -+ */ -+ -+#define PSB_2D_ROP3_SRCCOPY (0xCCCC) -+#define PSB_2D_ROP3_PATCOPY (0xF0F0) -+#define PSB_2D_ROP3_WHITENESS (0xFFFF) -+#define PSB_2D_ROP3_BLACKNESS (0x0000) -+#define PSB_2D_ROP3_SRC (0xCC) -+#define PSB_2D_ROP3_PAT (0xF0) -+#define PSB_2D_ROP3_DST (0xAA) -+ -+ -+/* -+ * Sizes. -+ */ -+ -+#define PSB_SCENE_HW_COOKIE_SIZE 16 -+#define PSB_TA_MEM_HW_COOKIE_SIZE 16 -+ -+/* -+ * Scene stuff. -+ */ -+ -+#define PSB_NUM_HW_SCENES 2 -+ -+/* -+ * Scheduler completion actions. -+ */ -+ -+#define PSB_RASTER_BLOCK 0 -+#define PSB_RASTER 1 -+#define PSB_RETURN 2 -+#define PSB_TA 3 -+ -+ -+/*Power management*/ -+#define PSB_PUNIT_PORT 0x04 -+#define PSB_PWRGT_CNT 0x60 -+#define PSB_PWRGT_STS 0x61 -+#define PSB_PWRGT_GFX_MASK 0x3 -+#define PSB_PWRGT_VID_ENC_MASK 0x30 -+#define PSB_PWRGT_VID_DEC_MASK 0xc -+#endif -diff -uNr a/drivers/gpu/drm/psb/psb_reset.c b/drivers/gpu/drm/psb/psb_reset.c ---- a/drivers/gpu/drm/psb/psb_reset.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_reset.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,423 @@ -+/************************************************************************** -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. 
-+ * -+ **************************************************************************/ -+/* -+ * Authors: -+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#include <drm/drmP.h> -+#include "psb_drv.h" -+#include "psb_reg.h" -+#include "psb_scene.h" -+#include "psb_msvdx.h" -+#include "lnc_topaz.h" -+#include <linux/spinlock.h> -+#define PSB_2D_TIMEOUT_MSEC 100 -+ -+void psb_reset(struct drm_psb_private *dev_priv, int reset_2d) -+{ -+ uint32_t val; -+ -+ val = _PSB_CS_RESET_BIF_RESET | -+ _PSB_CS_RESET_DPM_RESET | -+ _PSB_CS_RESET_TA_RESET | -+ _PSB_CS_RESET_USE_RESET | -+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET; -+ -+ if (reset_2d) -+ val |= _PSB_CS_RESET_TWOD_RESET; -+ -+ PSB_WSGX32(val, PSB_CR_SOFT_RESET); -+ (void) PSB_RSGX32(PSB_CR_SOFT_RESET); -+ -+ msleep(1); -+ -+ PSB_WSGX32(0, PSB_CR_SOFT_RESET); -+ wmb(); -+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT, -+ PSB_CR_BIF_CTRL); -+ wmb(); -+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL); -+ -+ msleep(1); -+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT, -+ PSB_CR_BIF_CTRL); -+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL); -+} -+ -+void psb_print_pagefault(struct drm_psb_private *dev_priv) -+{ -+ uint32_t val; -+ uint32_t addr; -+ -+ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT); -+ addr = PSB_RSGX32(PSB_CR_BIF_FAULT); -+ -+ if (val) { -+ if (val & _PSB_CBI_STAT_PF_N_RW) -+ DRM_ERROR("Poulsbo MMU page fault:\n"); -+ else -+ DRM_ERROR("Poulsbo MMU read / write " -+ "protection fault:\n"); -+ -+ if (val & _PSB_CBI_STAT_FAULT_CACHE) -+ DRM_ERROR("\tCache requestor.\n"); -+ if (val & _PSB_CBI_STAT_FAULT_TA) -+ DRM_ERROR("\tTA requestor.\n"); -+ if (val & _PSB_CBI_STAT_FAULT_VDM) -+ DRM_ERROR("\tVDM requestor.\n"); -+ if (val & _PSB_CBI_STAT_FAULT_2D) -+ DRM_ERROR("\t2D requestor.\n"); -+ if (val & _PSB_CBI_STAT_FAULT_PBE) -+ DRM_ERROR("\tPBE requestor.\n"); -+ if (val & _PSB_CBI_STAT_FAULT_TSP) -+ DRM_ERROR("\tTSP requestor.\n"); -+ if (val & _PSB_CBI_STAT_FAULT_ISP) -+ DRM_ERROR("\tISP requestor.\n"); -+ if (val & _PSB_CBI_STAT_FAULT_USSEPDS) -+ DRM_ERROR("\tUSSEPDS requestor.\n"); -+ if (val & _PSB_CBI_STAT_FAULT_HOST) -+ DRM_ERROR("\tHost requestor.\n"); -+ -+ DRM_ERROR("\tMMU failing address is 0x%08x.\n", -+ (unsigned) addr); -+ } -+} -+ -+void psb_schedule_watchdog(struct drm_psb_private *dev_priv) -+{ -+ struct timer_list *wt = &dev_priv->watchdog_timer; -+ unsigned long irq_flags; -+ -+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags); -+ if (dev_priv->timer_available && !timer_pending(wt)) { -+ wt->expires = jiffies + PSB_WATCHDOG_DELAY; -+ add_timer(wt); -+ } -+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags); -+} -+ -+#if 0 -+static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv, -+ unsigned int engine, int *lockup, -+ int *idle) -+{ -+ uint32_t received_seq; -+ -+ received_seq = dev_priv->comm[engine << 4]; -+ spin_lock(&dev_priv->sequence_lock); -+ *idle = (received_seq == dev_priv->sequence[engine]); -+ spin_unlock(&dev_priv->sequence_lock); -+ -+ if (*idle) { -+ dev_priv->idle[engine] = 1; -+ *lockup = 0; -+ return; -+ } -+ -+ if (dev_priv->idle[engine]) { -+ dev_priv->idle[engine] = 0; -+ dev_priv->last_sequence[engine] = received_seq; -+ *lockup = 0; -+ return; -+ } -+ -+ *lockup = (dev_priv->last_sequence[engine] == received_seq); -+} -+ -+#endif -+static void psb_watchdog_func(unsigned long data) -+{ -+ struct drm_psb_private *dev_priv = (struct drm_psb_private *) data; -+ struct drm_device *dev = dev_priv->dev; -+ int lockup; -+ int msvdx_lockup; -+ int 
msvdx_idle; -+ int lockup_2d; -+#if 0 -+ int topaz_lockup = 0; -+ int topaz_idle = 0; -+#endif -+ int idle_2d; -+ int idle; -+ unsigned long irq_flags; -+ -+ psb_scheduler_lockup(dev_priv, &lockup, &idle); -+ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle); -+ -+#if 0 -+ if (IS_MRST(dev)) -+ lnc_topaz_lockup(dev_priv, &topaz_lockup, &topaz_idle); -+#endif -+ -+#if 0 -+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d); -+#else -+ lockup_2d = false; -+ idle_2d = true; -+#endif -+ if (lockup || msvdx_lockup || lockup_2d) { -+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags); -+ dev_priv->timer_available = 0; -+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, -+ irq_flags); -+ if (lockup) { -+ psb_print_pagefault(dev_priv); -+ schedule_work(&dev_priv->watchdog_wq); -+ } -+ if (msvdx_lockup) -+ schedule_work(&dev_priv->msvdx_watchdog_wq); -+#if 0 -+ if (IS_MRST(dev) && (topaz_lockup)) -+ schedule_work(&dev_priv->topaz_watchdog_wq); -+#else -+ (void) dev; -+#endif -+ } -+ if (!idle || !msvdx_idle || !idle_2d /* || !topaz_idle */) -+ psb_schedule_watchdog(dev_priv); -+} -+ -+void psb_msvdx_flush_cmd_queue(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ struct psb_msvdx_cmd_queue *msvdx_cmd; -+ struct list_head *list, *next; -+ /*Flush the msvdx cmd queue and signal all fences in the queue */ -+ list_for_each_safe(list, next, &dev_priv->msvdx_queue) { -+ msvdx_cmd = -+ list_entry(list, struct psb_msvdx_cmd_queue, head); -+ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n", -+ msvdx_cmd->sequence); -+ dev_priv->msvdx_current_sequence = msvdx_cmd->sequence; -+ psb_fence_error(dev, PSB_ENGINE_VIDEO, -+ dev_priv->msvdx_current_sequence, -+ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG); -+ list_del(list); -+ kfree(msvdx_cmd->cmd); -+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue), -+ DRM_MEM_DRIVER); -+ } -+} -+ -+static void psb_msvdx_reset_wq(struct work_struct *work) -+{ -+ struct drm_psb_private *dev_priv = -+ container_of(work, struct drm_psb_private, msvdx_watchdog_wq); -+ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ unsigned long irq_flags; -+ -+ mutex_lock(&dev_priv->msvdx_mutex); -+ dev_priv->msvdx_needs_reset = 1; -+ dev_priv->msvdx_current_sequence++; -+ PSB_DEBUG_GENERAL -+ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n", -+ dev_priv->msvdx_current_sequence); -+ -+ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO, -+ dev_priv->msvdx_current_sequence, -+ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG); -+ -+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags); -+ dev_priv->timer_available = 1; -+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags); -+ -+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags); -+ psb_msvdx_flush_cmd_queue(scheduler->dev); -+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags); -+ -+ psb_schedule_watchdog(dev_priv); -+ mutex_unlock(&dev_priv->msvdx_mutex); -+} -+ -+static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv) -+{ -+ struct psb_xhw_buf buf; -+ uint32_t bif_ctrl; -+ -+ INIT_LIST_HEAD(&buf.head); -+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0); -+ bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL); -+ PSB_WSGX32(bif_ctrl | -+ _PSB_CB_CTRL_CLEAR_FAULT | -+ _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL); -+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL); -+ msleep(1); -+ PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL); -+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL); -+ return psb_xhw_reset_dpm(dev_priv, &buf); -+} -+ -+/* -+ * Block command submission and reset hardware and 
schedulers. -+ */ -+ -+static void psb_reset_wq(struct work_struct *work) -+{ -+ struct drm_psb_private *dev_priv = -+ container_of(work, struct drm_psb_private, watchdog_wq); -+ int lockup_2d; -+ int idle_2d; -+ unsigned long irq_flags; -+ int ret; -+ int reset_count = 0; -+ struct psb_xhw_buf buf; -+ uint32_t xhw_lockup; -+ -+ /* -+ * Block command submission. -+ */ -+ PSB_DEBUG_PM("ioctl: psb_pl_reference\n"); -+ -+ mutex_lock(&dev_priv->reset_mutex); -+ -+ INIT_LIST_HEAD(&buf.head); -+ ret = psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup); -+ if (likely(ret == 0)) { -+ if (psb_extend_timeout(dev_priv, xhw_lockup) == 0) { -+ /* -+ * no lockup, just re-schedule -+ */ -+ spin_lock_irqsave(&dev_priv->watchdog_lock, -+ irq_flags); -+ dev_priv->timer_available = 1; -+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, -+ irq_flags); -+ psb_schedule_watchdog(dev_priv); -+ mutex_unlock(&dev_priv->reset_mutex); -+ return; -+ } -+ } else { -+ DRM_ERROR("Check lockup returned %d\n", ret); -+ } -+#if 0 -+ msleep(PSB_2D_TIMEOUT_MSEC); -+ -+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d); -+ -+ if (lockup_2d) { -+ uint32_t seq_2d; -+ spin_lock(&dev_priv->sequence_lock); -+ seq_2d = dev_priv->sequence[PSB_ENGINE_2D]; -+ spin_unlock(&dev_priv->sequence_lock); -+ psb_fence_error(dev_priv->scheduler.dev, -+ PSB_ENGINE_2D, -+ seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY); -+ DRM_INFO("Resetting 2D engine.\n"); -+ } -+ -+ psb_reset(dev_priv, lockup_2d); -+#else -+ (void) lockup_2d; -+ (void) idle_2d; -+ psb_reset(dev_priv, 0); -+#endif -+ (void) psb_xhw_mmu_reset(dev_priv); -+ DRM_INFO("Resetting scheduler.\n"); -+ psb_scheduler_pause(dev_priv); -+ psb_scheduler_reset(dev_priv, -EBUSY); -+ psb_scheduler_ta_mem_check(dev_priv); -+ -+ while (dev_priv->ta_mem && -+ !dev_priv->force_ta_mem_load && ++reset_count < 10) { -+ struct ttm_fence_object *fence; -+ -+ /* -+ * TA memory is currently fenced so offsets -+ * are valid. Reload offsets into the dpm now. -+ */ -+ -+ struct psb_xhw_buf buf; -+ INIT_LIST_HEAD(&buf.head); -+ -+ msleep(100); -+ -+ fence = dev_priv->ta_mem->ta_memory->sync_obj; -+ -+ DRM_INFO("Reloading TA memory at offset " -+ "0x%08lx to 0x%08lx seq %d\n", -+ dev_priv->ta_mem->ta_memory->offset, -+ dev_priv->ta_mem->ta_memory->offset + -+ (dev_priv->ta_mem->ta_memory->num_pages << PAGE_SHIFT), -+ fence->sequence); -+ -+ fence = dev_priv->ta_mem->hw_data->sync_obj; -+ -+ DRM_INFO("Reloading TA HW memory at offset " -+ "0x%08lx to 0x%08lx seq %u\n", -+ dev_priv->ta_mem->hw_data->offset, -+ dev_priv->ta_mem->hw_data->offset + -+ (dev_priv->ta_mem->hw_data->num_pages << PAGE_SHIFT), -+ fence->sequence); -+ -+ ret = psb_xhw_ta_mem_load(dev_priv, &buf, -+ PSB_TA_MEM_FLAG_TA | -+ PSB_TA_MEM_FLAG_RASTER | -+ PSB_TA_MEM_FLAG_HOSTA | -+ PSB_TA_MEM_FLAG_HOSTD | -+ PSB_TA_MEM_FLAG_INIT, -+ dev_priv->ta_mem->ta_memory-> -+ offset, -+ dev_priv->ta_mem->hw_data-> -+ offset, -+ dev_priv->ta_mem->hw_cookie); -+ if (!ret) -+ break; -+ -+ DRM_INFO("Reloading TA memory failed. 
Retrying.\n"); -+ psb_reset(dev_priv, 0); -+ (void) psb_xhw_mmu_reset(dev_priv); -+ } -+ -+ psb_scheduler_restart(dev_priv); -+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags); -+ dev_priv->timer_available = 1; -+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags); -+ mutex_unlock(&dev_priv->reset_mutex); -+} -+ -+void psb_watchdog_init(struct drm_psb_private *dev_priv) -+{ -+ struct timer_list *wt = &dev_priv->watchdog_timer; -+ unsigned long irq_flags; -+ -+ spin_lock_init(&dev_priv->watchdog_lock); -+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags); -+ init_timer(wt); -+ INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq); -+ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq); -+ wt->data = (unsigned long) dev_priv; -+ wt->function = &psb_watchdog_func; -+ dev_priv->timer_available = 1; -+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags); -+} -+ -+void psb_watchdog_takedown(struct drm_psb_private *dev_priv) -+{ -+ unsigned long irq_flags; -+ -+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags); -+ dev_priv->timer_available = 0; -+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags); -+ (void) del_timer_sync(&dev_priv->watchdog_timer); -+} -diff -uNr a/drivers/gpu/drm/psb/psb_scene.c b/drivers/gpu/drm/psb/psb_scene.c ---- a/drivers/gpu/drm/psb/psb_scene.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_scene.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,523 @@ -+/************************************************************************** -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. 
-+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com> -+ */ -+ -+#include <drm/drmP.h> -+#include "psb_drv.h" -+#include "psb_scene.h" -+ -+void psb_clear_scene_atomic(struct psb_scene *scene) -+{ -+ int i; -+ struct page *page; -+ void *v; -+ -+ for (i = 0; i < scene->clear_num_pages; ++i) { -+ page = ttm_tt_get_page(scene->hw_data->ttm, -+ scene->clear_p_start + i); -+ if (in_irq()) -+ v = kmap_atomic(page, KM_IRQ0); -+ else -+ v = kmap_atomic(page, KM_USER0); -+ -+ memset(v, 0, PAGE_SIZE); -+ -+ if (in_irq()) -+ kunmap_atomic(v, KM_IRQ0); -+ else -+ kunmap_atomic(v, KM_USER0); -+ } -+} -+ -+int psb_clear_scene(struct psb_scene *scene) -+{ -+ struct ttm_bo_kmap_obj bmo; -+ bool is_iomem; -+ void *addr; -+ -+ int ret = ttm_bo_kmap(scene->hw_data, scene->clear_p_start, -+ scene->clear_num_pages, &bmo); -+ -+ PSB_DEBUG_RENDER("Scene clear.\n"); -+ if (ret) -+ return ret; -+ -+ addr = ttm_kmap_obj_virtual(&bmo, &is_iomem); -+ BUG_ON(is_iomem); -+ memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT); -+ ttm_bo_kunmap(&bmo); -+ -+ return 0; -+} -+ -+static void psb_destroy_scene(struct kref *kref) -+{ -+ struct psb_scene *scene = -+ container_of(kref, struct psb_scene, kref); -+ -+ PSB_DEBUG_RENDER("Scene destroy.\n"); -+ psb_scheduler_remove_scene_refs(scene); -+ ttm_bo_unref(&scene->hw_data); -+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER); -+} -+ -+void psb_scene_unref(struct psb_scene **p_scene) -+{ -+ struct psb_scene *scene = *p_scene; -+ -+ PSB_DEBUG_RENDER("Scene unref.\n"); -+ *p_scene = NULL; -+ kref_put(&scene->kref, &psb_destroy_scene); -+} -+ -+struct psb_scene *psb_scene_ref(struct psb_scene *src) -+{ -+ PSB_DEBUG_RENDER("Scene ref.\n"); -+ kref_get(&src->kref); -+ return src; -+} -+ -+static struct psb_scene *psb_alloc_scene(struct drm_device *dev, -+ uint32_t w, uint32_t h) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ struct ttm_bo_device *bdev = &dev_priv->bdev; -+ int ret = -EINVAL; -+ struct psb_scene *scene; -+ uint32_t bo_size; -+ struct psb_xhw_buf buf; -+ -+ PSB_DEBUG_RENDER("Alloc scene w %u h %u msaa %u\n", w & 0xffff, h, -+ w >> 16); -+ -+ scene = drm_calloc(1, sizeof(*scene), DRM_MEM_DRIVER); -+ -+ if (!scene) { -+ DRM_ERROR("Out of memory allocating scene object.\n"); -+ return NULL; -+ } -+ -+ scene->dev = dev; -+ scene->w = w; -+ scene->h = h; -+ scene->hw_scene = NULL; -+ kref_init(&scene->kref); -+ -+ INIT_LIST_HEAD(&buf.head); -+ ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h, -+ scene->hw_cookie, &bo_size, -+ &scene->clear_p_start, -+ &scene->clear_num_pages); -+ if (ret) -+ goto out_err; -+ -+ ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel, -+ DRM_PSB_FLAG_MEM_MMU | -+ TTM_PL_FLAG_CACHED, -+ 0, 0, 1, NULL, &scene->hw_data); -+ if (ret) -+ goto out_err; -+ -+ return scene; -+out_err: -+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER); -+ return NULL; -+} -+ -+int psb_validate_scene_pool(struct psb_context *context, -+ struct psb_scene_pool *pool, -+ uint32_t w, -+ uint32_t h, -+ int final_pass, struct psb_scene **scene_p) -+{ -+ struct drm_device *dev = pool->dev; -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ struct psb_scene *scene = pool->scenes[pool->cur_scene]; -+ int ret; -+ unsigned long irq_flags; -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ uint32_t bin_pt_offset; -+ uint32_t bin_param_offset; -+ -+ 
PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n", -+ pool->cur_scene); -+ -+ if (unlikely(!dev_priv->ta_mem)) { -+ dev_priv->ta_mem = -+ psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages); -+ if (!dev_priv->ta_mem) -+ return -ENOMEM; -+ -+ bin_pt_offset = ~0; -+ bin_param_offset = ~0; -+ } else { -+ bin_pt_offset = dev_priv->ta_mem->hw_data->offset; -+ bin_param_offset = dev_priv->ta_mem->ta_memory->offset; -+ } -+ -+ pool->w = w; -+ pool->h = h; -+ if (scene && (scene->w != pool->w || scene->h != pool->h)) { -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) { -+ spin_unlock_irqrestore(&scheduler->lock, -+ irq_flags); -+ DRM_ERROR("Trying to resize a dirty scene.\n"); -+ return -EINVAL; -+ } -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ psb_scene_unref(&pool->scenes[pool->cur_scene]); -+ scene = NULL; -+ } -+ -+ if (!scene) { -+ pool->scenes[pool->cur_scene] = scene = -+ psb_alloc_scene(pool->dev, pool->w, pool->h); -+ -+ if (!scene) -+ return -ENOMEM; -+ -+ scene->flags = PSB_SCENE_FLAG_CLEARED; -+ } -+ -+ ret = psb_validate_kernel_buffer(context, scene->hw_data, -+ PSB_ENGINE_TA, -+ PSB_BO_FLAG_SCENE | -+ PSB_GPU_ACCESS_READ | -+ PSB_GPU_ACCESS_WRITE, 0); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ /* -+ * FIXME: We need atomic bit manipulation here for the -+ * scheduler. For now use the spinlock. -+ */ -+ -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) { -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ PSB_DEBUG_RENDER("Waiting to clear scene memory.\n"); -+ mutex_lock(&scene->hw_data->mutex); -+ -+ ret = ttm_bo_wait(scene->hw_data, 0, 1, 0); -+ mutex_unlock(&scene->hw_data->mutex); -+ if (ret) -+ return ret; -+ -+ ret = psb_clear_scene(scene); -+ -+ if (ret) -+ return ret; -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ scene->flags |= PSB_SCENE_FLAG_CLEARED; -+ } -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ -+ ret = psb_validate_kernel_buffer(context, dev_priv->ta_mem->hw_data, -+ PSB_ENGINE_TA, -+ PSB_BO_FLAG_SCENE | -+ PSB_GPU_ACCESS_READ | -+ PSB_GPU_ACCESS_WRITE, 0); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ ret = -+ psb_validate_kernel_buffer(context, -+ dev_priv->ta_mem->ta_memory, -+ PSB_ENGINE_TA, -+ PSB_BO_FLAG_SCENE | -+ PSB_GPU_ACCESS_READ | -+ PSB_GPU_ACCESS_WRITE, 0); -+ -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ if (unlikely(bin_param_offset != -+ dev_priv->ta_mem->ta_memory->offset || -+ bin_pt_offset != -+ dev_priv->ta_mem->hw_data->offset || -+ dev_priv->force_ta_mem_load)) { -+ -+ struct psb_xhw_buf buf; -+ -+ INIT_LIST_HEAD(&buf.head); -+ ret = psb_xhw_ta_mem_load(dev_priv, &buf, -+ PSB_TA_MEM_FLAG_TA | -+ PSB_TA_MEM_FLAG_RASTER | -+ PSB_TA_MEM_FLAG_HOSTA | -+ PSB_TA_MEM_FLAG_HOSTD | -+ PSB_TA_MEM_FLAG_INIT, -+ dev_priv->ta_mem->ta_memory-> -+ offset, -+ dev_priv->ta_mem->hw_data-> -+ offset, -+ dev_priv->ta_mem->hw_cookie); -+ if (ret) -+ return ret; -+ -+ dev_priv->force_ta_mem_load = 0; -+ } -+ -+ if (final_pass) { -+ -+ /* -+ * Clear the scene on next use. Advance the scene counter. 
-+ */ -+ -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ scene->flags &= ~PSB_SCENE_FLAG_CLEARED; -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes; -+ } -+ -+ *scene_p = psb_scene_ref(scene); -+ return 0; -+} -+ -+static void psb_scene_pool_destroy(struct kref *kref) -+{ -+ struct psb_scene_pool *pool = -+ container_of(kref, struct psb_scene_pool, kref); -+ int i; -+ PSB_DEBUG_RENDER("Scene pool destroy.\n"); -+ -+ for (i = 0; i < pool->num_scenes; ++i) { -+ PSB_DEBUG_RENDER("scenes %d is 0x%08lx\n", i, -+ (unsigned long) pool->scenes[i]); -+ if (pool->scenes[i]) -+ psb_scene_unref(&pool->scenes[i]); -+ } -+ -+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER); -+} -+ -+void psb_scene_pool_unref(struct psb_scene_pool **p_pool) -+{ -+ struct psb_scene_pool *pool = *p_pool; -+ -+ PSB_DEBUG_RENDER("Scene pool unref\n"); -+ *p_pool = NULL; -+ kref_put(&pool->kref, &psb_scene_pool_destroy); -+} -+ -+struct psb_scene_pool *psb_scene_pool_ref(struct psb_scene_pool *src) -+{ -+ kref_get(&src->kref); -+ return src; -+} -+ -+/* -+ * Callback for base object manager. -+ */ -+ -+static void psb_scene_pool_release(struct ttm_base_object **p_base) -+{ -+ struct ttm_base_object *base = *p_base; -+ struct psb_scene_pool *pool = -+ container_of(base, struct psb_scene_pool, base); -+ *p_base = NULL; -+ -+ psb_scene_pool_unref(&pool); -+} -+ -+struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file *file_priv, -+ uint32_t handle, -+ int check_owner) -+{ -+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; -+ struct ttm_base_object *base; -+ struct psb_scene_pool *pool; -+ -+ -+ base = ttm_base_object_lookup(tfile, handle); -+ if (!base || (base->object_type != PSB_USER_OBJECT_SCENE_POOL)) { -+ DRM_ERROR("Could not find scene pool object 0x%08x\n", -+ handle); -+ return NULL; -+ } -+ -+ if (check_owner && tfile != base->tfile && !base->shareable) { -+ ttm_base_object_unref(&base); -+ return NULL; -+ } -+ -+ pool = container_of(base, struct psb_scene_pool, base); -+ kref_get(&pool->kref); -+ ttm_base_object_unref(&base); -+ return pool; -+} -+ -+struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *file_priv, -+ int shareable, -+ uint32_t num_scenes, -+ uint32_t w, uint32_t h) -+{ -+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; -+ struct drm_device *dev = file_priv->minor->dev; -+ struct psb_scene_pool *pool; -+ int ret; -+ -+ PSB_DEBUG_RENDER("Scene pool alloc\n"); -+ pool = drm_calloc(1, sizeof(*pool), DRM_MEM_DRIVER); -+ if (!pool) { -+ DRM_ERROR("Out of memory allocating scene pool object.\n"); -+ return NULL; -+ } -+ pool->w = w; -+ pool->h = h; -+ pool->dev = dev; -+ pool->num_scenes = num_scenes; -+ kref_init(&pool->kref); -+ -+ /* -+ * The base object holds a reference. -+ */ -+ -+ kref_get(&pool->kref); -+ ret = ttm_base_object_init(tfile, &pool->base, shareable, -+ PSB_USER_OBJECT_SCENE_POOL, -+ &psb_scene_pool_release, NULL); -+ if (unlikely(ret != 0)) -+ goto out_err; -+ -+ return pool; -+out_err: -+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER); -+ return NULL; -+} -+ -+/* -+ * Code to support multiple ta memory buffers. 
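-+ *
-+ * Illustrative lifecycle sketch (assumed usage, not from the original
-+ * source): ta_mem objects are kref-counted, so a caller would
-+ * typically do
-+ *
-+ *   struct psb_ta_mem *m = psb_alloc_ta_mem(dev, pages); /* ref 1 */
-+ *   struct psb_ta_mem *m2 = psb_ta_mem_ref(m);           /* ref 2 */
-+ *   psb_ta_mem_unref(&m2); /* drops a ref and clears the pointer */
-+ *   psb_ta_mem_unref(&m);  /* last unref frees both buffer objects */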
-+ */ -+ -+static void psb_ta_mem_destroy(struct kref *kref) -+{ -+ struct psb_ta_mem *ta_mem = -+ container_of(kref, struct psb_ta_mem, kref); -+ -+ ttm_bo_unref(&ta_mem->hw_data); -+ ttm_bo_unref(&ta_mem->ta_memory); -+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER); -+} -+ -+void psb_ta_mem_unref(struct psb_ta_mem **p_ta_mem) -+{ -+ struct psb_ta_mem *ta_mem = *p_ta_mem; -+ *p_ta_mem = NULL; -+ kref_put(&ta_mem->kref, psb_ta_mem_destroy); -+} -+ -+struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src) -+{ -+ kref_get(&src->kref); -+ return src; -+} -+ -+struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ struct ttm_bo_device *bdev = &dev_priv->bdev; -+ int ret = -EINVAL; -+ struct psb_ta_mem *ta_mem; -+ uint32_t bo_size; -+ uint32_t ta_min_size; -+ struct psb_xhw_buf buf; -+ -+ INIT_LIST_HEAD(&buf.head); -+ -+ ta_mem = drm_calloc(1, sizeof(*ta_mem), DRM_MEM_DRIVER); -+ -+ if (!ta_mem) { -+ DRM_ERROR("Out of memory allocating parameter memory.\n"); -+ return NULL; -+ } -+ -+ kref_init(&ta_mem->kref); -+ ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages, -+ ta_mem->hw_cookie, -+ &bo_size, -+ &ta_min_size); -+ if (ret == -ENOMEM) { -+ DRM_ERROR("Parameter memory size is too small.\n"); -+ DRM_INFO("Attempted to use %u kiB of parameter memory.\n", -+ (unsigned int) (pages * (PAGE_SIZE / 1024))); -+ DRM_INFO("The Xpsb driver thinks this is too small and\n"); -+ DRM_INFO("suggests %u kiB. Check the psb DRM\n", -+ (unsigned int)(ta_min_size / 1024)); -+ DRM_INFO("\"ta_mem_size\" parameter!\n"); -+ } -+ if (ret) -+ goto out_err0; -+ -+ ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel, -+ DRM_PSB_FLAG_MEM_MMU, -+ 0, 0, 0, NULL, -+ &ta_mem->hw_data); -+ if (ret) -+ goto out_err0; -+ -+ bo_size = pages * PAGE_SIZE; -+ ret = -+ ttm_buffer_object_create(bdev, bo_size, -+ ttm_bo_type_kernel, -+ DRM_PSB_FLAG_MEM_RASTGEOM, -+ 0, -+ 1024 * 1024 >> PAGE_SHIFT, 0, -+ NULL, -+ &ta_mem->ta_memory); -+ if (ret) -+ goto out_err1; -+ -+ return ta_mem; -+out_err1: -+ ttm_bo_unref(&ta_mem->hw_data); -+out_err0: -+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER); -+ return NULL; -+} -+ -+int drm_psb_scene_unref_ioctl(struct drm_device *dev, -+ void *data, struct drm_file *file_priv) -+{ -+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; -+ struct drm_psb_scene *scene = (struct drm_psb_scene *) data; -+ int ret = 0; -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ if (!scene->handle_valid) -+ return 0; -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ -+ ret = -+ ttm_ref_object_base_unref(tfile, scene->handle, TTM_REF_USAGE); -+ if (unlikely(ret != 0)) -+ DRM_ERROR("Could not unreference a scene object.\n"); -+ up_read(&dev_priv->sgx_sem); -+ if (drm_psb_ospm && IS_MRST(dev)) -+ schedule_delayed_work(&dev_priv->scheduler.wq, 1); -+ return ret; -+} -diff -uNr a/drivers/gpu/drm/psb/psb_scene.h b/drivers/gpu/drm/psb/psb_scene.h ---- a/drivers/gpu/drm/psb/psb_scene.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_scene.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,119 @@ -+/************************************************************************** -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. 
-+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com> -+ */ -+ -+#ifndef _PSB_SCENE_H_ -+#define _PSB_SCENE_H_ -+ -+#include "ttm/ttm_object.h" -+ -+#define PSB_USER_OBJECT_SCENE_POOL ttm_driver_type0 -+#define PSB_USER_OBJECT_TA_MEM ttm_driver_type1 -+#define PSB_MAX_NUM_SCENES 8 -+ -+struct psb_hw_scene; -+struct psb_hw_ta_mem; -+ -+struct psb_scene_pool { -+ struct ttm_base_object base; -+ struct drm_device *dev; -+ struct kref kref; -+ uint32_t w; -+ uint32_t h; -+ uint32_t cur_scene; -+ struct psb_scene *scenes[PSB_MAX_NUM_SCENES]; -+ uint32_t num_scenes; -+}; -+ -+struct psb_scene { -+ struct drm_device *dev; -+ struct kref kref; -+ uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE]; -+ uint32_t bo_size; -+ uint32_t w; -+ uint32_t h; -+ struct psb_ta_mem *ta_mem; -+ struct psb_hw_scene *hw_scene; -+ struct ttm_buffer_object *hw_data; -+ uint32_t flags; -+ uint32_t clear_p_start; -+ uint32_t clear_num_pages; -+}; -+ -+#if 0 -+struct psb_scene_entry { -+ struct list_head head; -+ struct psb_scene *scene; -+}; -+ -+struct psb_user_scene { -+ struct ttm_base_object base; -+ struct drm_device *dev; -+}; -+ -+#endif -+ -+struct psb_ta_mem { -+ struct ttm_base_object base; -+ struct drm_device *dev; -+ struct kref kref; -+ uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE]; -+ uint32_t bo_size; -+ struct ttm_buffer_object *ta_memory; -+ struct ttm_buffer_object *hw_data; -+ int is_deallocating; -+ int deallocating_scheduled; -+}; -+ -+extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv, -+ int shareable, -+ uint32_t num_scenes, -+ uint32_t w, uint32_t h); -+extern void psb_scene_pool_unref(struct psb_scene_pool **pool); -+extern struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file -+ *priv, -+ uint32_t handle, -+ int check_owner); -+extern int psb_validate_scene_pool(struct psb_context *context, -+ struct psb_scene_pool *pool, -+ uint32_t w, -+ uint32_t h, int final_pass, -+ struct psb_scene **scene_p); -+extern void psb_scene_unref(struct psb_scene **scene); -+extern struct psb_scene *psb_scene_ref(struct psb_scene *src); -+extern int drm_psb_scene_unref_ioctl(struct drm_device *dev, -+ void *data, -+ struct drm_file *file_priv); -+ -+static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool) -+{ -+ return pool->base.hash.key; -+} -+ -+extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, -+ uint32_t pages); -+extern struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src); -+extern void psb_ta_mem_unref(struct psb_ta_mem **ta_mem); -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/psb_schedule.c b/drivers/gpu/drm/psb/psb_schedule.c ---- a/drivers/gpu/drm/psb/psb_schedule.c 
1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_schedule.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,1539 @@ -+/************************************************************************** -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com> -+ */ -+ -+#include <drm/drmP.h> -+#include "psb_drm.h" -+#include "psb_drv.h" -+#include "psb_reg.h" -+#include "psb_scene.h" -+#include "ttm/ttm_execbuf_util.h" -+ -+#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 30) -+#define PSB_ALLOWED_TA_RUNTIME (DRM_HZ * 30) -+#define PSB_RASTER_TIMEOUT (DRM_HZ / 10) -+#define PSB_TA_TIMEOUT (DRM_HZ / 10) -+ -+#undef PSB_SOFTWARE_WORKAHEAD -+ -+#ifdef PSB_STABLE_SETTING -+ -+/* -+ * Software blocks completely while the engines are working so there can be no -+ * overlap. -+ */ -+ -+#define PSB_WAIT_FOR_RASTER_COMPLETION -+#define PSB_WAIT_FOR_TA_COMPLETION -+ -+#elif defined(PSB_PARANOID_SETTING) -+/* -+ * Software blocks "almost" while the engines are working so there can be no -+ * overlap. -+ */ -+ -+#define PSB_WAIT_FOR_RASTER_COMPLETION -+#define PSB_WAIT_FOR_TA_COMPLETION -+#define PSB_BE_PARANOID -+ -+#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP) -+/* -+ * Software leaps ahead while the rasterizer is running and prepares -+ * a new ta job that can be scheduled before the rasterizer has -+ * finished. -+ */ -+ -+#define PSB_WAIT_FOR_TA_COMPLETION -+ -+#elif defined(PSB_SOFTWARE_WORKAHEAD) -+/* -+ * Don't sync, but allow software to work ahead. and queue a number of jobs. -+ * But block overlapping in the scheduler. -+ */ -+ -+#define PSB_BLOCK_OVERLAP -+#define ONLY_ONE_JOB_IN_RASTER_QUEUE -+ -+#endif -+ -+/* -+ * Avoid pixelbe pagefaults on C0. 
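-+ *
-+ * Illustrative note (added commentary): enabling the disabled block
-+ * below, i.e.
-+ *
-+ *   #define PSB_BLOCK_OVERLAP
-+ *
-+ * would serialize TA and rasterizer work as a workaround, at the cost
-+ * of the engine overlap the policy selectors above otherwise allow.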
-+ */ -+#if 0 -+#define PSB_BLOCK_OVERLAP -+#endif -+ -+static void psb_dispatch_ta(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler, -+ uint32_t reply_flag); -+static void psb_dispatch_raster(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler, -+ uint32_t reply_flag); -+ -+#ifdef FIX_TG_16 -+ -+void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv); -+static int psb_check_2d_idle(struct drm_psb_private *dev_priv); -+ -+#endif -+ -+void psb_scheduler_lockup(struct drm_psb_private *dev_priv, -+ int *lockup, int *idle) -+{ -+ unsigned long irq_flags; -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ -+ *lockup = 0; -+ *idle = 1; -+ -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ -+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL && -+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) { -+ *lockup = 1; -+ } -+ if (!*lockup -+ && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) -+ && time_after_eq(jiffies, scheduler->raster_end_jiffies)) { -+ *lockup = 1; -+ } -+ if (!*lockup) -+ *idle = scheduler->idle; -+ -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+} -+ -+static inline void psb_set_idle(struct psb_scheduler *scheduler) -+{ -+ scheduler->idle = -+ (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) && -+ (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL); -+ if (scheduler->idle) -+ wake_up(&scheduler->idle_queue); -+} -+ -+/* -+ * Call with the scheduler spinlock held. -+ * Assigns a scene context to either the ta or the rasterizer, -+ * flushing out other scenes to memory if necessary. -+ */ -+ -+static int psb_set_scene_fire(struct psb_scheduler *scheduler, -+ struct psb_scene *scene, -+ int engine, struct psb_task *task) -+{ -+ uint32_t flags = 0; -+ struct psb_hw_scene *hw_scene; -+ struct drm_device *dev = scene->dev; -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ -+ hw_scene = scene->hw_scene; -+ if (hw_scene && hw_scene->last_scene == scene) { -+ -+ /* -+ * Reuse the last hw scene context and delete it from the -+ * free list. -+ */ -+ -+ PSB_DEBUG_RENDER("Reusing hw scene %d.\n", -+ hw_scene->context_number); -+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) { -+ -+ /* -+ * No hw context initialization to be done. -+ */ -+ -+ flags |= PSB_SCENE_FLAG_SETUP_ONLY; -+ } -+ -+ list_del_init(&hw_scene->head); -+ -+ } else { -+ struct list_head *list; -+ hw_scene = NULL; -+ -+ /* -+ * Grab a new hw scene context. -+ */ -+ -+ list_for_each(list, &scheduler->hw_scenes) { -+ hw_scene = -+ list_entry(list, struct psb_hw_scene, head); -+ break; -+ } -+ BUG_ON(!hw_scene); -+ PSB_DEBUG_RENDER("New hw scene %d.\n", -+ hw_scene->context_number); -+ -+ list_del_init(list); -+ } -+ scene->hw_scene = hw_scene; -+ hw_scene->last_scene = scene; -+ -+ flags |= PSB_SCENE_FLAG_SETUP; -+ -+ /* -+ * Switch context and setup the engine. 
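-+	 *
-+	 * Illustrative note (added commentary): at this point @flags
-+	 * always carries PSB_SCENE_FLAG_SETUP, plus
-+	 * PSB_SCENE_FLAG_SETUP_ONLY when a still-dirty hw scene is being
-+	 * reused, and it is OR'ed with scene->flags before being handed
-+	 * to the firmware:
-+	 *
-+	 *   psb_xhw_scene_bind_fire(..., engine, flags | scene->flags);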
-+ */ -+ -+ return psb_xhw_scene_bind_fire(dev_priv, -+ &task->buf, -+ task->flags, -+ hw_scene->context_number, -+ scene->hw_cookie, -+ task->oom_cmds, -+ task->oom_cmd_size, -+ scene->hw_data->offset, -+ engine, flags | scene->flags); -+} -+ -+static inline void psb_report_fence(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler, -+ uint32_t class, -+ uint32_t sequence, -+ uint32_t type, int call_handler) -+{ -+ struct psb_scheduler_seq *seq = &scheduler->seq[type]; -+ struct ttm_fence_device *fdev = &dev_priv->fdev; -+ struct ttm_fence_class_manager *fc = &fdev->fence_class[PSB_ENGINE_TA]; -+ unsigned long irq_flags; -+ -+ /** -+ * Block racing poll_ta calls, that take the lock in write mode. -+ */ -+ -+ read_lock_irqsave(&fc->lock, irq_flags); -+ seq->sequence = sequence; -+ seq->reported = 0; -+ read_unlock_irqrestore(&fc->lock, irq_flags); -+ -+ if (call_handler) -+ psb_fence_handler(scheduler->dev, class); -+} -+ -+static void psb_schedule_raster(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler); -+ -+static void psb_schedule_ta(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler) -+{ -+ struct psb_task *task = NULL; -+ struct list_head *list, *next; -+ int pushed_raster_task = 0; -+ -+ PSB_DEBUG_RENDER("schedule ta\n"); -+ -+ if (scheduler->idle_count != 0) -+ return; -+ -+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) -+ return; -+ -+ if (scheduler->ta_state) -+ return; -+ -+ /* -+ * Skip the ta stage for rasterization-only -+ * tasks. They arrive here to make sure we're rasterizing -+ * tasks in the correct order. -+ */ -+ -+ list_for_each_safe(list, next, &scheduler->ta_queue) { -+ task = list_entry(list, struct psb_task, head); -+ if (task->task_type != psb_raster_task) -+ break; -+ -+ list_del_init(list); -+ list_add_tail(list, &scheduler->raster_queue); -+ psb_report_fence(dev_priv, scheduler, task->engine, -+ task->sequence, -+ _PSB_FENCE_TA_DONE_SHIFT, 1); -+ task = NULL; -+ pushed_raster_task = 1; -+ } -+ -+ if (pushed_raster_task) -+ psb_schedule_raster(dev_priv, scheduler); -+ -+ if (!task) -+ return; -+ -+ /* -+ * Still waiting for a vistest? -+ */ -+ -+ if (scheduler->feedback_task == task) -+ return; -+ -+#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE -+ -+ /* -+ * Block ta from trying to use both hardware contexts -+ * without the rasterizer starting to render from one of them. -+ */ -+ -+ if (!list_empty(&scheduler->raster_queue)) -+ return; -+ -+#endif -+ -+#ifdef PSB_BLOCK_OVERLAP -+ /* -+ * Make sure rasterizer isn't doing anything. -+ */ -+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) -+ return; -+#endif -+ if (list_empty(&scheduler->hw_scenes)) -+ return; -+ -+#ifdef FIX_TG_16 -+ if (psb_check_2d_idle(dev_priv)) -+ return; -+#endif -+ -+ list_del_init(&task->head); -+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM) -+ scheduler->ta_state = 1; -+ -+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = task; -+ scheduler->idle = 0; -+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT; -+ scheduler->total_ta_jiffies = 0; -+ -+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ? 
-+ 0x00000000 : PSB_RF_FIRE_TA; -+ -+ (void) psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size); -+ psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA, -+ task); -+ psb_schedule_watchdog(dev_priv); -+} -+ -+static int psb_fire_raster(struct psb_scheduler *scheduler, -+ struct psb_task *task) -+{ -+ struct drm_device *dev = scheduler->dev; -+ struct drm_psb_private *dev_priv = (struct drm_psb_private *) -+ dev->dev_private; -+ -+ PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence); -+ -+ return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags); -+} -+ -+/* -+ * Take the first rasterization task from the hp raster queue or from the -+ * raster queue and fire the rasterizer. -+ */ -+ -+static void psb_schedule_raster(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler) -+{ -+ struct psb_task *task; -+ struct list_head *list; -+ -+ if (scheduler->idle_count != 0) -+ return; -+ -+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) { -+ PSB_DEBUG_RENDER("Raster busy.\n"); -+ return; -+ } -+#ifdef PSB_BLOCK_OVERLAP -+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) { -+ PSB_DEBUG_RENDER("TA busy.\n"); -+ return; -+ } -+#endif -+ -+ if (!list_empty(&scheduler->hp_raster_queue)) -+ list = scheduler->hp_raster_queue.next; -+ else if (!list_empty(&scheduler->raster_queue)) -+ list = scheduler->raster_queue.next; -+ else { -+ PSB_DEBUG_RENDER("Nothing in list\n"); -+ return; -+ } -+ -+ task = list_entry(list, struct psb_task, head); -+ -+ /* -+ * Sometimes changing ZLS format requires an ISP reset. -+ * Doesn't seem to consume too much time. -+ */ -+ -+ if (task->scene) -+ PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET); -+ -+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task; -+ -+ list_del_init(list); -+ scheduler->idle = 0; -+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT; -+ scheduler->total_raster_jiffies = 0; -+ -+ if (task->scene) -+ PSB_WSGX32(0, PSB_CR_SOFT_RESET); -+ -+ (void) psb_reg_submit(dev_priv, task->raster_cmds, -+ task->raster_cmd_size); -+ -+ if (task->scene) { -+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ? 
-+ 0x00000000 : PSB_RF_FIRE_RASTER; -+ psb_set_scene_fire(scheduler, -+ task->scene, PSB_SCENE_ENGINE_RASTER, -+ task); -+ } else { -+ task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER; -+ psb_fire_raster(scheduler, task); -+ } -+ psb_schedule_watchdog(dev_priv); -+} -+ -+int psb_extend_timeout(struct drm_psb_private *dev_priv, -+ uint32_t xhw_lockup) -+{ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ unsigned long irq_flags; -+ int ret = -EBUSY; -+ -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ -+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL && -+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) { -+ if (xhw_lockup & PSB_LOCKUP_TA) { -+ goto out_unlock; -+ } else { -+ scheduler->total_ta_jiffies += -+ jiffies - scheduler->ta_end_jiffies + -+ PSB_TA_TIMEOUT; -+ if (scheduler->total_ta_jiffies > -+ PSB_ALLOWED_TA_RUNTIME) -+ goto out_unlock; -+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT; -+ } -+ } -+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL && -+ time_after_eq(jiffies, scheduler->raster_end_jiffies)) { -+ if (xhw_lockup & PSB_LOCKUP_RASTER) { -+ goto out_unlock; -+ } else { -+ scheduler->total_raster_jiffies += -+ jiffies - scheduler->raster_end_jiffies + -+ PSB_RASTER_TIMEOUT; -+ if (scheduler->total_raster_jiffies > -+ PSB_ALLOWED_RASTER_RUNTIME) -+ goto out_unlock; -+ scheduler->raster_end_jiffies = -+ jiffies + PSB_RASTER_TIMEOUT; -+ } -+ } -+ -+ ret = 0; -+ -+out_unlock: -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ return ret; -+} -+ -+/* -+ * TA done handler. -+ */ -+ -+static void psb_ta_done(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler) -+{ -+ struct psb_task *task = -+ scheduler->current_task[PSB_SCENE_ENGINE_TA]; -+ struct psb_scene *scene = task->scene; -+ -+ PSB_DEBUG_RENDER("TA done %u\n", task->sequence); -+ -+ switch (task->ta_complete_action) { -+ case PSB_RASTER_BLOCK: -+ scheduler->ta_state = 1; -+ scene->flags |= -+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE); -+ list_add_tail(&task->head, &scheduler->raster_queue); -+ break; -+ case PSB_RASTER: -+ scene->flags |= -+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE); -+ list_add_tail(&task->head, &scheduler->raster_queue); -+ break; -+ case PSB_RETURN: -+ scheduler->ta_state = 0; -+ scene->flags |= PSB_SCENE_FLAG_DIRTY; -+ list_add_tail(&scene->hw_scene->head, -+ &scheduler->hw_scenes); -+ -+ break; -+ } -+ -+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL; -+ -+#ifdef FIX_TG_16 -+ psb_2d_atomic_unlock(dev_priv); -+#endif -+ -+ if (task->ta_complete_action != PSB_RASTER_BLOCK) -+ psb_report_fence(dev_priv, scheduler, task->engine, -+ task->sequence, -+ _PSB_FENCE_TA_DONE_SHIFT, 1); -+ -+ psb_schedule_raster(dev_priv, scheduler); -+ psb_schedule_ta(dev_priv, scheduler); -+ psb_set_idle(scheduler); -+ -+ if (task->ta_complete_action != PSB_RETURN) -+ return; -+ -+ list_add_tail(&task->head, &scheduler->task_done_queue); -+ schedule_delayed_work(&scheduler->wq, 1); -+} -+ -+/* -+ * Rasterizer done handler. 
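-+ *
-+ * Illustrative summary (added commentary) of the completion actions
-+ * handled below:
-+ *
-+ *   PSB_RETURN: scene done; the hw scene context is recycled and the
-+ *               scene-done fence type is reported.
-+ *   PSB_RASTER: the task is re-queued for another rasterizer pass.
-+ *   PSB_TA:     the task goes back to the TA queue for re-binning.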
-+ */ -+ -+static void psb_raster_done(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler) -+{ -+ struct psb_task *task = -+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER]; -+ struct psb_scene *scene = task->scene; -+ uint32_t complete_action = task->raster_complete_action; -+ -+ PSB_DEBUG_RENDER("Raster done %u\n", task->sequence); -+ -+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL; -+ -+ if (complete_action != PSB_RASTER) -+ psb_schedule_raster(dev_priv, scheduler); -+ -+ if (scene) { -+ if (task->feedback.page) { -+ if (unlikely(scheduler->feedback_task)) { -+ /* -+ * This should never happen, since the previous -+ * feedback query will return before the next -+ * raster task is fired. -+ */ -+ DRM_ERROR("Feedback task busy.\n"); -+ } -+ scheduler->feedback_task = task; -+ psb_xhw_vistest(dev_priv, &task->buf); -+ } -+ switch (complete_action) { -+ case PSB_RETURN: -+ scene->flags &= -+ ~(PSB_SCENE_FLAG_DIRTY | -+ PSB_SCENE_FLAG_COMPLETE); -+ list_add_tail(&scene->hw_scene->head, -+ &scheduler->hw_scenes); -+ psb_report_fence(dev_priv, scheduler, task->engine, -+ task->sequence, -+ _PSB_FENCE_SCENE_DONE_SHIFT, 1); -+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM) -+ scheduler->ta_state = 0; -+ -+ break; -+ case PSB_RASTER: -+ list_add(&task->head, &scheduler->raster_queue); -+ task->raster_complete_action = PSB_RETURN; -+ psb_schedule_raster(dev_priv, scheduler); -+ break; -+ case PSB_TA: -+ list_add(&task->head, &scheduler->ta_queue); -+ scheduler->ta_state = 0; -+ task->raster_complete_action = PSB_RETURN; -+ task->ta_complete_action = PSB_RASTER; -+ break; -+ -+ } -+ } -+ psb_schedule_ta(dev_priv, scheduler); -+ psb_set_idle(scheduler); -+ -+ if (complete_action == PSB_RETURN) { -+ if (task->scene == NULL) { -+ psb_report_fence(dev_priv, scheduler, task->engine, -+ task->sequence, -+ _PSB_FENCE_RASTER_DONE_SHIFT, 1); -+ } -+ if (!task->feedback.page) { -+ list_add_tail(&task->head, -+ &scheduler->task_done_queue); -+ schedule_delayed_work(&scheduler->wq, 1); -+ } -+ } -+} -+ -+void psb_scheduler_pause(struct drm_psb_private *dev_priv) -+{ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ unsigned long irq_flags; -+ -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ scheduler->idle_count++; -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+} -+ -+void psb_scheduler_restart(struct drm_psb_private *dev_priv) -+{ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ unsigned long irq_flags; -+ -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ if (--scheduler->idle_count == 0) { -+ psb_schedule_ta(dev_priv, scheduler); -+ psb_schedule_raster(dev_priv, scheduler); -+ } -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+} -+ -+int psb_scheduler_idle(struct drm_psb_private *dev_priv) -+{ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ unsigned long irq_flags; -+ int ret; -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ ret = scheduler->idle_count != 0 && scheduler->idle; -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ return ret; -+} -+ -+int psb_scheduler_finished(struct drm_psb_private *dev_priv) -+{ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ unsigned long irq_flags; -+ int ret; -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ ret = (scheduler->idle && -+ list_empty(&scheduler->raster_queue) && -+ list_empty(&scheduler->ta_queue) && -+ list_empty(&scheduler->hp_raster_queue)); -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ return ret; -+} -+ -+static void 
psb_ta_oom(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler) -+{ -+ -+ struct psb_task *task = -+ scheduler->current_task[PSB_SCENE_ENGINE_TA]; -+ if (!task) -+ return; -+ -+ if (task->aborting) -+ return; -+ task->aborting = 1; -+ -+ DRM_INFO("Info: TA out of parameter memory.\n"); -+ -+ (void) psb_xhw_ta_oom(dev_priv, &task->buf, -+ task->scene->hw_cookie); -+} -+ -+static void psb_ta_oom_reply(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler) -+{ -+ -+ struct psb_task *task = -+ scheduler->current_task[PSB_SCENE_ENGINE_TA]; -+ uint32_t flags; -+ if (!task) -+ return; -+ -+ psb_xhw_ta_oom_reply(dev_priv, &task->buf, -+ task->scene->hw_cookie, -+ &task->ta_complete_action, -+ &task->raster_complete_action, &flags); -+ task->flags |= flags; -+ task->aborting = 0; -+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY); -+} -+ -+static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler) -+{ -+ DRM_ERROR("TA hw scene freed.\n"); -+} -+ -+static void psb_vistest_reply(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler) -+{ -+ struct psb_task *task = scheduler->feedback_task; -+ uint8_t *feedback_map; -+ uint32_t add; -+ uint32_t cur; -+ struct drm_psb_vistest *vistest; -+ int i; -+ -+ scheduler->feedback_task = NULL; -+ if (!task) { -+ DRM_ERROR("No Poulsbo feedback task.\n"); -+ return; -+ } -+ if (!task->feedback.page) { -+ DRM_ERROR("No Poulsbo feedback page.\n"); -+ goto out; -+ } -+ -+ if (in_irq()) -+ feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0); -+ else -+ feedback_map = kmap_atomic(task->feedback.page, KM_USER0); -+ -+ /* -+ * Loop over all requested vistest components here. -+ * Only one (vistest) currently. -+ */ -+ -+ vistest = (struct drm_psb_vistest *) -+ (feedback_map + task->feedback.offset); -+ -+ for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) { -+ add = task->buf.arg.arg.feedback[i]; -+ cur = vistest->vt[i]; -+ -+ /* -+ * Vistest saturates. -+ */ -+ -+ vistest->vt[i] = (cur + add < cur) ? 
~0 : cur + add; -+ } -+ if (in_irq()) -+ kunmap_atomic(feedback_map, KM_IRQ0); -+ else -+ kunmap_atomic(feedback_map, KM_USER0); -+out: -+ psb_report_fence(dev_priv, scheduler, task->engine, task->sequence, -+ _PSB_FENCE_FEEDBACK_SHIFT, 1); -+ -+ if (list_empty(&task->head)) { -+ list_add_tail(&task->head, &scheduler->task_done_queue); -+ schedule_delayed_work(&scheduler->wq, 1); -+ } else -+ psb_schedule_ta(dev_priv, scheduler); -+} -+ -+static void psb_ta_fire_reply(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler) -+{ -+ struct psb_task *task = -+ scheduler->current_task[PSB_SCENE_ENGINE_TA]; -+ -+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie); -+ -+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA); -+} -+ -+static void psb_raster_fire_reply(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler) -+{ -+ struct psb_task *task = -+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER]; -+ uint32_t reply_flags; -+ -+ if (!task) { -+ DRM_ERROR("Null task.\n"); -+ return; -+ } -+ -+ task->raster_complete_action = task->buf.arg.arg.sb.rca; -+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie); -+ -+ reply_flags = PSB_RF_FIRE_RASTER; -+ if (task->raster_complete_action == PSB_RASTER) -+ reply_flags |= PSB_RF_DEALLOC; -+ -+ psb_dispatch_raster(dev_priv, scheduler, reply_flags); -+} -+ -+static int psb_user_interrupt(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler) -+{ -+ uint32_t type; -+ int ret; -+ unsigned long irq_flags; -+ -+ /* -+ * Xhw cannot write directly to the comm page, so -+ * do it here. Firmware would have written directly. -+ */ -+ -+ ret = psb_xhw_handler(dev_priv); -+ if (unlikely(ret)) -+ return ret; -+ -+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); -+ type = dev_priv->comm[PSB_COMM_USER_IRQ]; -+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0; -+ if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) { -+ dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0; -+ DRM_ERROR("Lost Poulsbo hardware event.\n"); -+ } -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+ -+ if (type == 0) -+ return 0; -+ -+ switch (type) { -+ case PSB_UIRQ_VISTEST: -+ psb_vistest_reply(dev_priv, scheduler); -+ break; -+ case PSB_UIRQ_OOM_REPLY: -+ psb_ta_oom_reply(dev_priv, scheduler); -+ break; -+ case PSB_UIRQ_FIRE_TA_REPLY: -+ psb_ta_fire_reply(dev_priv, scheduler); -+ break; -+ case PSB_UIRQ_FIRE_RASTER_REPLY: -+ psb_raster_fire_reply(dev_priv, scheduler); -+ break; -+ default: -+ DRM_ERROR("Unknown Poulsbo hardware event. 
%d\n", type); -+ } -+ return 0; -+} -+ -+int psb_forced_user_interrupt(struct drm_psb_private *dev_priv) -+{ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ unsigned long irq_flags; -+ int ret; -+ -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ ret = psb_user_interrupt(dev_priv, scheduler); -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ return ret; -+} -+ -+static void psb_dispatch_ta(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler, -+ uint32_t reply_flag) -+{ -+ struct psb_task *task = -+ scheduler->current_task[PSB_SCENE_ENGINE_TA]; -+ uint32_t flags; -+ uint32_t mask; -+ -+ task->reply_flags |= reply_flag; -+ flags = task->reply_flags; -+ mask = PSB_RF_FIRE_TA; -+ -+ if (!(flags & mask)) -+ return; -+ -+ mask = PSB_RF_TA_DONE; -+ if ((flags & mask) == mask) { -+ task->reply_flags &= ~mask; -+ psb_ta_done(dev_priv, scheduler); -+ } -+ -+ mask = PSB_RF_OOM; -+ if ((flags & mask) == mask) { -+ task->reply_flags &= ~mask; -+ psb_ta_oom(dev_priv, scheduler); -+ } -+ -+ mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE); -+ if ((flags & mask) == mask) { -+ task->reply_flags &= ~mask; -+ psb_ta_done(dev_priv, scheduler); -+ } -+} -+ -+static void psb_dispatch_raster(struct drm_psb_private *dev_priv, -+ struct psb_scheduler *scheduler, -+ uint32_t reply_flag) -+{ -+ struct psb_task *task = -+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER]; -+ uint32_t flags; -+ uint32_t mask; -+ -+ task->reply_flags |= reply_flag; -+ flags = task->reply_flags; -+ mask = PSB_RF_FIRE_RASTER; -+ -+ if (!(flags & mask)) -+ return; -+ -+ /* -+ * For rasterizer-only tasks, don't report fence done here, -+ * as this is time consuming and the rasterizer wants a new -+ * task immediately. For other tasks, the hardware is probably -+ * still busy deallocating TA memory, so we can report -+ * fence done in parallel. 
-+ */ -+ -+ if (task->raster_complete_action == PSB_RETURN && -+ (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) { -+ psb_report_fence(dev_priv, scheduler, task->engine, -+ task->sequence, -+ _PSB_FENCE_RASTER_DONE_SHIFT, 1); -+ } -+ -+ mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC; -+ if ((flags & mask) == mask) { -+ task->reply_flags &= ~mask; -+ psb_raster_done(dev_priv, scheduler); -+ } -+} -+ -+void psb_scheduler_handler(struct drm_psb_private *dev_priv, -+ uint32_t status) -+{ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ -+ spin_lock(&scheduler->lock); -+ -+ if (status & _PSB_CE_PIXELBE_END_RENDER) { -+ psb_dispatch_raster(dev_priv, scheduler, -+ PSB_RF_RASTER_DONE); -+ } -+ if (status & _PSB_CE_DPM_3D_MEM_FREE) -+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC); -+ -+ if (status & _PSB_CE_TA_FINISHED) -+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE); -+ -+ if (status & _PSB_CE_TA_TERMINATE) -+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE); -+ -+ if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH | -+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL | -+ _PSB_CE_DPM_OUT_OF_MEMORY_MT)) { -+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM); -+ } -+ if (status & _PSB_CE_DPM_TA_MEM_FREE) -+ psb_ta_hw_scene_freed(dev_priv, scheduler); -+ -+ if (status & _PSB_CE_SW_EVENT) -+ psb_user_interrupt(dev_priv, scheduler); -+ -+ spin_unlock(&scheduler->lock); -+} -+ -+static void psb_free_task_wq(struct work_struct *work) -+{ -+ struct psb_scheduler *scheduler = -+ container_of(work, struct psb_scheduler, wq.work); -+ -+ struct list_head *list, *next; -+ unsigned long irq_flags; -+ struct psb_task *task; -+ -+ if (!mutex_trylock(&scheduler->task_wq_mutex)) -+ return; -+ -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ list_for_each_safe(list, next, &scheduler->task_done_queue) { -+ task = list_entry(list, struct psb_task, head); -+ list_del_init(list); -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ -+ PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, " -+ "Feedback bo 0x%08lx, done %d\n", -+ task->sequence, -+ (unsigned long) task->scene, -+ (unsigned long) task->feedback.bo, -+ atomic_read(&task->buf.done)); -+ -+ if (task->scene) { -+ PSB_DEBUG_RENDER("Unref scene %d\n", -+ task->sequence); -+ psb_scene_unref(&task->scene); -+ if (task->feedback.bo) { -+ PSB_DEBUG_RENDER("Unref feedback bo %d\n", -+ task->sequence); -+ ttm_bo_unref(&task->feedback.bo); -+ } -+ } -+ -+ if (atomic_read(&task->buf.done)) { -+ PSB_DEBUG_RENDER("Deleting task %d\n", -+ task->sequence); -+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER); -+ task = NULL; -+ } -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ if (task != NULL) -+ list_add(list, &scheduler->task_done_queue); -+ } -+ if (!list_empty(&scheduler->task_done_queue)) { -+ PSB_DEBUG_RENDER("Rescheduling wq\n"); -+ schedule_delayed_work(&scheduler->wq, 1); -+ } -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ -+ if (list_empty(&scheduler->task_done_queue) && -+ drm_psb_ospm && IS_MRST(scheduler->dev)) { -+ psb_try_power_down_sgx(scheduler->dev); -+ } -+ mutex_unlock(&scheduler->task_wq_mutex); -+} -+ -+/* -+ * Check if any of the tasks in the queues is using a scene. -+ * In that case we know the TA memory buffer objects are -+ * fenced and will not be evicted until that fence is signaled. 
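-+ *
-+ * Added clarification (not in the original comment): conversely, if
-+ * no queued task references a scene, dev_priv->force_ta_mem_load
-+ * stays set and the next scene-pool validation reloads the TA memory
-+ * offsets into the DPM.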
-+ */ -+ -+void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv) -+{ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ unsigned long irq_flags; -+ struct psb_task *task; -+ struct psb_task *next_task; -+ -+ dev_priv->force_ta_mem_load = 1; -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ list_for_each_entry_safe(task, next_task, &scheduler->ta_queue, -+ head) { -+ if (task->scene) { -+ dev_priv->force_ta_mem_load = 0; -+ break; -+ } -+ } -+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue, -+ head) { -+ if (task->scene) { -+ dev_priv->force_ta_mem_load = 0; -+ break; -+ } -+ } -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+} -+ -+void psb_scheduler_reset(struct drm_psb_private *dev_priv, -+ int error_condition) -+{ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ unsigned long wait_jiffies; -+ unsigned long cur_jiffies; -+ struct psb_task *task; -+ struct psb_task *next_task; -+ unsigned long irq_flags; -+ -+ psb_scheduler_pause(dev_priv); -+ if (!psb_scheduler_idle(dev_priv)) { -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ -+ cur_jiffies = jiffies; -+ wait_jiffies = cur_jiffies; -+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] && -+ time_after_eq(scheduler->ta_end_jiffies, wait_jiffies)) -+ wait_jiffies = scheduler->ta_end_jiffies; -+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] && -+ time_after_eq(scheduler->raster_end_jiffies, -+ wait_jiffies)) -+ wait_jiffies = scheduler->raster_end_jiffies; -+ -+ wait_jiffies -= cur_jiffies; -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ -+ (void) wait_event_timeout(scheduler->idle_queue, -+ psb_scheduler_idle(dev_priv), -+ wait_jiffies); -+ } -+ -+ if (!psb_scheduler_idle(dev_priv)) { -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER]; -+ if (task) { -+ DRM_ERROR("Detected Poulsbo rasterizer lockup.\n"); -+ if (task->engine == PSB_ENGINE_HPRAST) { -+ psb_fence_error(scheduler->dev, -+ PSB_ENGINE_HPRAST, -+ task->sequence, -+ _PSB_FENCE_TYPE_RASTER_DONE, -+ error_condition); -+ -+ list_del(&task->head); -+ psb_xhw_clean_buf(dev_priv, &task->buf); -+ list_add_tail(&task->head, -+ &scheduler->task_done_queue); -+ } else { -+ list_add(&task->head, -+ &scheduler->raster_queue); -+ } -+ } -+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL; -+ task = scheduler->current_task[PSB_SCENE_ENGINE_TA]; -+ if (task) { -+ DRM_ERROR("Detected Poulsbo ta lockup.\n"); -+ list_add_tail(&task->head, -+ &scheduler->raster_queue); -+#ifdef FIX_TG_16 -+ psb_2d_atomic_unlock(dev_priv); -+#endif -+ } -+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL; -+ scheduler->ta_state = 0; -+ -+#ifdef FIX_TG_16 -+ atomic_set(&dev_priv->ta_wait_2d, 0); -+ atomic_set(&dev_priv->ta_wait_2d_irq, 0); -+ wake_up(&dev_priv->queue_2d); -+#endif -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ } -+ -+ /* -+ * Empty raster queue. 
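-+	 *
-+	 * Illustrative note (added commentary): each task still queued
-+	 * is failed with the caller's error_condition (the watchdog
-+	 * reset path passes -EBUSY); psb_fence_error() reports it for
-+	 * all four fence types (TA, raster, scene and feedback) before
-+	 * the task moves to the done queue for deferred freeing.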
-+ */ -+ -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue, -+ head) { -+ struct psb_scene *scene = task->scene; -+ -+ DRM_INFO("Signaling fence sequence %u\n", -+ task->sequence); -+ -+ psb_fence_error(scheduler->dev, -+ task->engine, -+ task->sequence, -+ _PSB_FENCE_TYPE_TA_DONE | -+ _PSB_FENCE_TYPE_RASTER_DONE | -+ _PSB_FENCE_TYPE_SCENE_DONE | -+ _PSB_FENCE_TYPE_FEEDBACK, error_condition); -+ if (scene) { -+ scene->flags = 0; -+ if (scene->hw_scene) { -+ list_add_tail(&scene->hw_scene->head, -+ &scheduler->hw_scenes); -+ scene->hw_scene = NULL; -+ } -+ } -+ -+ psb_xhw_clean_buf(dev_priv, &task->buf); -+ list_del(&task->head); -+ list_add_tail(&task->head, &scheduler->task_done_queue); -+ } -+ -+ schedule_delayed_work(&scheduler->wq, 1); -+ scheduler->idle = 1; -+ wake_up(&scheduler->idle_queue); -+ -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+ psb_scheduler_restart(dev_priv); -+ -+} -+ -+int psb_scheduler_init(struct drm_device *dev, -+ struct psb_scheduler *scheduler) -+{ -+ struct psb_hw_scene *hw_scene; -+ int i; -+ -+ memset(scheduler, 0, sizeof(*scheduler)); -+ scheduler->dev = dev; -+ mutex_init(&scheduler->task_wq_mutex); -+ spin_lock_init(&scheduler->lock); -+ scheduler->idle = 1; -+ -+ INIT_LIST_HEAD(&scheduler->ta_queue); -+ INIT_LIST_HEAD(&scheduler->raster_queue); -+ INIT_LIST_HEAD(&scheduler->hp_raster_queue); -+ INIT_LIST_HEAD(&scheduler->hw_scenes); -+ INIT_LIST_HEAD(&scheduler->task_done_queue); -+ INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq); -+ init_waitqueue_head(&scheduler->idle_queue); -+ -+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) { -+ hw_scene = &scheduler->hs[i]; -+ hw_scene->context_number = i; -+ list_add_tail(&hw_scene->head, &scheduler->hw_scenes); -+ } -+ -+ for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) -+ scheduler->seq[i].reported = 0; -+ return 0; -+} -+ -+/* -+ * Scene references maintained by the scheduler are not refcounted. -+ * Remove all references to a particular scene here. 
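-+ *
-+ * hw_scene->last_scene is a plain pointer cache (see the psb_hw_scene
-+ * definition in psb_schedule.h) that lets a scene be re-bound to the
-+ * same hardware scene slot without a reload; it takes no reference,
-+ * so teardown paths must clear it before dropping the last real
-+ * scene reference, e.g.:
-+ *
-+ *	psb_scheduler_remove_scene_refs(scene);
-+ *	(then unref / free the scene)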
-+ */ -+ -+void psb_scheduler_remove_scene_refs(struct psb_scene *scene) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) scene->dev->dev_private; -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ struct psb_hw_scene *hw_scene; -+ unsigned long irq_flags; -+ unsigned int i; -+ -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) { -+ hw_scene = &scheduler->hs[i]; -+ if (hw_scene->last_scene == scene) { -+ BUG_ON(list_empty(&hw_scene->head)); -+ hw_scene->last_scene = NULL; -+ } -+ } -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+} -+ -+void psb_scheduler_takedown(struct psb_scheduler *scheduler) -+{ -+ flush_scheduled_work(); -+} -+ -+static int psb_setup_task(struct drm_device *dev, -+ struct drm_psb_cmdbuf_arg *arg, -+ struct ttm_buffer_object *raster_cmd_buffer, -+ struct ttm_buffer_object *ta_cmd_buffer, -+ struct ttm_buffer_object *oom_cmd_buffer, -+ struct psb_scene *scene, -+ enum psb_task_type task_type, -+ uint32_t engine, -+ uint32_t flags, struct psb_task **task_p) -+{ -+ struct psb_task *task; -+ int ret; -+ -+ if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) { -+ DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size); -+ return -EINVAL; -+ } -+ if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) { -+ DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size); -+ return -EINVAL; -+ } -+ if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) { -+ DRM_ERROR("Too many oom cmds %d.\n", arg->oom_size); -+ return -EINVAL; -+ } -+ -+ task = drm_calloc(1, sizeof(*task), DRM_MEM_DRIVER); -+ if (!task) -+ return -ENOMEM; -+ -+ atomic_set(&task->buf.done, 1); -+ task->engine = engine; -+ INIT_LIST_HEAD(&task->head); -+ INIT_LIST_HEAD(&task->buf.head); -+ if (ta_cmd_buffer && arg->ta_size != 0) { -+ task->ta_cmd_size = arg->ta_size; -+ ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer, -+ arg->ta_offset, -+ arg->ta_size, -+ PSB_ENGINE_TA, task->ta_cmds); -+ if (ret) -+ goto out_err; -+ } -+ if (raster_cmd_buffer) { -+ task->raster_cmd_size = arg->cmdbuf_size; -+ ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer, -+ arg->cmdbuf_offset, -+ arg->cmdbuf_size, -+ PSB_ENGINE_TA, -+ task->raster_cmds); -+ if (ret) -+ goto out_err; -+ } -+ if (oom_cmd_buffer && arg->oom_size != 0) { -+ task->oom_cmd_size = arg->oom_size; -+ ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer, -+ arg->oom_offset, -+ arg->oom_size, -+ PSB_ENGINE_TA, -+ task->oom_cmds); -+ if (ret) -+ goto out_err; -+ } -+ task->task_type = task_type; -+ task->flags = flags; -+ if (scene) -+ task->scene = psb_scene_ref(scene); -+ -+ *task_p = task; -+ return 0; -+out_err: -+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER); -+ *task_p = NULL; -+ return ret; -+} -+ -+int psb_cmdbuf_ta(struct drm_file *priv, -+ struct psb_context *context, -+ struct drm_psb_cmdbuf_arg *arg, -+ struct ttm_buffer_object *cmd_buffer, -+ struct ttm_buffer_object *ta_buffer, -+ struct ttm_buffer_object *oom_buffer, -+ struct psb_scene *scene, -+ struct psb_feedback_info *feedback, -+ struct psb_ttm_fence_rep *fence_arg) -+{ -+ struct drm_device *dev = priv->minor->dev; -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ struct ttm_fence_object *fence = NULL; -+ struct psb_task *task = NULL; -+ int ret; -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ uint32_t sequence; -+ -+ PSB_DEBUG_RENDER("Cmdbuf ta\n"); -+ -+ ret = psb_setup_task(dev, arg, cmd_buffer, ta_buffer, -+ oom_buffer, scene, -+ psb_ta_task, PSB_ENGINE_TA, -+ 
PSB_FIRE_FLAG_RASTER_DEALLOC, &task); -+ -+ if (ret) -+ goto out_err; -+ -+ task->feedback = *feedback; -+ mutex_lock(&dev_priv->reset_mutex); -+ -+ /* -+ * Hand the task over to the scheduler. -+ */ -+ -+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA); -+ -+ task->ta_complete_action = PSB_RASTER; -+ task->raster_complete_action = PSB_RETURN; -+ sequence = task->sequence; -+ -+ spin_lock_irq(&scheduler->lock); -+ -+ list_add_tail(&task->head, &scheduler->ta_queue); -+ PSB_DEBUG_RENDER("queued ta %u\n", task->sequence); -+ -+ psb_schedule_ta(dev_priv, scheduler); -+ -+ /** -+ * From this point we may no longer dereference task, -+ * as the object it points to may be freed by another thread. -+ */ -+ -+ task = NULL; -+ spin_unlock_irq(&scheduler->lock); -+ mutex_unlock(&dev_priv->reset_mutex); -+ -+ psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types, -+ arg->fence_flags, -+ &context->validate_list, fence_arg, &fence); -+ ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence); -+ -+ if (fence) { -+ spin_lock_irq(&scheduler->lock); -+ psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA, -+ sequence, _PSB_FENCE_EXE_SHIFT, 1); -+ spin_unlock_irq(&scheduler->lock); -+ fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE; -+ } -+ -+out_err: -+ if (ret && ret != -ERESTART) -+ DRM_ERROR("TA task queue job failed.\n"); -+ -+ if (fence) { -+#ifdef PSB_WAIT_FOR_TA_COMPLETION -+ ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE | -+ _PSB_FENCE_TYPE_TA_DONE); -+#ifdef PSB_BE_PARANOID -+ ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE | -+ _PSB_FENCE_TYPE_SCENE_DONE); -+#endif -+#endif -+ ttm_fence_object_unref(&fence); -+ } -+ return ret; -+} -+ -+int psb_cmdbuf_raster(struct drm_file *priv, -+ struct psb_context *context, -+ struct drm_psb_cmdbuf_arg *arg, -+ struct ttm_buffer_object *cmd_buffer, -+ struct psb_ttm_fence_rep *fence_arg) -+{ -+ struct drm_device *dev = priv->minor->dev; -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ struct ttm_fence_object *fence = NULL; -+ struct psb_task *task = NULL; -+ int ret; -+ uint32_t sequence; -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ -+ PSB_DEBUG_RENDER("Cmdbuf Raster\n"); -+ -+ ret = psb_setup_task(dev, arg, cmd_buffer, NULL, NULL, -+ NULL, psb_raster_task, -+ PSB_ENGINE_TA, 0, &task); -+ -+ if (ret) -+ goto out_err; -+ -+ /* -+ * Hand the task over to the scheduler. -+ */ -+ -+ mutex_lock(&dev_priv->reset_mutex); -+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA); -+ task->ta_complete_action = PSB_RASTER; -+ task->raster_complete_action = PSB_RETURN; -+ sequence = task->sequence; -+ -+ spin_lock_irq(&scheduler->lock); -+ list_add_tail(&task->head, &scheduler->ta_queue); -+ PSB_DEBUG_RENDER("queued raster %u\n", task->sequence); -+ psb_schedule_ta(dev_priv, scheduler); -+ -+ /** -+ * From this point we may no longer dereference task, -+ * as the object it points to may be freed by another thread. 
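-+ *
-+ * Ownership passed to the scheduler when the task was queued under
-+ * scheduler->lock above; the IRQ path may complete and free it as
-+ * soon as the lock is released. Only the local 'sequence' copy taken
-+ * earlier is safe to use afterwards, as in the psb_report_fence()
-+ * call below.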
-+ */ -+ -+ task = NULL; -+ spin_unlock_irq(&scheduler->lock); -+ mutex_unlock(&dev_priv->reset_mutex); -+ -+ psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types, -+ arg->fence_flags, -+ &context->validate_list, fence_arg, &fence); -+ -+ ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence); -+ if (fence) { -+ spin_lock_irq(&scheduler->lock); -+ psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA, sequence, -+ _PSB_FENCE_EXE_SHIFT, 1); -+ spin_unlock_irq(&scheduler->lock); -+ fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE; -+ } -+out_err: -+ if (ret && ret != -ERESTART) -+ DRM_ERROR("Raster task queue job failed.\n"); -+ -+ if (fence) { -+#ifdef PSB_WAIT_FOR_RASTER_COMPLETION -+ ttm_fence_object_wait(fence, 1, 1, fence->type); -+#endif -+ ttm_fence_object_unref(&fence); -+ } -+ -+ return ret; -+} -+ -+#ifdef FIX_TG_16 -+ -+static int psb_check_2d_idle(struct drm_psb_private *dev_priv) -+{ -+ if (psb_2d_trylock(dev_priv)) { -+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) && -+ !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & -+ _PSB_C2B_STATUS_BUSY))) { -+ return 0; -+ } -+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0) -+ psb_2D_irq_on(dev_priv); -+ -+ PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT); -+ PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT); -+ (void) PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT); -+ -+ psb_2d_atomic_unlock(dev_priv); -+ } -+ -+ atomic_set(&dev_priv->ta_wait_2d, 1); -+ return -EBUSY; -+} -+ -+static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv) -+{ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ -+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) { -+ psb_schedule_ta(dev_priv, scheduler); -+ if (atomic_read(&dev_priv->waiters_2d) != 0) -+ wake_up(&dev_priv->queue_2d); -+ } -+} -+ -+void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv) -+{ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ unsigned long irq_flags; -+ -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) { -+ atomic_set(&dev_priv->ta_wait_2d, 0); -+ psb_2D_irq_off(dev_priv); -+ psb_schedule_ta(dev_priv, scheduler); -+ if (atomic_read(&dev_priv->waiters_2d) != 0) -+ wake_up(&dev_priv->queue_2d); -+ } -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+} -+ -+/* -+ * 2D locking functions. Can't use a mutex since the trylock() and -+ * unlock() methods need to be accessible from interrupt context. 
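-+ *
-+ * A minimal sketch of the scheme, matching psb_2d_trylock() and
-+ * psb_2d_atomic_unlock() below (dev_priv->lock_2d is an atomic_t
-+ * used as a binary flag, so both ends are safe in IRQ context):
-+ *
-+ *	locked = (atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0);
-+ *	... critical section ...
-+ *	atomic_set(&dev_priv->lock_2d, 0);
-+ *	wake_up(&dev_priv->queue_2d);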
-+ */ -+ -+int psb_2d_trylock(struct drm_psb_private *dev_priv) -+{ -+ return atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0; -+} -+ -+void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv) -+{ -+ atomic_set(&dev_priv->lock_2d, 0); -+ if (atomic_read(&dev_priv->waiters_2d) != 0) -+ wake_up(&dev_priv->queue_2d); -+} -+ -+void psb_2d_unlock(struct drm_psb_private *dev_priv) -+{ -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ unsigned long irq_flags; -+ -+ spin_lock_irqsave(&scheduler->lock, irq_flags); -+ psb_2d_atomic_unlock(dev_priv); -+ if (atomic_read(&dev_priv->ta_wait_2d) != 0) -+ psb_atomic_resume_ta_2d_idle(dev_priv); -+ spin_unlock_irqrestore(&scheduler->lock, irq_flags); -+} -+ -+void psb_2d_lock(struct drm_psb_private *dev_priv) -+{ -+ atomic_inc(&dev_priv->waiters_2d); -+ wait_event(dev_priv->queue_2d, -+ atomic_read(&dev_priv->ta_wait_2d) == 0); -+ wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv)); -+ atomic_dec(&dev_priv->waiters_2d); -+} -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/psb_schedule.h b/drivers/gpu/drm/psb/psb_schedule.h ---- a/drivers/gpu/drm/psb/psb_schedule.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_schedule.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,176 @@ -+/************************************************************************** -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. 
-+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com> -+ */ -+ -+#ifndef _PSB_SCHEDULE_H_ -+#define _PSB_SCHEDULE_H_ -+ -+#include <drm/drmP.h> -+ -+struct psb_context; -+ -+enum psb_task_type { -+ psb_ta_midscene_task, -+ psb_ta_task, -+ psb_raster_task, -+ psb_freescene_task -+}; -+ -+#define PSB_MAX_TA_CMDS 60 -+#define PSB_MAX_RASTER_CMDS 60 -+#define PSB_MAX_OOM_CMDS (DRM_PSB_NUM_RASTER_USE_REG * 2 + 6) -+ -+struct psb_xhw_buf { -+ struct list_head head; -+ int copy_back; -+ atomic_t done; -+ struct drm_psb_xhw_arg arg; -+ -+}; -+ -+struct psb_feedback_info { -+ struct ttm_buffer_object *bo; -+ struct page *page; -+ uint32_t offset; -+}; -+ -+struct psb_task { -+ struct list_head head; -+ struct psb_scene *scene; -+ struct psb_feedback_info feedback; -+ enum psb_task_type task_type; -+ uint32_t engine; -+ uint32_t sequence; -+ uint32_t ta_cmds[PSB_MAX_TA_CMDS]; -+ uint32_t raster_cmds[PSB_MAX_RASTER_CMDS]; -+ uint32_t oom_cmds[PSB_MAX_OOM_CMDS]; -+ uint32_t ta_cmd_size; -+ uint32_t raster_cmd_size; -+ uint32_t oom_cmd_size; -+ uint32_t feedback_offset; -+ uint32_t ta_complete_action; -+ uint32_t raster_complete_action; -+ uint32_t hw_cookie; -+ uint32_t flags; -+ uint32_t reply_flags; -+ uint32_t aborting; -+ struct psb_xhw_buf buf; -+}; -+ -+struct psb_hw_scene { -+ struct list_head head; -+ uint32_t context_number; -+ -+ /* -+ * This pointer does not refcount the last_scene_buffer, -+ * so we must make sure it is set to NULL before destroying -+ * the corresponding task. -+ */ -+ -+ struct psb_scene *last_scene; -+}; -+ -+struct psb_scene; -+struct drm_psb_private; -+ -+struct psb_scheduler_seq { -+ uint32_t sequence; -+ int reported; -+}; -+ -+struct psb_scheduler { -+ struct drm_device *dev; -+ struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES]; -+ struct psb_hw_scene hs[PSB_NUM_HW_SCENES]; -+ struct mutex task_wq_mutex; -+ spinlock_t lock; -+ struct list_head hw_scenes; -+ struct list_head ta_queue; -+ struct list_head raster_queue; -+ struct list_head hp_raster_queue; -+ struct list_head task_done_queue; -+ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES]; -+ struct psb_task *feedback_task; -+ int ta_state; -+ struct psb_hw_scene *pending_hw_scene; -+ uint32_t pending_hw_scene_seq; -+ struct delayed_work wq; -+ struct psb_scene_pool *pool; -+ uint32_t idle_count; -+ int idle; -+ wait_queue_head_t idle_queue; -+ unsigned long ta_end_jiffies; -+ unsigned long total_ta_jiffies; -+ unsigned long raster_end_jiffies; -+ unsigned long total_raster_jiffies; -+}; -+ -+#define PSB_RF_FIRE_TA (1 << 0) -+#define PSB_RF_OOM (1 << 1) -+#define PSB_RF_OOM_REPLY (1 << 2) -+#define PSB_RF_TERMINATE (1 << 3) -+#define PSB_RF_TA_DONE (1 << 4) -+#define PSB_RF_FIRE_RASTER (1 << 5) -+#define PSB_RF_RASTER_DONE (1 << 6) -+#define PSB_RF_DEALLOC (1 << 7) -+ -+extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv, -+ int shareable, -+ uint32_t w, uint32_t h); -+extern uint32_t psb_scene_handle(struct psb_scene *scene); -+extern int psb_scheduler_init(struct drm_device *dev, -+ struct psb_scheduler *scheduler); -+extern void psb_scheduler_takedown(struct psb_scheduler *scheduler); -+extern int psb_cmdbuf_ta(struct drm_file *priv, -+ struct psb_context *context, -+ struct drm_psb_cmdbuf_arg *arg, -+ struct ttm_buffer_object *cmd_buffer, -+ struct ttm_buffer_object *ta_buffer, -+ struct ttm_buffer_object *oom_buffer, -+ struct psb_scene *scene, -+ struct psb_feedback_info 
*feedback, -+ struct psb_ttm_fence_rep *fence_arg); -+extern int psb_cmdbuf_raster(struct drm_file *priv, -+ struct psb_context *context, -+ struct drm_psb_cmdbuf_arg *arg, -+ struct ttm_buffer_object *cmd_buffer, -+ struct psb_ttm_fence_rep *fence_arg); -+extern void psb_scheduler_handler(struct drm_psb_private *dev_priv, -+ uint32_t status); -+extern void psb_scheduler_pause(struct drm_psb_private *dev_priv); -+extern void psb_scheduler_restart(struct drm_psb_private *dev_priv); -+extern int psb_scheduler_idle(struct drm_psb_private *dev_priv); -+extern int psb_scheduler_finished(struct drm_psb_private *dev_priv); -+ -+extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv, -+ int *lockup, int *idle); -+extern void psb_scheduler_reset(struct drm_psb_private *dev_priv, -+ int error_condition); -+extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv); -+extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene); -+extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv); -+extern int psb_extend_timeout(struct drm_psb_private *dev_priv, -+ uint32_t xhw_lockup); -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/psb_setup.c b/drivers/gpu/drm/psb/psb_setup.c ---- a/drivers/gpu/drm/psb/psb_setup.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_setup.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,18 @@ -+#include <drm/drmP.h> -+#include <drm/drm.h> -+#include <drm/drm_crtc.h> -+#include <drm/drm_edid.h> -+#include "psb_intel_drv.h" -+#include "psb_drv.h" -+#include "psb_intel_reg.h" -+ -+/* Fixed name */ -+#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC" -+#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD" -+ -+#include "psb_intel_i2c.c" -+#include "psb_intel_sdvo.c" -+#include "psb_intel_modes.c" -+#include "psb_intel_lvds.c" -+#include "psb_intel_dsi.c" -+#include "psb_intel_display.c" -diff -uNr a/drivers/gpu/drm/psb/psb_sgx.c b/drivers/gpu/drm/psb/psb_sgx.c ---- a/drivers/gpu/drm/psb/psb_sgx.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_sgx.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,1869 @@ -+/************************************************************************** -+ * Copyright (c) 2007, Intel Corporation. -+ * All Rights Reserved. -+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. 
-+ * -+ **************************************************************************/ -+/* -+ */ -+ -+#include <drm/drmP.h> -+#include "psb_drv.h" -+#include "psb_drm.h" -+#include "psb_reg.h" -+#include "psb_scene.h" -+#include "psb_msvdx.h" -+#include "lnc_topaz.h" -+#include "ttm/ttm_bo_api.h" -+#include "ttm/ttm_execbuf_util.h" -+#include "ttm/ttm_userobj_api.h" -+#include "ttm/ttm_placement_common.h" -+#include "psb_sgx.h" -+ -+static inline int psb_same_page(unsigned long offset, -+ unsigned long offset2) -+{ -+ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK); -+} -+ -+static inline unsigned long psb_offset_end(unsigned long offset, -+ unsigned long end) -+{ -+ offset = (offset + PAGE_SIZE) & PAGE_MASK; -+ return (end < offset) ? end : offset; -+} -+ -+static void psb_idle_engine(struct drm_device *dev, int engine); -+ -+struct psb_dstbuf_cache { -+ unsigned int dst; -+ struct ttm_buffer_object *dst_buf; -+ unsigned long dst_offset; -+ uint32_t *dst_page; -+ unsigned int dst_page_offset; -+ struct ttm_bo_kmap_obj dst_kmap; -+ bool dst_is_iomem; -+}; -+ -+struct psb_validate_buffer { -+ struct ttm_validate_buffer base; -+ struct psb_validate_req req; -+ int ret; -+ struct psb_validate_arg __user *user_val_arg; -+ uint32_t flags; -+ uint32_t offset; -+ int po_correct; -+}; -+ -+ -+ -+#define PSB_REG_GRAN_SHIFT 2 -+#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT) -+#define PSB_MAX_REG 0x1000 -+ -+static const uint32_t disallowed_ranges[][2] = { -+ {0x0000, 0x0200}, -+ {0x0208, 0x0214}, -+ {0x021C, 0x0224}, -+ {0x0230, 0x0234}, -+ {0x0248, 0x024C}, -+ {0x0254, 0x0358}, -+ {0x0428, 0x0428}, -+ {0x0430, 0x043C}, -+ {0x0498, 0x04B4}, -+ {0x04CC, 0x04D8}, -+ {0x04E0, 0x07FC}, -+ {0x0804, 0x0A14}, -+ {0x0A4C, 0x0A58}, -+ {0x0A68, 0x0A80}, -+ {0x0AA0, 0x0B1C}, -+ {0x0B2C, 0x0CAC}, -+ {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY} -+}; -+ -+static uint32_t psb_disallowed_regs[PSB_MAX_REG / -+ (PSB_REG_GRANULARITY * -+ (sizeof(uint32_t) << 3))]; -+ -+static inline int psb_disallowed(uint32_t reg) -+{ -+ reg >>= PSB_REG_GRAN_SHIFT; -+ return (psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0; -+} -+ -+void psb_init_disallowed(void) -+{ -+ int i; -+ uint32_t reg, tmp; -+ static int initialized; -+ -+ if (initialized) -+ return; -+ -+ initialized = 1; -+ memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs)); -+ -+ for (i = 0; -+ i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t))); -+ ++i) { -+ for (reg = disallowed_ranges[i][0]; -+ reg <= disallowed_ranges[i][1]; reg += 4) { -+ tmp = reg >> 2; -+ psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31)); -+ } -+ } -+} -+ -+static int psb_memcpy_check(uint32_t *dst, const uint32_t *src, -+ uint32_t size) -+{ -+ size >>= 3; -+ while (size--) { -+ if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) { -+ DRM_ERROR("Forbidden SGX register access: " -+ "0x%04x.\n", *src); -+ return -EPERM; -+ } -+ *dst++ = *src++; -+ *dst++ = *src++; -+ } -+ return 0; -+} -+ -+int psb_2d_wait_available(struct drm_psb_private *dev_priv, -+ unsigned size) -+{ -+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF); -+ int ret = 0; -+ -+retry: -+ if (avail < size) { -+#if 0 -+ /* We'd ideally -+ * like to have an IRQ-driven event here. 
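-+ * The disabled branch would sleep on dev_priv->event_2d_queue via
-+ * DRM_WAIT_ON() until the 2D IRQ reports that the SOCIF FIFO has
-+ * room; the live #else path simply re-reads PSB_CR_2D_SOCIF and
-+ * spins until 'avail' reaches the requested size.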
-+ */ -+ -+ psb_2D_irq_on(dev_priv); -+ DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ, -+ ((avail = -+ PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size)); -+ psb_2D_irq_off(dev_priv); -+ if (ret == 0) -+ return 0; -+ if (ret == -EINTR) { -+ ret = 0; -+ goto retry; -+ } -+#else -+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF); -+ goto retry; -+#endif -+ } -+ return ret; -+} -+ -+int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf, -+ unsigned size) -+{ -+ int ret = 0; -+ int i; -+ unsigned submit_size; -+ -+ while (size > 0) { -+ submit_size = (size < 0x60) ? size : 0x60; -+ size -= submit_size; -+ ret = psb_2d_wait_available(dev_priv, submit_size); -+ if (ret) -+ return ret; -+ -+ submit_size <<= 2; -+ mutex_lock(&dev_priv->reset_mutex); -+ for (i = 0; i < submit_size; i += 4) { -+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i); -+ } -+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4); -+ mutex_unlock(&dev_priv->reset_mutex); -+ } -+ return 0; -+} -+ -+int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence) -+{ -+ uint32_t buffer[8]; -+ uint32_t *bufp = buffer; -+ int ret; -+ -+ *bufp++ = PSB_2D_FENCE_BH; -+ -+ *bufp++ = PSB_2D_DST_SURF_BH | -+ PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT); -+ *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset; -+ -+ *bufp++ = PSB_2D_BLIT_BH | -+ PSB_2D_ROT_NONE | -+ PSB_2D_COPYORDER_TL2BR | -+ PSB_2D_DSTCK_DISABLE | -+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY; -+ -+ *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT; -+ *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) | -+ (0 << PSB_2D_DST_YSTART_SHIFT); -+ *bufp++ = -+ (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT); -+ -+ *bufp++ = PSB_2D_FLUSH_BH; -+ -+ psb_2d_lock(dev_priv); -+ ret = psb_2d_submit(dev_priv, buffer, bufp - buffer); -+ psb_2d_unlock(dev_priv); -+ -+ if (!ret) -+ psb_schedule_watchdog(dev_priv); -+ return ret; -+} -+ -+int psb_emit_2d_copy_blit(struct drm_device *dev, -+ uint32_t src_offset, -+ uint32_t dst_offset, uint32_t pages, -+ int direction) -+{ -+ uint32_t cur_pages; -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ uint32_t buf[10]; -+ uint32_t *bufp; -+ uint32_t xstart; -+ uint32_t ystart; -+ uint32_t blit_cmd; -+ uint32_t pg_add; -+ int ret = 0; -+ -+ if (!dev_priv) -+ return 0; -+ -+ if (direction) { -+ pg_add = (pages - 1) << PAGE_SHIFT; -+ src_offset += pg_add; -+ dst_offset += pg_add; -+ } -+ -+ blit_cmd = PSB_2D_BLIT_BH | -+ PSB_2D_ROT_NONE | -+ PSB_2D_DSTCK_DISABLE | -+ PSB_2D_SRCCK_DISABLE | -+ PSB_2D_USE_PAT | -+ PSB_2D_ROP3_SRCCOPY | -+ (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR); -+ xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0; -+ -+ psb_2d_lock(dev_priv); -+ while (pages > 0) { -+ cur_pages = pages; -+ if (cur_pages > 2048) -+ cur_pages = 2048; -+ pages -= cur_pages; -+ ystart = (direction) ? 
cur_pages - 1 : 0; -+ -+ bufp = buf; -+ *bufp++ = PSB_2D_FENCE_BH; -+ -+ *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB | -+ (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT); -+ *bufp++ = dst_offset; -+ *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB | -+ (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT); -+ *bufp++ = src_offset; -+ *bufp++ = -+ PSB_2D_SRC_OFF_BH | (xstart << -+ PSB_2D_SRCOFF_XSTART_SHIFT) | -+ (ystart << PSB_2D_SRCOFF_YSTART_SHIFT); -+ *bufp++ = blit_cmd; -+ *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) | -+ (ystart << PSB_2D_DST_YSTART_SHIFT); -+ *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) | -+ (cur_pages << PSB_2D_DST_YSIZE_SHIFT); -+ -+ ret = psb_2d_submit(dev_priv, buf, bufp - buf); -+ if (ret) -+ goto out; -+ pg_add = -+ (cur_pages << PAGE_SHIFT) * ((direction) ? -1 : 1); -+ src_offset += pg_add; -+ dst_offset += pg_add; -+ } -+out: -+ psb_2d_unlock(dev_priv); -+ return ret; -+} -+ -+void psb_init_2d(struct drm_psb_private *dev_priv) -+{ -+ spin_lock_init(&dev_priv->sequence_lock); -+ psb_reset(dev_priv, 1); -+ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start; -+ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE); -+ (void) PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE); -+} -+ -+int psb_idle_2d(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ unsigned long _end = jiffies + DRM_HZ; -+ int busy = 0; -+ -+ /* -+ * First idle the 2D engine. -+ */ -+ -+ if (dev_priv->engine_lockup_2d) -+ return -EBUSY; -+ -+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) && -+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == -+ 0)) -+ goto out; -+ -+ do { -+ busy = -+ (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY); -+ } while (busy && !time_after_eq(jiffies, _end)); -+ -+ if (busy) -+ busy = -+ (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY); -+ if (busy) -+ goto out; -+ -+ do { -+ busy = -+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & -+ _PSB_C2B_STATUS_BUSY) -+ != 0); -+ } while (busy && !time_after_eq(jiffies, _end)); -+ if (busy) -+ busy = -+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & -+ _PSB_C2B_STATUS_BUSY) -+ != 0); -+ -+out: -+ if (busy) -+ dev_priv->engine_lockup_2d = 1; -+ -+ return (busy) ? -EBUSY : 0; -+} -+ -+int psb_idle_3d(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ struct psb_scheduler *scheduler = &dev_priv->scheduler; -+ int ret; -+ -+ ret = wait_event_timeout(scheduler->idle_queue, -+ psb_scheduler_finished(dev_priv), -+ DRM_HZ * 10); -+ -+ return (ret < 1) ? 
-EBUSY : 0; -+} -+ -+static int psb_check_presumed(struct psb_validate_req *req, -+ struct ttm_buffer_object *bo, -+ struct psb_validate_arg __user *data, -+ int *presumed_ok) -+{ -+ struct psb_validate_req __user *user_req = &(data->d.req); -+ -+ *presumed_ok = 0; -+ -+ if (bo->mem.mem_type == TTM_PL_SYSTEM) { -+ *presumed_ok = 1; -+ return 0; -+ } -+ -+ if (unlikely(!(req->presumed_flags & PSB_USE_PRESUMED))) -+ return 0; -+ -+ if (bo->offset == req->presumed_gpu_offset) { -+ *presumed_ok = 1; -+ return 0; -+ } -+ -+ return __put_user(req->presumed_flags & ~PSB_USE_PRESUMED, -+ &user_req->presumed_flags); -+} -+ -+ -+static void psb_unreference_buffers(struct psb_context *context) -+{ -+ struct ttm_validate_buffer *entry, *next; -+ struct psb_validate_buffer *vbuf; -+ struct list_head *list = &context->validate_list; -+ -+ list_for_each_entry_safe(entry, next, list, head) { -+ vbuf = -+ container_of(entry, struct psb_validate_buffer, base); -+ list_del(&entry->head); -+ ttm_bo_unref(&entry->bo); -+ } -+ -+ list = &context->kern_validate_list; -+ -+ list_for_each_entry_safe(entry, next, list, head) { -+ vbuf = -+ container_of(entry, struct psb_validate_buffer, base); -+ list_del(&entry->head); -+ ttm_bo_unref(&entry->bo); -+ } -+} -+ -+ -+static int psb_lookup_validate_buffer(struct drm_file *file_priv, -+ uint64_t data, -+ struct psb_validate_buffer *item) -+{ -+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; -+ -+ item->user_val_arg = -+ (struct psb_validate_arg __user *) (unsigned long) data; -+ -+ if (unlikely(copy_from_user(&item->req, &item->user_val_arg->d.req, -+ sizeof(item->req)) != 0)) { -+ DRM_ERROR("Lookup copy fault.\n"); -+ return -EFAULT; -+ } -+ -+ item->base.bo = -+ ttm_buffer_object_lookup(tfile, item->req.buffer_handle); -+ -+ if (unlikely(item->base.bo == NULL)) { -+ DRM_ERROR("Bo lookup fault.\n"); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static int psb_reference_buffers(struct drm_file *file_priv, -+ uint64_t data, -+ struct psb_context *context) -+{ -+ struct psb_validate_buffer *item; -+ int ret; -+ -+ while (likely(data != 0)) { -+ if (unlikely(context->used_buffers >= -+ PSB_NUM_VALIDATE_BUFFERS)) { -+ DRM_ERROR("Too many buffers " -+ "on validate list.\n"); -+ ret = -EINVAL; -+ goto out_err0; -+ } -+ -+ item = &context->buffers[context->used_buffers]; -+ -+ ret = psb_lookup_validate_buffer(file_priv, data, item); -+ if (unlikely(ret != 0)) -+ goto out_err0; -+ -+ item->base.reserved = 0; -+ list_add_tail(&item->base.head, &context->validate_list); -+ context->used_buffers++; -+ data = item->req.next; -+ } -+ return 0; -+ -+out_err0: -+ psb_unreference_buffers(context); -+ return ret; -+} -+ -+static int -+psb_placement_fence_type(struct ttm_buffer_object *bo, -+ uint64_t set_val_flags, -+ uint64_t clr_val_flags, -+ uint32_t new_fence_class, -+ uint32_t *new_fence_type) -+{ -+ int ret; -+ uint32_t n_fence_type; -+ uint32_t set_flags = set_val_flags & 0xFFFFFFFF; -+ uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF; -+ struct ttm_fence_object *old_fence; -+ uint32_t old_fence_type; -+ -+ if (unlikely -+ (!(set_val_flags & -+ (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) { -+ DRM_ERROR -+ ("GPU access type (read / write) is not indicated.\n"); -+ return -EINVAL; -+ } -+ -+ ret = ttm_bo_check_placement(bo, set_flags, clr_flags); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ switch (new_fence_class) { -+ case PSB_ENGINE_TA: -+ n_fence_type = _PSB_FENCE_TYPE_EXE | -+ _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE; -+ if (set_val_flags & 
PSB_BO_FLAG_TA) -+ n_fence_type &= ~_PSB_FENCE_TYPE_RASTER_DONE; -+ if (set_val_flags & PSB_BO_FLAG_COMMAND) -+ n_fence_type &= -+ ~(_PSB_FENCE_TYPE_RASTER_DONE | -+ _PSB_FENCE_TYPE_TA_DONE); -+ if (set_val_flags & PSB_BO_FLAG_SCENE) -+ n_fence_type |= _PSB_FENCE_TYPE_SCENE_DONE; -+ if (set_val_flags & PSB_BO_FLAG_FEEDBACK) -+ n_fence_type |= _PSB_FENCE_TYPE_FEEDBACK; -+ break; -+ default: -+ n_fence_type = _PSB_FENCE_TYPE_EXE; -+ } -+ -+ *new_fence_type = n_fence_type; -+ old_fence = (struct ttm_fence_object *) bo->sync_obj; -+ old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg; -+ -+ if (old_fence && ((new_fence_class != old_fence->fence_class) || -+ ((n_fence_type ^ old_fence_type) & -+ old_fence_type))) { -+ ret = ttm_bo_wait(bo, 0, 1, 0); -+ if (unlikely(ret != 0)) -+ return ret; -+ } -+ -+ bo->proposed_flags = (bo->proposed_flags | set_flags) -+ & ~clr_flags & TTM_PL_MASK_MEMTYPE; -+ -+ return 0; -+} -+ -+int psb_validate_kernel_buffer(struct psb_context *context, -+ struct ttm_buffer_object *bo, -+ uint32_t fence_class, -+ uint64_t set_flags, uint64_t clr_flags) -+{ -+ struct psb_validate_buffer *item; -+ uint32_t cur_fence_type; -+ int ret; -+ -+ if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) { -+ DRM_ERROR("Out of free validation buffer entries for " -+ "kernel buffer validation.\n"); -+ return -ENOMEM; -+ } -+ -+ item = &context->buffers[context->used_buffers]; -+ item->user_val_arg = NULL; -+ item->base.reserved = 0; -+ -+ ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq); -+ if (unlikely(ret != 0)) -+ goto out_unlock; -+ -+ mutex_lock(&bo->mutex); -+ ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class, -+ &cur_fence_type); -+ if (unlikely(ret != 0)) { -+ ttm_bo_unreserve(bo); -+ goto out_unlock; -+ } -+ -+ item->base.bo = ttm_bo_reference(bo); -+ item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type; -+ item->base.reserved = 1; -+ -+ list_add_tail(&item->base.head, &context->kern_validate_list); -+ context->used_buffers++; -+ -+ ret = ttm_buffer_object_validate(bo, 1, 0); -+ if (unlikely(ret != 0)) -+ goto out_unlock; -+ -+ item->offset = bo->offset; -+ item->flags = bo->mem.flags; -+ context->fence_types |= cur_fence_type; -+ -+out_unlock: -+ mutex_unlock(&bo->mutex); -+ return ret; -+} -+ -+ -+static int psb_validate_buffer_list(struct drm_file *file_priv, -+ uint32_t fence_class, -+ struct psb_context *context, -+ int *po_correct) -+{ -+ struct psb_validate_buffer *item; -+ struct ttm_buffer_object *bo; -+ int ret; -+ struct psb_validate_req *req; -+ uint32_t fence_types = 0; -+ uint32_t cur_fence_type; -+ struct ttm_validate_buffer *entry; -+ struct list_head *list = &context->validate_list; -+ -+ *po_correct = 1; -+ -+ list_for_each_entry(entry, list, head) { -+ item = -+ container_of(entry, struct psb_validate_buffer, base); -+ bo = entry->bo; -+ item->ret = 0; -+ req = &item->req; -+ -+ mutex_lock(&bo->mutex); -+ ret = psb_placement_fence_type(bo, -+ req->set_flags, -+ req->clear_flags, -+ fence_class, -+ &cur_fence_type); -+ if (unlikely(ret != 0)) -+ goto out_err; -+ -+ ret = ttm_buffer_object_validate(bo, 1, 0); -+ -+ if (unlikely(ret != 0)) -+ goto out_err; -+ -+ fence_types |= cur_fence_type; -+ entry->new_sync_obj_arg = (void *) -+ (unsigned long) cur_fence_type; -+ -+ item->offset = bo->offset; -+ item->flags = bo->mem.flags; -+ mutex_unlock(&bo->mutex); -+ -+ ret = -+ psb_check_presumed(&item->req, bo, item->user_val_arg, -+ &item->po_correct); -+ if (unlikely(ret != 0)) -+ goto out_err; -+ -+ if 
(unlikely(!item->po_correct)) -+ *po_correct = 0; -+ -+ item++; -+ } -+ -+ context->fence_types |= fence_types; -+ -+ return 0; -+out_err: -+ mutex_unlock(&bo->mutex); -+ item->ret = ret; -+ return ret; -+} -+ -+ -+int -+psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t *regs, -+ unsigned int cmds) -+{ -+ int i; -+ -+ /* -+ * cmds is 32-bit words. -+ */ -+ -+ cmds >>= 1; -+ for (i = 0; i < cmds; ++i) { -+ PSB_WSGX32(regs[1], regs[0]); -+ regs += 2; -+ } -+ wmb(); -+ return 0; -+} -+ -+/* -+ * Security: Block user-space writing to MMU mapping registers. -+ * This is important for security and brings Poulsbo DRM -+ * up to par with the other DRM drivers. Using this, -+ * user-space should not be able to map arbitrary memory -+ * pages to graphics memory, but all user-space processes -+ * basically have access to all buffer objects mapped to -+ * graphics memory. -+ */ -+ -+int -+psb_submit_copy_cmdbuf(struct drm_device *dev, -+ struct ttm_buffer_object *cmd_buffer, -+ unsigned long cmd_offset, -+ unsigned long cmd_size, -+ int engine, uint32_t *copy_buffer) -+{ -+ unsigned long cmd_end = cmd_offset + (cmd_size << 2); -+ struct drm_psb_private *dev_priv = dev->dev_private; -+ unsigned long cmd_page_offset = -+ cmd_offset - (cmd_offset & PAGE_MASK); -+ unsigned long cmd_next; -+ struct ttm_bo_kmap_obj cmd_kmap; -+ uint32_t *cmd_page; -+ unsigned cmds; -+ bool is_iomem; -+ int ret = 0; -+ -+ if (cmd_size == 0) -+ return 0; -+ -+ if (engine == PSB_ENGINE_2D) -+ psb_2d_lock(dev_priv); -+ -+ do { -+ cmd_next = psb_offset_end(cmd_offset, cmd_end); -+ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, -+ 1, &cmd_kmap); -+ -+ if (ret) { -+ if (engine == PSB_ENGINE_2D) -+ psb_2d_unlock(dev_priv); -+ return ret; -+ } -+ cmd_page = ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem); -+ cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2; -+ cmds = (cmd_next - cmd_offset) >> 2; -+ -+ switch (engine) { -+ case PSB_ENGINE_2D: -+ ret = -+ psb_2d_submit(dev_priv, -+ cmd_page + cmd_page_offset, -+ cmds); -+ break; -+ case PSB_ENGINE_RASTERIZER: -+ case PSB_ENGINE_TA: -+ case PSB_ENGINE_HPRAST: -+ PSB_DEBUG_GENERAL("Reg copy.\n"); -+ ret = psb_memcpy_check(copy_buffer, -+ cmd_page + cmd_page_offset, -+ cmds * sizeof(uint32_t)); -+ copy_buffer += cmds; -+ break; -+ default: -+ ret = -EINVAL; -+ } -+ ttm_bo_kunmap(&cmd_kmap); -+ if (ret) -+ break; -+ } while (cmd_offset = cmd_next, cmd_offset != cmd_end); -+ -+ if (engine == PSB_ENGINE_2D) -+ psb_2d_unlock(dev_priv); -+ -+ return ret; -+} -+ -+static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache) -+{ -+ if (dst_cache->dst_page) { -+ ttm_bo_kunmap(&dst_cache->dst_kmap); -+ dst_cache->dst_page = NULL; -+ } -+ dst_cache->dst_buf = NULL; -+ dst_cache->dst = ~0; -+} -+ -+static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache, -+ struct psb_validate_buffer *buffers, -+ unsigned int dst, -+ unsigned long dst_offset) -+{ -+ int ret; -+ -+ PSB_DEBUG_GENERAL("Destination buffer is %d.\n", dst); -+ -+ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) { -+ psb_clear_dstbuf_cache(dst_cache); -+ dst_cache->dst = dst; -+ dst_cache->dst_buf = buffers[dst].base.bo; -+ } -+ -+ if (unlikely -+ (dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) { -+ DRM_ERROR("Relocation destination out of bounds.\n"); -+ return -EINVAL; -+ } -+ -+ if (!psb_same_page(dst_cache->dst_offset, dst_offset) || -+ NULL == dst_cache->dst_page) { -+ if (NULL != dst_cache->dst_page) { -+ ttm_bo_kunmap(&dst_cache->dst_kmap); -+ dst_cache->dst_page = 
NULL; -+ } -+ -+ ret = -+ ttm_bo_kmap(dst_cache->dst_buf, -+ dst_offset >> PAGE_SHIFT, 1, -+ &dst_cache->dst_kmap); -+ if (ret) { -+ DRM_ERROR("Could not map destination buffer for " -+ "relocation.\n"); -+ return ret; -+ } -+ -+ dst_cache->dst_page = -+ ttm_kmap_obj_virtual(&dst_cache->dst_kmap, -+ &dst_cache->dst_is_iomem); -+ dst_cache->dst_offset = dst_offset & PAGE_MASK; -+ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2; -+ } -+ return 0; -+} -+ -+static int psb_apply_reloc(struct drm_psb_private *dev_priv, -+ uint32_t fence_class, -+ const struct drm_psb_reloc *reloc, -+ struct psb_validate_buffer *buffers, -+ int num_buffers, -+ struct psb_dstbuf_cache *dst_cache, -+ int no_wait, int interruptible) -+{ -+ uint32_t val; -+ uint32_t background; -+ unsigned int index; -+ int ret; -+ unsigned int shift; -+ unsigned int align_shift; -+ struct ttm_buffer_object *reloc_bo; -+ -+ -+ PSB_DEBUG_GENERAL("Reloc type %d\n" -+ "\t where 0x%04x\n" -+ "\t buffer 0x%04x\n" -+ "\t mask 0x%08x\n" -+ "\t shift 0x%08x\n" -+ "\t pre_add 0x%08x\n" -+ "\t background 0x%08x\n" -+ "\t dst_buffer 0x%08x\n" -+ "\t arg0 0x%08x\n" -+ "\t arg1 0x%08x\n", -+ reloc->reloc_op, -+ reloc->where, -+ reloc->buffer, -+ reloc->mask, -+ reloc->shift, -+ reloc->pre_add, -+ reloc->background, -+ reloc->dst_buffer, reloc->arg0, reloc->arg1); -+ -+ if (unlikely(reloc->buffer >= num_buffers)) { -+ DRM_ERROR("Illegal relocation buffer %d.\n", -+ reloc->buffer); -+ return -EINVAL; -+ } -+ -+ if (buffers[reloc->buffer].po_correct) -+ return 0; -+ -+ if (unlikely(reloc->dst_buffer >= num_buffers)) { -+ DRM_ERROR -+ ("Illegal destination buffer for relocation %d.\n", -+ reloc->dst_buffer); -+ return -EINVAL; -+ } -+ -+ ret = -+ psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer, -+ reloc->where << 2); -+ if (ret) -+ return ret; -+ -+ reloc_bo = buffers[reloc->buffer].base.bo; -+ -+ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) { -+ DRM_ERROR("Illegal relocation offset add.\n"); -+ return -EINVAL; -+ } -+ -+ switch (reloc->reloc_op) { -+ case PSB_RELOC_OP_OFFSET: -+ val = reloc_bo->offset + reloc->pre_add; -+ break; -+ case PSB_RELOC_OP_2D_OFFSET: -+ val = reloc_bo->offset + reloc->pre_add - -+ dev_priv->mmu_2d_offset; -+ if (unlikely(val >= PSB_2D_SIZE)) { -+ DRM_ERROR("2D relocation out of bounds\n"); -+ return -EINVAL; -+ } -+ break; -+ case PSB_RELOC_OP_PDS_OFFSET: -+ val = -+ reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START; -+ if (unlikely -+ (val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) { -+ DRM_ERROR("PDS relocation out of bounds\n"); -+ return -EINVAL; -+ } -+ break; -+ default: -+ DRM_ERROR("Unimplemented relocation.\n"); -+ return -EINVAL; -+ } -+ -+ shift = -+ (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT; -+ align_shift = -+ (reloc-> -+ shift & PSB_RELOC_ALSHIFT_MASK) >> PSB_RELOC_ALSHIFT_SHIFT; -+ -+ val = ((val >> align_shift) << shift); -+ index = reloc->where - dst_cache->dst_page_offset; -+ -+ background = reloc->background; -+ val = (background & ~reloc->mask) | (val & reloc->mask); -+ dst_cache->dst_page[index] = val; -+ -+ PSB_DEBUG_GENERAL("Reloc buffer %d index 0x%08x, value 0x%08x\n", -+ reloc->dst_buffer, index, -+ dst_cache->dst_page[index]); -+ -+ return 0; -+} -+ -+static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv, -+ unsigned int num_pages) -+{ -+ int ret = 0; -+ -+ spin_lock(&dev_priv->reloc_lock); -+ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) { -+ dev_priv->rel_mapped_pages += num_pages; -+ ret = 1; 
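-+ /*
-+ * Accounting note (no new logic): rel_mapped_pages tracks the
-+ * relocation pages currently kmapped across all callers and may
-+ * never exceed PSB_MAX_RELOC_PAGES; psb_fixup_relocs() below
-+ * subtracts the pages again and wakes rel_mapped_queue once the
-+ * mapping is released.
-+ */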
-+ } -+ spin_unlock(&dev_priv->reloc_lock); -+ return ret; -+} -+ -+static int psb_fixup_relocs(struct drm_file *file_priv, -+ uint32_t fence_class, -+ unsigned int num_relocs, -+ unsigned int reloc_offset, -+ uint32_t reloc_handle, -+ struct psb_context *context, -+ int no_wait, int interruptible) -+{ -+ struct drm_device *dev = file_priv->minor->dev; -+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ struct ttm_buffer_object *reloc_buffer = NULL; -+ unsigned int reloc_num_pages; -+ unsigned int reloc_first_page; -+ unsigned int reloc_last_page; -+ struct psb_dstbuf_cache dst_cache; -+ struct drm_psb_reloc *reloc; -+ struct ttm_bo_kmap_obj reloc_kmap; -+ bool reloc_is_iomem; -+ int count; -+ int ret = 0; -+ int registered = 0; -+ uint32_t num_buffers = context->used_buffers; -+ -+ if (num_relocs == 0) -+ return 0; -+ -+ memset(&dst_cache, 0, sizeof(dst_cache)); -+ memset(&reloc_kmap, 0, sizeof(reloc_kmap)); -+ -+ reloc_buffer = ttm_buffer_object_lookup(tfile, reloc_handle); -+ if (!reloc_buffer) -+ goto out; -+ -+ if (unlikely(atomic_read(&reloc_buffer->reserved) != 1)) { -+ DRM_ERROR("Relocation buffer was not on validate list.\n"); -+ ret = -EINVAL; -+ goto out; -+ } -+ -+ reloc_first_page = reloc_offset >> PAGE_SHIFT; -+ reloc_last_page = -+ (reloc_offset + -+ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT; -+ reloc_num_pages = reloc_last_page - reloc_first_page + 1; -+ reloc_offset &= ~PAGE_MASK; -+ -+ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) { -+ DRM_ERROR("Relocation buffer is too large\n"); -+ ret = -EINVAL; -+ goto out; -+ } -+ -+ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ, -+ (registered = -+ psb_ok_to_map_reloc(dev_priv, reloc_num_pages))); -+ -+ if (ret == -EINTR) { -+ ret = -ERESTART; -+ goto out; -+ } -+ if (ret) { -+ DRM_ERROR("Error waiting for space to map " -+ "relocation buffer.\n"); -+ goto out; -+ } -+ -+ ret = ttm_bo_kmap(reloc_buffer, reloc_first_page, -+ reloc_num_pages, &reloc_kmap); -+ -+ if (ret) { -+ DRM_ERROR("Could not map relocation buffer.\n" -+ "\tReloc buffer id 0x%08x.\n" -+ "\tReloc first page %d.\n" -+ "\tReloc num pages %d.\n", -+ reloc_handle, reloc_first_page, reloc_num_pages); -+ goto out; -+ } -+ -+ reloc = (struct drm_psb_reloc *) -+ ((unsigned long) -+ ttm_kmap_obj_virtual(&reloc_kmap, -+ &reloc_is_iomem) + reloc_offset); -+ -+ for (count = 0; count < num_relocs; ++count) { -+ ret = psb_apply_reloc(dev_priv, fence_class, -+ reloc, context->buffers, -+ num_buffers, &dst_cache, -+ no_wait, interruptible); -+ if (ret) -+ goto out1; -+ reloc++; -+ } -+ -+out1: -+ ttm_bo_kunmap(&reloc_kmap); -+out: -+ if (registered) { -+ spin_lock(&dev_priv->reloc_lock); -+ dev_priv->rel_mapped_pages -= reloc_num_pages; -+ spin_unlock(&dev_priv->reloc_lock); -+ DRM_WAKEUP(&dev_priv->rel_mapped_queue); -+ } -+ -+ psb_clear_dstbuf_cache(&dst_cache); -+ if (reloc_buffer) -+ ttm_bo_unref(&reloc_buffer); -+ return ret; -+} -+ -+void psb_fence_or_sync(struct drm_file *file_priv, -+ uint32_t engine, -+ uint32_t fence_types, -+ uint32_t fence_flags, -+ struct list_head *list, -+ struct psb_ttm_fence_rep *fence_arg, -+ struct ttm_fence_object **fence_p) -+{ -+ struct drm_device *dev = file_priv->minor->dev; -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ struct ttm_fence_device *fdev = &dev_priv->fdev; -+ int ret; -+ struct ttm_fence_object *fence; -+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; -+ uint32_t handle; -+ -+ ret = 
ttm_fence_user_create(fdev, tfile, -+ engine, fence_types, -+ TTM_FENCE_FLAG_EMIT, &fence, &handle); -+ if (ret) { -+ -+ /* -+ * Fence creation failed. -+ * Fall back to synchronous operation and idle the engine. -+ */ -+ -+ psb_idle_engine(dev, engine); -+ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) { -+ -+ /* -+ * Communicate to user-space that -+ * fence creation has failed and that -+ * the engine is idle. -+ */ -+ -+ fence_arg->handle = ~0; -+ fence_arg->error = ret; -+ } -+ -+ ttm_eu_backoff_reservation(list); -+ if (fence_p) -+ *fence_p = NULL; -+ return; -+ } -+ -+ ttm_eu_fence_buffer_objects(list, fence); -+ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) { -+ struct ttm_fence_info info = ttm_fence_get_info(fence); -+ fence_arg->handle = handle; -+ fence_arg->fence_class = ttm_fence_class(fence); -+ fence_arg->fence_type = ttm_fence_types(fence); -+ fence_arg->signaled_types = info.signaled_types; -+ fence_arg->error = 0; -+ } else { -+ ret = -+ ttm_ref_object_base_unref(tfile, handle, -+ ttm_fence_type); -+ BUG_ON(ret); -+ } -+ -+ if (fence_p) -+ *fence_p = fence; -+ else if (fence) -+ ttm_fence_object_unref(&fence); -+} -+ -+ -+ -+static int psb_cmdbuf_2d(struct drm_file *priv, -+ struct list_head *validate_list, -+ uint32_t fence_type, -+ struct drm_psb_cmdbuf_arg *arg, -+ struct ttm_buffer_object *cmd_buffer, -+ struct psb_ttm_fence_rep *fence_arg) -+{ -+ struct drm_device *dev = priv->minor->dev; -+ int ret; -+ -+ ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset, -+ arg->cmdbuf_size, PSB_ENGINE_2D, -+ NULL); -+ if (ret) -+ goto out_unlock; -+ -+ psb_fence_or_sync(priv, PSB_ENGINE_2D, fence_type, -+ arg->fence_flags, validate_list, fence_arg, -+ NULL); -+ -+ mutex_lock(&cmd_buffer->mutex); -+ if (cmd_buffer->sync_obj != NULL) -+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj); -+ mutex_unlock(&cmd_buffer->mutex); -+out_unlock: -+ return ret; -+} -+ -+#if 0 -+static int psb_dump_page(struct ttm_buffer_object *bo, -+ unsigned int page_offset, unsigned int num) -+{ -+ struct ttm_bo_kmap_obj kmobj; -+ int is_iomem; -+ uint32_t *p; -+ int ret; -+ unsigned int i; -+ -+ ret = ttm_bo_kmap(bo, page_offset, 1, &kmobj); -+ if (ret) -+ return ret; -+ -+ p = ttm_kmap_obj_virtual(&kmobj, &is_iomem); -+ for (i = 0; i < num; ++i) -+ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++); -+ -+ ttm_bo_kunmap(&kmobj); -+ return 0; -+} -+#endif -+ -+static void psb_idle_engine(struct drm_device *dev, int engine) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ uint32_t dummy; -+ unsigned long dummy2; -+ -+ switch (engine) { -+ case PSB_ENGINE_2D: -+ -+ /* -+ * Make sure we flush 2D properly using a dummy -+ * fence sequence emit. -+ */ -+ -+ (void) psb_fence_emit_sequence(&dev_priv->fdev, -+ PSB_ENGINE_2D, 0, -+ &dummy, &dummy2); -+ psb_2d_lock(dev_priv); -+ (void) psb_idle_2d(dev); -+ psb_2d_unlock(dev_priv); -+ break; -+ case PSB_ENGINE_TA: -+ case PSB_ENGINE_RASTERIZER: -+ case PSB_ENGINE_HPRAST: -+ (void) psb_idle_3d(dev); -+ break; -+ default: -+ -+ /* -+ * FIXME: Insert video engine idle command here. 
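-+ *
-+ * (The 2D case above idles via a dummy fence blit plus
-+ * psb_idle_2d(), and the 3D engines wait for
-+ * psb_scheduler_finished(); an MSVDX/TOPAZ equivalent would
-+ * belong here.)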
-+ */ -+ -+ break; -+ } -+} -+ -+static int psb_handle_copyback(struct drm_device *dev, -+ struct psb_context *context, -+ int ret) -+{ -+ int err = ret; -+ struct ttm_validate_buffer *entry; -+ struct psb_validate_arg arg; -+ struct list_head *list = &context->validate_list; -+ -+ if (ret) { -+ ttm_eu_backoff_reservation(list); -+ ttm_eu_backoff_reservation(&context->kern_validate_list); -+ } -+ -+ -+ if (ret != -EAGAIN && ret != -EINTR && ret != -ERESTART) { -+ list_for_each_entry(entry, list, head) { -+ struct psb_validate_buffer *vbuf = -+ container_of(entry, struct psb_validate_buffer, -+ base); -+ arg.handled = 1; -+ arg.ret = vbuf->ret; -+ if (!arg.ret) { -+ struct ttm_buffer_object *bo = entry->bo; -+ mutex_lock(&bo->mutex); -+ arg.d.rep.gpu_offset = bo->offset; -+ arg.d.rep.placement = bo->mem.flags; -+ arg.d.rep.fence_type_mask = -+ (uint32_t) (unsigned long) -+ entry->new_sync_obj_arg; -+ mutex_unlock(&bo->mutex); -+ } -+ -+ if (__copy_to_user(vbuf->user_val_arg, -+ &arg, sizeof(arg))) -+ err = -EFAULT; -+ -+ if (arg.ret) -+ break; -+ } -+ } -+ -+ return err; -+} -+ -+ -+static int psb_cmdbuf_video(struct drm_file *priv, -+ struct list_head *validate_list, -+ uint32_t fence_type, -+ struct drm_psb_cmdbuf_arg *arg, -+ struct ttm_buffer_object *cmd_buffer, -+ struct psb_ttm_fence_rep *fence_arg) -+{ -+ struct drm_device *dev = priv->minor->dev; -+ struct ttm_fence_object *fence; -+ int ret; -+ -+ /* -+ * Check this. Doesn't seem right. Have fencing done AFTER command -+ * submission and make sure drm_psb_idle idles the MSVDX completely. -+ */ -+ ret = -+ psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset, -+ arg->cmdbuf_size, NULL); -+ if (ret) -+ return ret; -+ -+ -+ /* DRM_ERROR("Intel: Fix video fencing!!\n"); */ -+ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, fence_type, -+ arg->fence_flags, validate_list, fence_arg, -+ &fence); -+ -+ -+ ttm_fence_object_unref(&fence); -+ mutex_lock(&cmd_buffer->mutex); -+ if (cmd_buffer->sync_obj != NULL) -+ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj); -+ mutex_unlock(&cmd_buffer->mutex); -+ return 0; -+} -+ -+static int psb_feedback_buf(struct ttm_object_file *tfile, -+ struct psb_context *context, -+ uint32_t feedback_ops, -+ uint32_t handle, -+ uint32_t offset, -+ uint32_t feedback_breakpoints, -+ uint32_t feedback_size, -+ struct psb_feedback_info *feedback) -+{ -+ struct ttm_buffer_object *bo; -+ struct page *page; -+ uint32_t page_no; -+ uint32_t page_offset; -+ int ret; -+ -+ if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) { -+ DRM_ERROR("Illegal feedback op.\n"); -+ return -EINVAL; -+ } -+ -+ if (feedback_breakpoints != 0) { -+ DRM_ERROR("Feedback breakpoints not implemented yet.\n"); -+ return -EINVAL; -+ } -+ -+ if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) { -+ DRM_ERROR("Feedback buffer size too small.\n"); -+ return -EINVAL; -+ } -+ -+ page_offset = offset & ~PAGE_MASK; -+ if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) -+ < page_offset) { -+ DRM_ERROR("Illegal feedback buffer alignment.\n"); -+ return -EINVAL; -+ } -+ -+ bo = ttm_buffer_object_lookup(tfile, handle); -+ if (unlikely(bo == NULL)) { -+ DRM_ERROR("Failed looking up feedback buffer.\n"); -+ return -EINVAL; -+ } -+ -+ -+ ret = psb_validate_kernel_buffer(context, bo, -+ PSB_ENGINE_TA, -+ TTM_PL_FLAG_SYSTEM | -+ TTM_PL_FLAG_CACHED | -+ PSB_GPU_ACCESS_WRITE | -+ PSB_BO_FLAG_FEEDBACK, -+ TTM_PL_MASK_MEM & -+ ~(TTM_PL_FLAG_SYSTEM | -+ TTM_PL_FLAG_CACHED)); -+ if (unlikely(ret != 0)) -+ goto out_unref; -+ -+ page_no = offset >> PAGE_SHIFT; -+ 
if (unlikely(page_no >= bo->num_pages)) {
-+ ret = -EINVAL;
-+ DRM_ERROR("Illegal feedback buffer offset.\n");
-+ goto out_unref;
-+ }
-+
-+ if (unlikely(bo->ttm == NULL)) {
-+ ret = -EINVAL;
-+ DRM_ERROR("Vistest buffer without TTM.\n");
-+ goto out_unref;
-+ }
-+
-+ page = ttm_tt_get_page(bo->ttm, page_no);
-+ if (unlikely(page == NULL)) {
-+ ret = -ENOMEM;
-+ goto out_unref;
-+ }
-+
-+ feedback->page = page;
-+ feedback->offset = page_offset;
-+
-+ /*
-+ * Note: bo reference transferred.
-+ */
-+
-+ feedback->bo = bo;
-+ return 0;
-+
-+out_unref:
-+ ttm_bo_unref(&bo);
-+ return ret;
-+}
-+
-+void psb_down_island_power(struct drm_device *dev, int islands)
-+{
-+ u32 pwr_cnt = 0;
-+ pwr_cnt = MSG_READ32(PSB_PUNIT_PORT, PSB_PWRGT_CNT);
-+ if (islands & PSB_GRAPHICS_ISLAND)
-+ pwr_cnt |= 0x3;
-+ if (islands & PSB_VIDEO_ENC_ISLAND)
-+ pwr_cnt |= 0x30;
-+ if (islands & PSB_VIDEO_DEC_ISLAND)
-+ pwr_cnt |= 0xc;
-+ MSG_WRITE32(PSB_PUNIT_PORT, PSB_PWRGT_CNT, pwr_cnt);
-+}
-+void psb_up_island_power(struct drm_device *dev, int islands)
-+{
-+ u32 pwr_cnt = 0;
-+ u32 count = 5;
-+ u32 pwr_sts = 0;
-+ u32 pwr_mask = 0;
-+ pwr_cnt = MSG_READ32(PSB_PUNIT_PORT, PSB_PWRGT_CNT);
-+ if (islands & PSB_GRAPHICS_ISLAND) {
-+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
-+ pwr_mask |= PSB_PWRGT_GFX_MASK;
-+ }
-+ if (islands & PSB_VIDEO_ENC_ISLAND) {
-+ pwr_cnt &= ~PSB_PWRGT_VID_ENC_MASK;
-+ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
-+ }
-+ if (islands & PSB_VIDEO_DEC_ISLAND) {
-+ pwr_cnt &= ~PSB_PWRGT_VID_DEC_MASK;
-+ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
-+ }
-+ MSG_WRITE32(PSB_PUNIT_PORT, PSB_PWRGT_CNT, pwr_cnt);
-+ while (count--) {
-+ pwr_sts = MSG_READ32(PSB_PUNIT_PORT, PSB_PWRGT_STS);
-+ if ((pwr_sts & pwr_mask) == 0)
-+ break;
-+ else
-+ udelay(10);
-+ }
-+}
-+
-+static int psb_power_down_sgx(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ PSB_DEBUG_PM("power down sgx\n");
-+
-+#ifdef OSPM_STAT
-+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)
-+ dev_priv->gfx_d0i0_time += jiffies - dev_priv->gfx_last_mode_change;
-+ else
-+ PSB_DEBUG_PM("power down: illegal previous power state\n");
-+ dev_priv->gfx_last_mode_change = jiffies;
-+ dev_priv->gfx_d0i3_cnt++;
-+#endif
-+
-+ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
-+ dev_priv->graphics_state = PSB_PWR_STATE_D0i3;
-+ psb_down_island_power(dev, PSB_GRAPHICS_ISLAND);
-+ return 0;
-+}
-+static int psb_power_up_sgx(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ if ((dev_priv->graphics_state & PSB_PWR_STATE_MASK) !=
-+ PSB_PWR_STATE_D0i3)
-+ return -EINVAL;
-+
-+ PSB_DEBUG_PM("power up sgx\n");
-+ if (unlikely(PSB_D_PM & drm_psb_debug))
-+ dump_stack();
-+ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
-+
-+ psb_up_island_power(dev, PSB_GRAPHICS_ISLAND);
-+
-+ /*
-+ * The SGX loses its register contents.
-+ * Restore BIF registers. The MMU page tables are
-+ * "normal" pages, so their contents should be kept.
-+ */
-+
-+ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
-+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
-+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
-+ PSB_RSGX32(PSB_CR_BIF_BANK1);
-+
-+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
-+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
-+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
-+
-+ /*
-+ * 2D Base registers..
-+ */
-+ psb_init_2d(dev_priv);
-+ /*
-+ * Persistent 3D base registers and USSE base registers..
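-+ *
-+ * A descriptive note (no new logic): the writes below restore the
-+ * PDS execution base, the 3D request base, and the two SGX IRQ mask
-+ * registers that were lost when the graphics island was gated; only
-+ * then can the 3D engine be re-initialized via the xhw channel.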
-+ */
-+
-+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
-+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
-+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
-+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+ /*
-+ * Now, re-initialize the 3D engine.
-+ */
-+ if (dev_priv->xhw_on)
-+ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
-+
-+ psb_scheduler_ta_mem_check(dev_priv);
-+ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
-+ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
-+ PSB_TA_MEM_FLAG_TA |
-+ PSB_TA_MEM_FLAG_RASTER |
-+ PSB_TA_MEM_FLAG_HOSTA |
-+ PSB_TA_MEM_FLAG_HOSTD |
-+ PSB_TA_MEM_FLAG_INIT,
-+ dev_priv->ta_mem->ta_memory->offset,
-+ dev_priv->ta_mem->hw_data->offset,
-+ dev_priv->ta_mem->hw_cookie);
-+ }
-+
-+#ifdef OSPM_STAT
-+ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
-+ dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
-+ else
-+ PSB_DEBUG_PM("power up: illegal previous power state\n");
-+ dev_priv->gfx_last_mode_change = jiffies;
-+ dev_priv->gfx_d0i0_cnt++;
-+#endif
-+
-+ dev_priv->graphics_state = PSB_PWR_STATE_D0i0;
-+
-+ return 0;
-+}
-+
-+int psb_try_power_down_sgx(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ int ret;
-+ if (!down_write_trylock(&dev_priv->sgx_sem))
-+ return -EBUSY;
-+ /* Try to lock 2D, because the FB driver usually uses the 2D engine. */
-+ if (!psb_2d_trylock(dev_priv)) {
-+ ret = -EBUSY;
-+ goto out_err0;
-+ }
-+ if ((dev_priv->graphics_state & PSB_PWR_STATE_MASK) !=
-+ PSB_PWR_STATE_D0i0) {
-+ ret = -EINVAL;
-+ goto out_err1;
-+ }
-+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY) ||
-+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) != 0)) {
-+ ret = -EBUSY;
-+ goto out_err1;
-+ }
-+ if (!scheduler->idle ||
-+ !list_empty(&scheduler->raster_queue) ||
-+ !list_empty(&scheduler->ta_queue) ||
-+ !list_empty(&scheduler->hp_raster_queue)) {
-+ ret = -EBUSY;
-+ goto out_err1;
-+ }
-+ /*flush_scheduled_work();*/
-+ ret = psb_power_down_sgx(dev);
-+out_err1:
-+ psb_2d_atomic_unlock(dev_priv);
-+out_err0:
-+ up_write(&dev_priv->sgx_sem);
-+ return ret;
-+}
-+/* Check the power state; if the device is asleep, wake it up. */
-+void psb_check_power_state(struct drm_device *dev, int devices)
-+{
-+ struct pci_dev *pdev = dev->pdev;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ down(&dev_priv->pm_sem);
-+ switch (pdev->current_state) {
-+ case PCI_D3hot:
-+ dev->driver->pci_driver.resume(pdev);
-+ break;
-+ default:
-+
-+ if (devices & PSB_DEVICE_SGX) {
-+ if ((dev_priv->graphics_state & PSB_PWR_STATE_MASK) ==
-+ PSB_PWR_STATE_D0i3) {
-+ /* power up SGX */
-+ psb_power_up_sgx(dev);
-+ }
-+ } else if (devices & PSB_DEVICE_MSVDX) {
-+ if ((dev_priv->msvdx_state & PSB_PWR_STATE_MASK) ==
-+ PSB_PWR_STATE_D0i3) {
-+ psb_power_up_msvdx(dev);
-+ } else {
-+ dev_priv->msvdx_last_action = jiffies;
-+ }
-+ }
-+ break;
-+ }
-+ up(&dev_priv->pm_sem);
-+}
-+
-+void psb_init_ospm(struct drm_psb_private *dev_priv)
-+{
-+ static int init;
-+ if (!init) {
-+ dev_priv->graphics_state = PSB_PWR_STATE_D0i0;
-+ init_rwsem(&dev_priv->sgx_sem);
-+ sema_init(&dev_priv->pm_sem, 1);
-+#ifdef OSPM_STAT
-+ dev_priv->gfx_last_mode_change = jiffies;
-+ dev_priv->gfx_d0i0_time = 0;
-+ dev_priv->gfx_d0i3_time = 0;
-+ dev_priv->gfx_d3_time = 0;
-+#endif
-+ init = 1;
-+ }
-+}
-+
-+int psb_cmdbuf_ioctl(struct drm_device *dev,
void *data, -+ struct drm_file *file_priv) -+{ -+ struct drm_psb_cmdbuf_arg *arg = data; -+ int ret = 0; -+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; -+ struct ttm_buffer_object *cmd_buffer = NULL; -+ struct ttm_buffer_object *ta_buffer = NULL; -+ struct ttm_buffer_object *oom_buffer = NULL; -+ struct psb_ttm_fence_rep fence_arg; -+ struct drm_psb_scene user_scene; -+ struct psb_scene_pool *pool = NULL; -+ struct psb_scene *scene = NULL; -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *)file_priv->minor->dev->dev_private; -+ int engine; -+ struct psb_feedback_info feedback; -+ int po_correct; -+ struct psb_context *context; -+ unsigned num_buffers; -+ -+ num_buffers = PSB_NUM_VALIDATE_BUFFERS; -+ -+ ret = ttm_read_lock(&dev_priv->ttm_lock, true); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA) -+ || (arg->engine == PSB_ENGINE_RASTERIZER)) { -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ } -+ -+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); -+ if (unlikely(ret != 0)) -+ goto out_err0; -+ -+ -+ context = &dev_priv->context; -+ context->used_buffers = 0; -+ context->fence_types = 0; -+ BUG_ON(!list_empty(&context->validate_list)); -+ BUG_ON(!list_empty(&context->kern_validate_list)); -+ -+ if (unlikely(context->buffers == NULL)) { -+ context->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS * -+ sizeof(*context->buffers)); -+ if (unlikely(context->buffers == NULL)) { -+ ret = -ENOMEM; -+ goto out_err1; -+ } -+ } -+ -+ ret = psb_reference_buffers(file_priv, -+ arg->buffer_list, -+ context); -+ -+ if (unlikely(ret != 0)) -+ goto out_err1; -+ -+ context->val_seq = atomic_add_return(1, &dev_priv->val_seq); -+ -+ ret = ttm_eu_reserve_buffers(&context->validate_list, -+ context->val_seq); -+ if (unlikely(ret != 0)) { -+ goto out_err2; -+ } -+ -+ engine = (arg->engine == PSB_ENGINE_RASTERIZER) ? 
-+ PSB_ENGINE_TA : arg->engine; -+ -+ ret = psb_validate_buffer_list(file_priv, engine, -+ context, &po_correct); -+ if (unlikely(ret != 0)) -+ goto out_err3; -+ -+ if (!po_correct) { -+ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs, -+ arg->reloc_offset, -+ arg->reloc_handle, context, 0, 1); -+ if (unlikely(ret != 0)) -+ goto out_err3; -+ -+ } -+ -+ cmd_buffer = ttm_buffer_object_lookup(tfile, arg->cmdbuf_handle); -+ if (unlikely(cmd_buffer == NULL)) { -+ ret = -EINVAL; -+ goto out_err4; -+ } -+ -+ switch (arg->engine) { -+ case PSB_ENGINE_2D: -+ ret = psb_cmdbuf_2d(file_priv, &context->validate_list, -+ context->fence_types, arg, cmd_buffer, -+ &fence_arg); -+ if (unlikely(ret != 0)) -+ goto out_err4; -+ break; -+ case PSB_ENGINE_VIDEO: -+ psb_check_power_state(dev, PSB_DEVICE_MSVDX); -+ ret = psb_cmdbuf_video(file_priv, &context->validate_list, -+ context->fence_types, arg, -+ cmd_buffer, &fence_arg); -+ -+ if (unlikely(ret != 0)) -+ goto out_err4; -+ break; -+ case LNC_ENGINE_ENCODE: -+ psb_check_power_state(dev, PSB_DEVICE_TOPAZ); -+ ret = lnc_cmdbuf_video(file_priv, &context->validate_list, -+ context->fence_types, arg, -+ cmd_buffer, &fence_arg); -+ if (unlikely(ret != 0)) -+ goto out_err4; -+ break; -+ case PSB_ENGINE_RASTERIZER: -+ ret = psb_cmdbuf_raster(file_priv, context, -+ arg, cmd_buffer, &fence_arg); -+ if (unlikely(ret != 0)) -+ goto out_err4; -+ break; -+ case PSB_ENGINE_TA: -+ if (arg->ta_handle == arg->cmdbuf_handle) { -+ ta_buffer = ttm_bo_reference(cmd_buffer); -+ } else { -+ ta_buffer = -+ ttm_buffer_object_lookup(tfile, -+ arg->ta_handle); -+ if (!ta_buffer) { -+ ret = -EINVAL; -+ goto out_err4; -+ } -+ } -+ if (arg->oom_size != 0) { -+ if (arg->oom_handle == arg->cmdbuf_handle) { -+ oom_buffer = ttm_bo_reference(cmd_buffer); -+ } else { -+ oom_buffer = -+ ttm_buffer_object_lookup(tfile, -+ arg-> -+ oom_handle); -+ if (!oom_buffer) { -+ ret = -EINVAL; -+ goto out_err4; -+ } -+ } -+ } -+ -+ ret = copy_from_user(&user_scene, (void __user *) -+ ((unsigned long) arg->scene_arg), -+ sizeof(user_scene)); -+ if (ret) -+ goto out_err4; -+ -+ if (!user_scene.handle_valid) { -+ pool = psb_scene_pool_alloc(file_priv, 0, -+ user_scene.num_buffers, -+ user_scene.w, -+ user_scene.h); -+ if (!pool) { -+ ret = -ENOMEM; -+ goto out_err0; -+ } -+ -+ user_scene.handle = psb_scene_pool_handle(pool); -+ user_scene.handle_valid = 1; -+ ret = copy_to_user((void __user *) -+ ((unsigned long) arg-> -+ scene_arg), &user_scene, -+ sizeof(user_scene)); -+ -+ if (ret) -+ goto out_err4; -+ } else { -+ pool = -+ psb_scene_pool_lookup(file_priv, -+ user_scene.handle, 1); -+ if (!pool) { -+ ret = -EINVAL; -+ goto out_err4; -+ } -+ } -+ -+ ret = psb_validate_scene_pool(context, pool, -+ user_scene.w, -+ user_scene.h, -+ arg->ta_flags & -+ PSB_TA_FLAG_LASTPASS, &scene); -+ if (ret) -+ goto out_err4; -+ -+ memset(&feedback, 0, sizeof(feedback)); -+ if (arg->feedback_ops) { -+ ret = psb_feedback_buf(tfile, -+ context, -+ arg->feedback_ops, -+ arg->feedback_handle, -+ arg->feedback_offset, -+ arg->feedback_breakpoints, -+ arg->feedback_size, -+ &feedback); -+ if (ret) -+ goto out_err4; -+ } -+ ret = psb_cmdbuf_ta(file_priv, context, -+ arg, cmd_buffer, ta_buffer, -+ oom_buffer, scene, &feedback, -+ &fence_arg); -+ if (ret) -+ goto out_err4; -+ break; -+ default: -+ DRM_ERROR -+ ("Unimplemented command submission mechanism (%x).\n", -+ arg->engine); -+ ret = -EINVAL; -+ goto out_err4; -+ } -+ -+ if (!(arg->fence_flags & DRM_PSB_FENCE_NO_USER)) { -+ ret = copy_to_user((void __user *) -+ 
((unsigned long) arg->fence_arg), -+ &fence_arg, sizeof(fence_arg)); -+ } -+ -+out_err4: -+ if (scene) -+ psb_scene_unref(&scene); -+ if (pool) -+ psb_scene_pool_unref(&pool); -+ if (cmd_buffer) -+ ttm_bo_unref(&cmd_buffer); -+ if (ta_buffer) -+ ttm_bo_unref(&ta_buffer); -+ if (oom_buffer) -+ ttm_bo_unref(&oom_buffer); -+out_err3: -+ ret = psb_handle_copyback(dev, context, ret); -+out_err2: -+ psb_unreference_buffers(context); -+out_err1: -+ mutex_unlock(&dev_priv->cmdbuf_mutex); -+out_err0: -+ ttm_read_unlock(&dev_priv->ttm_lock); -+ if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA) -+ || (arg->engine == PSB_ENGINE_RASTERIZER)) -+ up_read(&dev_priv->sgx_sem); -+ return ret; -+} -diff -uNr a/drivers/gpu/drm/psb/psb_sgx.h b/drivers/gpu/drm/psb/psb_sgx.h ---- a/drivers/gpu/drm/psb/psb_sgx.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_sgx.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,41 @@ -+/* -+ * Copyright (c) 2008, Intel Corporation -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -+ * SOFTWARE. -+ * -+ * Authors: -+ * Eric Anholt <eric@anholt.net> -+ * -+ **/ -+#ifndef _PSB_SGX_H_ -+#define _PSB_SGX_H_ -+ -+extern int psb_submit_video_cmdbuf(struct drm_device *dev, -+ struct ttm_buffer_object *cmd_buffer, -+ unsigned long cmd_offset, -+ unsigned long cmd_size, -+ struct ttm_fence_object *fence); -+ -+extern int psb_2d_wait_available(struct drm_psb_private *dev_priv, -+ unsigned size); -+extern int drm_idle_check_interval; -+extern int drm_psb_ospm; -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/psb_ttm_glue.c b/drivers/gpu/drm/psb/psb_ttm_glue.c ---- a/drivers/gpu/drm/psb/psb_ttm_glue.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_ttm_glue.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,345 @@ -+/************************************************************************** -+ * Copyright (c) 2008, Intel Corporation. -+ * All Rights Reserved. -+ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA. -+ * All Rights Reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for -+ * more details. -+ * -+ * You should have received a copy of the GNU General Public License along with -+ * this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ * develop this driver. -+ * -+ **************************************************************************/ -+/* -+ */ -+ -+#include <drm/drmP.h> -+#include "psb_drv.h" -+#include "ttm/ttm_userobj_api.h" -+ -+static struct vm_operations_struct psb_ttm_vm_ops; -+ -+int psb_open(struct inode *inode, struct file *filp) -+{ -+ struct drm_file *file_priv; -+ struct drm_psb_private *dev_priv; -+ struct psb_fpriv *psb_fp; -+ int ret; -+ -+ ret = drm_open(inode, filp); -+ if (unlikely(ret)) -+ return ret; -+ -+ psb_fp = drm_calloc(1, sizeof(*psb_fp), DRM_MEM_FILES); -+ -+ if (unlikely(psb_fp == NULL)) -+ goto out_err0; -+ -+ file_priv = (struct drm_file *) filp->private_data; -+ dev_priv = psb_priv(file_priv->minor->dev); -+ -+ -+ psb_fp->tfile = ttm_object_file_init(dev_priv->tdev, -+ PSB_FILE_OBJECT_HASH_ORDER); -+ if (unlikely(psb_fp->tfile == NULL)) -+ goto out_err1; -+ -+ file_priv->driver_priv = psb_fp; -+ -+ if (unlikely(dev_priv->bdev.dev_mapping == NULL)) -+ dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping; -+ -+ return 0; -+ -+out_err1: -+ drm_free(psb_fp, sizeof(*psb_fp), DRM_MEM_FILES); -+out_err0: -+ (void) drm_release(inode, filp); -+ return ret; -+} -+ -+int psb_release(struct inode *inode, struct file *filp) -+{ -+ struct drm_file *file_priv; -+ struct psb_fpriv *psb_fp; -+ struct drm_psb_private *dev_priv; -+ int ret; -+ -+ file_priv = (struct drm_file *) filp->private_data; -+ psb_fp = psb_fpriv(file_priv); -+ dev_priv = psb_priv(file_priv->minor->dev); -+ -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(file_priv->minor->dev, PSB_DEVICE_SGX); -+ -+ ttm_object_file_release(&psb_fp->tfile); -+ drm_free(psb_fp, sizeof(*psb_fp), DRM_MEM_FILES); -+ -+ if (dev_priv && dev_priv->xhw_file) -+ psb_xhw_init_takedown(dev_priv, file_priv, 1); -+ -+ ret = drm_release(inode, filp); -+ up_read(&dev_priv->sgx_sem); -+ if (drm_psb_ospm && IS_MRST(dev_priv->dev)) -+ schedule_delayed_work(&dev_priv->scheduler.wq, 0); -+ return ret; -+} -+ -+int psb_fence_signaled_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ int ret; -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ ret = ttm_fence_signaled_ioctl(psb_fpriv(file_priv)->tfile, data); -+ up_read(&dev_priv->sgx_sem); -+ if (drm_psb_ospm && IS_MRST(dev)) -+ schedule_delayed_work(&dev_priv->scheduler.wq, 1); -+ return ret; -+} -+ -+int psb_fence_finish_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ int ret; -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ ret = ttm_fence_finish_ioctl(psb_fpriv(file_priv)->tfile, data); -+ up_read(&dev_priv->sgx_sem); -+ if (drm_psb_ospm && IS_MRST(dev)) -+ schedule_delayed_work(&dev_priv->scheduler.wq, 1); -+ return ret; -+} -+ -+int psb_fence_unref_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ int ret; -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ ret = 
ttm_fence_unref_ioctl(psb_fpriv(file_priv)->tfile, data); -+ up_read(&dev_priv->sgx_sem); -+ if (drm_psb_ospm && IS_MRST(dev)) -+ schedule_delayed_work(&dev_priv->scheduler.wq, 1); -+ return ret; -+} -+ -+int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ PSB_DEBUG_PM("ioctl: psb_pl_waitidle\n"); -+ return ttm_pl_waitidle_ioctl(psb_fpriv(file_priv)->tfile, data); -+} -+ -+int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ int ret; -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ ret = ttm_pl_setstatus_ioctl(psb_fpriv(file_priv)->tfile, -+ &psb_priv(dev)->ttm_lock, data); -+ up_read(&dev_priv->sgx_sem); -+ if (drm_psb_ospm && IS_MRST(dev)) -+ schedule_delayed_work(&dev_priv->scheduler.wq, 1); -+ return ret; -+} -+ -+int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ int ret; -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ ret = ttm_pl_synccpu_ioctl(psb_fpriv(file_priv)->tfile, data); -+ up_read(&dev_priv->sgx_sem); -+ if (drm_psb_ospm && IS_MRST(dev)) -+ schedule_delayed_work(&dev_priv->scheduler.wq, 1); -+ return ret; -+} -+ -+int psb_pl_unref_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ int ret; -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ ret = ttm_pl_unref_ioctl(psb_fpriv(file_priv)->tfile, data); -+ up_read(&dev_priv->sgx_sem); -+ if (drm_psb_ospm && IS_MRST(dev)) -+ schedule_delayed_work(&dev_priv->scheduler.wq, 1); -+ return ret; -+} -+ -+int psb_pl_reference_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ int ret; -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ ret = ttm_pl_reference_ioctl(psb_fpriv(file_priv)->tfile, data); -+ up_read(&dev_priv->sgx_sem); -+ if (drm_psb_ospm && IS_MRST(dev)) -+ schedule_delayed_work(&dev_priv->scheduler.wq, 1); -+ return ret; -+} -+ -+int psb_pl_create_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct drm_psb_private *dev_priv = psb_priv(dev); -+ int ret; -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ ret = ttm_pl_create_ioctl(psb_fpriv(file_priv)->tfile, -+ &dev_priv->bdev, &dev_priv->ttm_lock, data); -+ up_read(&dev_priv->sgx_sem); -+ if (drm_psb_ospm && IS_MRST(dev)) -+ schedule_delayed_work(&dev_priv->scheduler.wq, 1); -+ return ret; -+} -+ -+/** -+ * psb_ttm_fault - Wrapper around the ttm fault method. -+ * -+ * @vma: The struct vm_area_struct as in the vm fault() method. -+ * @vmf: The struct vm_fault as in the vm fault() method. -+ * -+ * Since ttm_fault() will reserve buffers while faulting, -+ * we need to take the ttm read lock around it, as this driver -+ * relies on the ttm_lock in write mode to exclude all threads from -+ * reserving and thus validating buffers in aperture- and memory shortage -+ * situations.
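-+ * -+ * The resulting pattern is, in outline (a sketch of the body below): -+ * take the read lock, call through to TTM's fault handler, drop the -+ * lock: -+ * -+ * ttm_read_lock(&dev_priv->ttm_lock, true); -+ * ret = dev_priv->ttm_vm_ops->fault(vma, vmf); -+ * ttm_read_unlock(&dev_priv->ttm_lock);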
-+ */ -+ -+static int psb_ttm_fault(struct vm_area_struct *vma, -+ struct vm_fault *vmf) -+{ -+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *) -+ vma->vm_private_data; -+ struct drm_psb_private *dev_priv = -+ container_of(bo->bdev, struct drm_psb_private, bdev); -+ int ret; -+ -+ ret = ttm_read_lock(&dev_priv->ttm_lock, true); -+ if (unlikely(ret != 0)) -+ return VM_FAULT_NOPAGE; -+ -+ ret = dev_priv->ttm_vm_ops->fault(vma, vmf); -+ -+ ttm_read_unlock(&dev_priv->ttm_lock); -+ return ret; -+} -+ -+ -+int psb_mmap(struct file *filp, struct vm_area_struct *vma) -+{ -+ struct drm_file *file_priv; -+ struct drm_psb_private *dev_priv; -+ int ret; -+ -+ if (unlikely(vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET)) -+ return drm_mmap(filp, vma); -+ -+ file_priv = (struct drm_file *) filp->private_data; -+ dev_priv = psb_priv(file_priv->minor->dev); -+ -+ ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ if (unlikely(dev_priv->ttm_vm_ops == NULL)) { -+ dev_priv->ttm_vm_ops = vma->vm_ops; -+ psb_ttm_vm_ops = *vma->vm_ops; -+ psb_ttm_vm_ops.fault = &psb_ttm_fault; -+ } -+ -+ vma->vm_ops = &psb_ttm_vm_ops; -+ -+ return 0; -+} -+ -+ssize_t psb_ttm_write(struct file *filp, const char __user *buf, -+ size_t count, loff_t *f_pos) -+{ -+ struct drm_file *file_priv = (struct drm_file *)filp->private_data; -+ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev); -+ -+ return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1); -+} -+ -+ssize_t psb_ttm_read(struct file *filp, char __user *buf, -+ size_t count, loff_t *f_pos) -+{ -+ struct drm_file *file_priv = (struct drm_file *)filp->private_data; -+ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev); -+ -+ return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1); -+} -+ -+int psb_verify_access(struct ttm_buffer_object *bo, -+ struct file *filp) -+{ -+ struct drm_file *file_priv = (struct drm_file *)filp->private_data; -+ -+ if (capable(CAP_SYS_ADMIN)) -+ return 0; -+ -+ if (unlikely(!file_priv->authenticated)) -+ return -EPERM; -+ -+ return ttm_pl_verify_access(bo, psb_fpriv(file_priv)->tfile); -+} -+ -+static int psb_ttm_mem_global_init(struct drm_global_reference *ref) -+{ -+ return ttm_mem_global_init(ref->object); -+} -+ -+static void psb_ttm_mem_global_release(struct drm_global_reference *ref) -+{ -+ ttm_mem_global_release(ref->object); -+} -+ -+int psb_ttm_global_init(struct drm_psb_private *dev_priv) -+{ -+ struct drm_global_reference *global_ref; -+ int ret; -+ -+ global_ref = &dev_priv->mem_global_ref; -+ global_ref->global_type = DRM_GLOBAL_TTM_MEM; -+ global_ref->size = sizeof(struct ttm_mem_global); -+ global_ref->init = &psb_ttm_mem_global_init; -+ global_ref->release = &psb_ttm_mem_global_release; -+ -+ ret = drm_global_item_ref(global_ref); -+ if (unlikely(ret != 0)) { -+ DRM_ERROR("Failed referencing a global TTM memory object.\n"); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+void psb_ttm_global_release(struct drm_psb_private *dev_priv) -+{ -+ drm_global_item_unref(&dev_priv->mem_global_ref); -+} -diff -uNr a/drivers/gpu/drm/psb/psb_xhw.c b/drivers/gpu/drm/psb/psb_xhw.c ---- a/drivers/gpu/drm/psb/psb_xhw.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/psb_xhw.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,629 @@ -+/************************************************************************** -+ *Copyright (c) 2007-2008, Intel Corporation. -+ *All Rights Reserved. 
-+ * -+ *This program is free software; you can redistribute it and/or modify it -+ *under the terms and conditions of the GNU General Public License, -+ *version 2, as published by the Free Software Foundation. -+ * -+ *This program is distributed in the hope it will be useful, but WITHOUT -+ *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ *more details. -+ * -+ *You should have received a copy of the GNU General Public License along with -+ *this program; if not, write to the Free Software Foundation, Inc., -+ *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -+ * -+ *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to -+ *develop this driver. -+ * -+ **************************************************************************/ -+/* -+ *Make calls into closed source X server code. -+ */ -+ -+#include <drm/drmP.h> -+#include "psb_drv.h" -+#include "ttm/ttm_userobj_api.h" -+ -+void -+psb_xhw_clean_buf(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf) -+{ -+ unsigned long irq_flags; -+ -+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); -+ list_del_init(&buf->head); -+ if (dev_priv->xhw_cur_buf == buf) -+ dev_priv->xhw_cur_buf = NULL; -+ atomic_set(&buf->done, 1); -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+} -+ -+static inline int psb_xhw_add(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf) -+{ -+ unsigned long irq_flags; -+ -+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); -+ atomic_set(&buf->done, 0); -+ if (unlikely(!dev_priv->xhw_submit_ok)) { -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+ DRM_ERROR("No Xpsb 3D extension available.\n"); -+ return -EINVAL; -+ } -+ if (!list_empty(&buf->head)) { -+ DRM_ERROR("Recursive list adding.\n"); -+ goto out; -+ } -+ list_add_tail(&buf->head, &dev_priv->xhw_in); -+ wake_up_interruptible(&dev_priv->xhw_queue); -+out: -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+ return 0; -+} -+ -+int psb_xhw_scene_info(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, -+ uint32_t w, -+ uint32_t h, -+ uint32_t *hw_cookie, -+ uint32_t *bo_size, -+ uint32_t *clear_p_start, -+ uint32_t *clear_num_pages) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ int ret; -+ -+ buf->copy_back = 1; -+ xa->op = PSB_XHW_SCENE_INFO; -+ xa->irq_op = 0; -+ xa->issue_irq = 0; -+ xa->arg.si.w = w; -+ xa->arg.si.h = h; -+ -+ ret = psb_xhw_add(dev_priv, buf); -+ if (ret) -+ return ret; -+ -+ (void) wait_event_timeout(dev_priv->xhw_caller_queue, -+ atomic_read(&buf->done), DRM_HZ); -+ -+ if (!atomic_read(&buf->done)) { -+ psb_xhw_clean_buf(dev_priv, buf); -+ return -EBUSY; -+ } -+ -+ if (!xa->ret) { -+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie)); -+ *bo_size = xa->arg.si.size; -+ *clear_p_start = xa->arg.si.clear_p_start; -+ *clear_num_pages = xa->arg.si.clear_num_pages; -+ } -+ return xa->ret; -+} -+ -+int psb_xhw_fire_raster(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, uint32_t fire_flags) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ -+ buf->copy_back = 0; -+ xa->op = PSB_XHW_FIRE_RASTER; -+ xa->issue_irq = 0; -+ xa->arg.sb.fire_flags = 0; -+ -+ return psb_xhw_add(dev_priv, buf); -+} -+ -+int psb_xhw_vistest(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ -+ buf->copy_back = 1; -+ xa->op = PSB_XHW_VISTEST; -+ /* -+ *Could perhaps decrease latency somewhat by -+ *issuing an irq in this 
case. -+ */ -+ xa->issue_irq = 0; -+ xa->irq_op = PSB_UIRQ_VISTEST; -+ return psb_xhw_add(dev_priv, buf); -+} -+ -+int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, -+ uint32_t fire_flags, -+ uint32_t hw_context, -+ uint32_t *cookie, -+ uint32_t *oom_cmds, -+ uint32_t num_oom_cmds, -+ uint32_t offset, uint32_t engine, -+ uint32_t flags) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ -+ buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM); -+ xa->op = PSB_XHW_SCENE_BIND_FIRE; -+ xa->issue_irq = (buf->copy_back) ? 1 : 0; -+ if (unlikely(buf->copy_back)) -+ xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ? -+ PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY; -+ else -+ xa->irq_op = 0; -+ xa->arg.sb.fire_flags = fire_flags; -+ xa->arg.sb.hw_context = hw_context; -+ xa->arg.sb.offset = offset; -+ xa->arg.sb.engine = engine; -+ xa->arg.sb.flags = flags; -+ xa->arg.sb.num_oom_cmds = num_oom_cmds; -+ memcpy(xa->cookie, cookie, sizeof(xa->cookie)); -+ if (num_oom_cmds) -+ memcpy(xa->arg.sb.oom_cmds, oom_cmds, -+ sizeof(uint32_t) * num_oom_cmds); -+ return psb_xhw_add(dev_priv, buf); -+} -+ -+int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ int ret; -+ -+ buf->copy_back = 1; -+ xa->op = PSB_XHW_RESET_DPM; -+ xa->issue_irq = 0; -+ xa->irq_op = 0; -+ -+ ret = psb_xhw_add(dev_priv, buf); -+ if (ret) -+ return ret; -+ -+ (void) wait_event_timeout(dev_priv->xhw_caller_queue, -+ atomic_read(&buf->done), 3 * DRM_HZ); -+ -+ if (!atomic_read(&buf->done)) { -+ psb_xhw_clean_buf(dev_priv, buf); -+ return -EBUSY; -+ } -+ -+ return xa->ret; -+} -+ -+int psb_xhw_check_lockup(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, uint32_t *value) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ int ret; -+ -+ *value = 0; -+ -+ buf->copy_back = 1; -+ xa->op = PSB_XHW_CHECK_LOCKUP; -+ xa->issue_irq = 0; -+ xa->irq_op = 0; -+ -+ ret = psb_xhw_add(dev_priv, buf); -+ if (ret) -+ return ret; -+ -+ (void) wait_event_timeout(dev_priv->xhw_caller_queue, -+ atomic_read(&buf->done), DRM_HZ * 3); -+ -+ if (!atomic_read(&buf->done)) { -+ psb_xhw_clean_buf(dev_priv, buf); -+ return -EBUSY; -+ } -+ -+ if (!xa->ret) -+ *value = xa->arg.cl.value; -+ -+ return xa->ret; -+} -+ -+static int psb_xhw_terminate(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ unsigned long irq_flags; -+ -+ buf->copy_back = 0; -+ xa->op = PSB_XHW_TERMINATE; -+ xa->issue_irq = 0; -+ -+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); -+ dev_priv->xhw_submit_ok = 0; -+ atomic_set(&buf->done, 0); -+ if (!list_empty(&buf->head)) { -+ DRM_ERROR("Recursive list adding.\n"); -+ goto out; -+ } -+ list_add_tail(&buf->head, &dev_priv->xhw_in); -+out: -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+ wake_up_interruptible(&dev_priv->xhw_queue); -+ -+ (void) wait_event_timeout(dev_priv->xhw_caller_queue, -+ atomic_read(&buf->done), DRM_HZ / 10); -+ -+ if (!atomic_read(&buf->done)) { -+ DRM_ERROR("Xpsb terminate timeout.\n"); -+ psb_xhw_clean_buf(dev_priv, buf); -+ return -EBUSY; -+ } -+ -+ return 0; -+} -+ -+int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, -+ uint32_t pages, uint32_t * hw_cookie, -+ uint32_t * size, -+ uint32_t * ta_min_size) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ int ret; -+ -+ buf->copy_back = 1; -+ xa->op = PSB_XHW_TA_MEM_INFO; -+ xa->issue_irq = 0; -+ xa->irq_op = 0; -+ xa->arg.bi.pages = 
pages; -+ -+ ret = psb_xhw_add(dev_priv, buf); -+ if (ret) -+ return ret; -+ -+ (void) wait_event_timeout(dev_priv->xhw_caller_queue, -+ atomic_read(&buf->done), DRM_HZ); -+ -+ if (!atomic_read(&buf->done)) { -+ psb_xhw_clean_buf(dev_priv, buf); -+ return -EBUSY; -+ } -+ -+ if (!xa->ret) -+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie)); -+ -+ *size = xa->arg.bi.size; -+ *ta_min_size = xa->arg.bi.ta_min_size; -+ return xa->ret; -+} -+ -+int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, -+ uint32_t flags, -+ uint32_t param_offset, -+ uint32_t pt_offset, uint32_t *hw_cookie) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ int ret; -+ -+ buf->copy_back = 1; -+ xa->op = PSB_XHW_TA_MEM_LOAD; -+ xa->issue_irq = 0; -+ xa->irq_op = 0; -+ xa->arg.bl.flags = flags; -+ xa->arg.bl.param_offset = param_offset; -+ xa->arg.bl.pt_offset = pt_offset; -+ memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie)); -+ -+ ret = psb_xhw_add(dev_priv, buf); -+ if (ret) -+ return ret; -+ -+ (void) wait_event_timeout(dev_priv->xhw_caller_queue, -+ atomic_read(&buf->done), 3 * DRM_HZ); -+ -+ if (!atomic_read(&buf->done)) { -+ psb_xhw_clean_buf(dev_priv, buf); -+ return -EBUSY; -+ } -+ -+ if (!xa->ret) -+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie)); -+ -+ return xa->ret; -+} -+ -+int psb_xhw_ta_oom(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, uint32_t *cookie) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ -+ /* -+ *This calls the extensive closed source -+ *OOM handler, which resolves the condition and -+ *sends a reply telling the scheduler what to do -+ *with the task. -+ */ -+ -+ buf->copy_back = 1; -+ xa->op = PSB_XHW_OOM; -+ xa->issue_irq = 1; -+ xa->irq_op = PSB_UIRQ_OOM_REPLY; -+ memcpy(xa->cookie, cookie, sizeof(xa->cookie)); -+ -+ return psb_xhw_add(dev_priv, buf); -+} -+ -+void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, -+ uint32_t *cookie, -+ uint32_t *bca, uint32_t *rca, uint32_t *flags) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ -+ /* -+ *Get info about how to schedule an OOM task. 
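-+ * -+ *The fields were copied back from the X server's reply by -+ *psb_xhw_handler(). A caller consumes them roughly as follows -+ *(illustrative sketch, not a prescribed call site): -+ * -+ * psb_xhw_ta_oom_reply(dev_priv, buf, cookie, &bca, &rca, &flags);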
-+ */ -+ -+ memcpy(cookie, xa->cookie, sizeof(xa->cookie)); -+ *bca = xa->arg.oom.bca; -+ *rca = xa->arg.oom.rca; -+ *flags = xa->arg.oom.flags; -+} -+ -+void psb_xhw_fire_reply(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf, uint32_t *cookie) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ -+ memcpy(cookie, xa->cookie, sizeof(xa->cookie)); -+} -+ -+int psb_xhw_resume(struct drm_psb_private *dev_priv, -+ struct psb_xhw_buf *buf) -+{ -+ struct drm_psb_xhw_arg *xa = &buf->arg; -+ -+ buf->copy_back = 0; -+ xa->op = PSB_XHW_RESUME; -+ xa->issue_irq = 0; -+ xa->irq_op = 0; -+ return psb_xhw_add(dev_priv, buf); -+} -+ -+void psb_xhw_takedown(struct drm_psb_private *dev_priv) -+{ -+} -+ -+int psb_xhw_init(struct drm_device *dev) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ unsigned long irq_flags; -+ -+ INIT_LIST_HEAD(&dev_priv->xhw_in); -+ spin_lock_init(&dev_priv->xhw_lock); -+ atomic_set(&dev_priv->xhw_client, 0); -+ init_waitqueue_head(&dev_priv->xhw_queue); -+ init_waitqueue_head(&dev_priv->xhw_caller_queue); -+ mutex_init(&dev_priv->xhw_mutex); -+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); -+ dev_priv->xhw_on = 0; -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+ -+ return 0; -+} -+ -+static int psb_xhw_init_init(struct drm_device *dev, -+ struct drm_file *file_priv, -+ struct drm_psb_xhw_init_arg *arg) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile; -+ int ret; -+ bool is_iomem; -+ -+ if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) { -+ unsigned long irq_flags; -+ -+ dev_priv->xhw_bo = -+ ttm_buffer_object_lookup(tfile, arg->buffer_handle); -+ if (!dev_priv->xhw_bo) { -+ ret = -EINVAL; -+ goto out_err; -+ } -+ ret = ttm_bo_kmap(dev_priv->xhw_bo, 0, -+ dev_priv->xhw_bo->num_pages, -+ &dev_priv->xhw_kmap); -+ if (ret) { -+ DRM_ERROR("Failed mapping X server " -+ "communications buffer.\n"); -+ goto out_err0; -+ } -+ dev_priv->xhw = -+ ttm_kmap_obj_virtual(&dev_priv->xhw_kmap, &is_iomem); -+ if (is_iomem) { -+ DRM_ERROR("X server communications buffer " -+ "is in device memory.\n"); -+ ret = -EINVAL; -+ goto out_err1; -+ } -+ dev_priv->xhw_file = file_priv; -+ -+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); -+ dev_priv->xhw_on = 1; -+ dev_priv->xhw_submit_ok = 1; -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+ return 0; -+ } else { -+ DRM_ERROR("Xhw is already initialized.\n"); -+ return -EBUSY; -+ } -+out_err1: -+ dev_priv->xhw = NULL; -+ ttm_bo_kunmap(&dev_priv->xhw_kmap); -+out_err0: -+ ttm_bo_unref(&dev_priv->xhw_bo); -+out_err: -+ atomic_dec(&dev_priv->xhw_client); -+ return ret; -+} -+ -+static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv) -+{ -+ struct psb_xhw_buf *cur_buf, *next; -+ unsigned long irq_flags; -+ -+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); -+ dev_priv->xhw_submit_ok = 0; -+ -+ list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) { -+ list_del_init(&cur_buf->head); -+ if (cur_buf->copy_back) -+ cur_buf->arg.ret = -EINVAL; -+ atomic_set(&cur_buf->done, 1); -+ } -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+ wake_up(&dev_priv->xhw_caller_queue); -+} -+ -+void psb_xhw_init_takedown(struct drm_psb_private *dev_priv, -+ struct drm_file *file_priv, int closing) -+{ -+ -+ if (dev_priv->xhw_file == file_priv && -+ atomic_add_unless(&dev_priv->xhw_client, -1, 0)) { -+ -+ if (closing) -+ psb_xhw_queue_empty(dev_priv); -+
else { -+ struct psb_xhw_buf buf; -+ INIT_LIST_HEAD(&buf.head); -+ -+ psb_xhw_terminate(dev_priv, &buf); -+ psb_xhw_queue_empty(dev_priv); -+ } -+ -+ dev_priv->xhw = NULL; -+ ttm_bo_kunmap(&dev_priv->xhw_kmap); -+ ttm_bo_unref(&dev_priv->xhw_bo); -+ dev_priv->xhw_file = NULL; -+ } -+} -+ -+int psb_xhw_init_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct drm_psb_xhw_init_arg *arg = -+ (struct drm_psb_xhw_init_arg *) data; -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ int ret = 0; -+ down_read(&dev_priv->sgx_sem); -+ psb_check_power_state(dev, PSB_DEVICE_SGX); -+ switch (arg->operation) { -+ case PSB_XHW_INIT: -+ ret = psb_xhw_init_init(dev, file_priv, arg); -+ break; -+ case PSB_XHW_TAKEDOWN: -+ psb_xhw_init_takedown(dev_priv, file_priv, 0); -+ break; -+ } -+ up_read(&dev_priv->sgx_sem); -+ return ret; -+} -+ -+static int psb_xhw_in_empty(struct drm_psb_private *dev_priv) -+{ -+ int empty; -+ unsigned long irq_flags; -+ -+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); -+ empty = list_empty(&dev_priv->xhw_in); -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+ return empty; -+} -+ -+int psb_xhw_handler(struct drm_psb_private *dev_priv) -+{ -+ unsigned long irq_flags; -+ struct drm_psb_xhw_arg *xa; -+ struct psb_xhw_buf *buf; -+ -+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); -+ -+ if (!dev_priv->xhw_on) { -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+ return -EINVAL; -+ } -+ -+ buf = dev_priv->xhw_cur_buf; -+ if (buf && buf->copy_back) { -+ xa = &buf->arg; -+ memcpy(xa, dev_priv->xhw, sizeof(*xa)); -+ dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op; -+ atomic_set(&buf->done, 1); -+ wake_up(&dev_priv->xhw_caller_queue); -+ } else -+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0; -+ -+ dev_priv->xhw_cur_buf = 0; -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+ return 0; -+} -+ -+int psb_xhw_ioctl(struct drm_device *dev, void *data, -+ struct drm_file *file_priv) -+{ -+ struct drm_psb_private *dev_priv = -+ (struct drm_psb_private *) dev->dev_private; -+ unsigned long irq_flags; -+ struct drm_psb_xhw_arg *xa; -+ int ret; -+ struct list_head *list; -+ struct psb_xhw_buf *buf; -+ -+ if (!dev_priv) -+ return -EINVAL; -+ -+ if (mutex_lock_interruptible(&dev_priv->xhw_mutex)) -+ return -ERESTART; -+ -+ if (psb_forced_user_interrupt(dev_priv)) { -+ mutex_unlock(&dev_priv->xhw_mutex); -+ return -EINVAL; -+ } -+ -+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); -+ while (list_empty(&dev_priv->xhw_in)) { -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+ ret = wait_event_interruptible_timeout(dev_priv->xhw_queue, -+ !psb_xhw_in_empty -+ (dev_priv), DRM_HZ); -+ if (ret == -ERESTARTSYS || ret == 0) { -+ mutex_unlock(&dev_priv->xhw_mutex); -+ return -ERESTART; -+ } -+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags); -+ } -+ -+ list = dev_priv->xhw_in.next; -+ list_del_init(list); -+ -+ buf = list_entry(list, struct psb_xhw_buf, head); -+ xa = &buf->arg; -+ memcpy(dev_priv->xhw, xa, sizeof(*xa)); -+ -+ if (unlikely(buf->copy_back)) -+ dev_priv->xhw_cur_buf = buf; -+ else { -+ atomic_set(&buf->done, 1); -+ dev_priv->xhw_cur_buf = NULL; -+ } -+ -+ if (xa->op == PSB_XHW_TERMINATE) { -+ dev_priv->xhw_on = 0; -+ wake_up(&dev_priv->xhw_caller_queue); -+ } -+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags); -+ -+ mutex_unlock(&dev_priv->xhw_mutex); -+ -+ return 0; -+} -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c ---- 
a/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,149 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ * Keith Packard. -+ */ -+ -+#include "ttm/ttm_bo_driver.h" -+#ifdef TTM_HAS_AGP -+#include "ttm/ttm_placement_common.h" -+#include <linux/agp_backend.h> -+#include <asm/agp.h> -+#include <asm/io.h> -+ -+struct ttm_agp_backend { -+ struct ttm_backend backend; -+ struct agp_memory *mem; -+ struct agp_bridge_data *bridge; -+}; -+ -+static int ttm_agp_populate(struct ttm_backend *backend, -+ unsigned long num_pages, struct page **pages, -+ struct page *dummy_read_page) -+{ -+ struct ttm_agp_backend *agp_be = -+ container_of(backend, struct ttm_agp_backend, backend); -+ struct page **cur_page, **last_page = pages + num_pages; -+ struct agp_memory *mem; -+ -+ mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY); -+ if (unlikely(mem == NULL)) -+ return -ENOMEM; -+ -+ mem->page_count = 0; -+ for (cur_page = pages; cur_page < last_page; ++cur_page) { -+ struct page *page = *cur_page; -+ if (!page) { -+ page = dummy_read_page; -+ } -+ mem->memory[mem->page_count++] = -+ phys_to_gart(page_to_phys(page)); -+ } -+ agp_be->mem = mem; -+ return 0; -+} -+ -+static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) -+{ -+ struct ttm_agp_backend *agp_be = -+ container_of(backend, struct ttm_agp_backend, backend); -+ struct agp_memory *mem = agp_be->mem; -+ int cached = (bo_mem->flags & TTM_PL_FLAG_CACHED); -+ int ret; -+ -+ mem->is_flushed = 1; -+ mem->type = (cached) ? 
AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY; -+ -+ ret = agp_bind_memory(mem, bo_mem->mm_node->start); -+ if (ret) -+ printk(KERN_ERR "AGP Bind memory failed.\n"); -+ -+ return ret; -+} -+ -+static int ttm_agp_unbind(struct ttm_backend *backend) -+{ -+ struct ttm_agp_backend *agp_be = -+ container_of(backend, struct ttm_agp_backend, backend); -+ -+ if (agp_be->mem->is_bound) -+ return agp_unbind_memory(agp_be->mem); -+ else -+ return 0; -+} -+ -+static void ttm_agp_clear(struct ttm_backend *backend) -+{ -+ struct ttm_agp_backend *agp_be = -+ container_of(backend, struct ttm_agp_backend, backend); -+ struct agp_memory *mem = agp_be->mem; -+ -+ if (mem) { -+ ttm_agp_unbind(backend); -+ agp_free_memory(mem); -+ } -+ agp_be->mem = NULL; -+} -+ -+static void ttm_agp_destroy(struct ttm_backend *backend) -+{ -+ struct ttm_agp_backend *agp_be = -+ container_of(backend, struct ttm_agp_backend, backend); -+ -+ if (agp_be->mem) -+ ttm_agp_clear(backend); -+ kfree(agp_be); -+} -+ -+static struct ttm_backend_func ttm_agp_func = { -+ .populate = ttm_agp_populate, -+ .clear = ttm_agp_clear, -+ .bind = ttm_agp_bind, -+ .unbind = ttm_agp_unbind, -+ .destroy = ttm_agp_destroy, -+}; -+ -+struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev, -+ struct agp_bridge_data *bridge) -+{ -+ struct ttm_agp_backend *agp_be; -+ -+ agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL); -+ if (!agp_be) -+ return NULL; -+ -+ agp_be->mem = NULL; -+ agp_be->bridge = bridge; -+ agp_be->backend.func = &ttm_agp_func; -+ agp_be->backend.bdev = bdev; -+ return &agp_be->backend; -+} -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_api.h b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h ---- a/drivers/gpu/drm/psb/ttm/ttm_bo_api.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,578 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#ifndef _TTM_BO_API_H_ -+#define _TTM_BO_API_H_ -+ -+#include <drm/drm_hashtab.h> -+#include <linux/kref.h> -+#include <linux/list.h> -+#include <linux/wait.h> -+#include <linux/mutex.h> -+#include <linux/mm.h> -+#include <linux/rbtree.h> -+ -+struct ttm_bo_device; -+ -+struct drm_mm_node; -+ -+/** -+ * struct ttm_mem_reg -+ * -+ * @mm_node: Memory manager node. -+ * @size: Requested size of memory region. -+ * @num_pages: Actual size of memory region in pages. -+ * @page_alignment: Page alignment. -+ * @flags: Placement flags. -+ * @proposed_flags: Proposed placement flags. -+ * -+ * Structure indicating the placement and space resources used by a -+ * buffer object. -+ */ -+ -+struct ttm_mem_reg { -+ struct drm_mm_node *mm_node; -+ unsigned long size; -+ unsigned long num_pages; -+ uint32_t page_alignment; -+ uint32_t mem_type; -+ uint32_t flags; -+ uint32_t proposed_flags; -+}; -+ -+/** -+ * enum ttm_bo_type -+ * -+ * @ttm_bo_type_device: These are 'normal' buffers that can -+ * be mmapped by user space. Each of these bos occupy a slot in the -+ * device address space, that can be used for normal vm operations. -+ * -+ * @ttm_bo_type_user: These are user-space memory areas that are made -+ * available to the GPU by mapping the buffer pages into the GPU aperture -+ * space. These buffers cannot be mmaped from the device address space. -+ * -+ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers, -+ * but they cannot be accessed from user-space. For kernel-only use. -+ */ -+ -+enum ttm_bo_type { -+ ttm_bo_type_device, -+ ttm_bo_type_user, -+ ttm_bo_type_kernel -+}; -+ -+struct ttm_tt; -+ -+/** -+ * struct ttm_buffer_object -+ * -+ * @bdev: Pointer to the buffer object device structure. -+ * @kref: Reference count of this buffer object. When this refcount reaches -+ * zero, the object is put on the delayed delete list. -+ * @list_kref: List reference count of this buffer object. This member is -+ * used to avoid destruction while the buffer object is still on a list. -+ * Lru lists may keep one refcount, the delayed delete list, and kref != 0 -+ * keeps one refcount. When this refcount reaches zero, -+ * the object is destroyed. -+ * @proposed_flags: Proposed placement for the buffer. Changed only by the -+ * creator prior to validation as opposed to bo->mem.proposed_flags which is -+ * changed by the implementation prior to a buffer move if it wants to outsmart -+ * the buffer creator / user. This latter happens, for example, at eviction. -+ * @buffer_start: The virtual user-space start address of ttm_bo_type_user -+ * buffers. -+ * @type: The bo type. -+ * @offset: The current GPU offset, which can have different meanings -+ * depending on the memory type. For SYSTEM type memory, it should be 0. -+ * @mem: structure describing current placement. -+ * @val_seq: Sequence of the validation holding the @reserved lock. -+ * Used to avoid starvation when many processes compete to validate the -+ * buffer. This member is protected by the bo_device::lru_lock. -+ * @seq_valid: The value of @val_seq is valid. This value is protected by -+ * the bo_device::lru_lock. -+ * @lru: List head for the lru list. -+ * @ddestroy: List head for the delayed destroy list. -+ * @swap: List head for swap LRU list. -+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers -+ * pinned in physical memory. 
If this behaviour is not desired, this member -+ * holds a pointer to a persistant shmem object. -+ * @destroy: Destruction function. If NULL, kfree is used. -+ * @sync_obj_arg: Opaque argument to synchronization object function. -+ * @sync_obj: Pointer to a synchronization object. -+ * @priv_flags: Flags describing buffer object internal state. -+ * @event_queue: Queue for processes waiting on buffer object status change. -+ * @mutex: Lock protecting all members with the exception of constant members -+ * and list heads. We should really use a spinlock here. -+ * @num_pages: Actual number of pages. -+ * @ttm: TTM structure holding system pages. -+ * @vm_hash: Hash item for fast address space lookup. Need to change to a -+ * rb-tree node. -+ * @vm_node: Address space manager node. -+ * @addr_space_offset: Address space offset. -+ * @cpu_writes: For synchronization. Number of cpu writers. -+ * @reserved: Deadlock-free lock used for synchronization state transitions. -+ * @acc_size: Accounted size for this object. -+ * -+ * Base class for TTM buffer object, that deals with data placement and CPU -+ * mappings. GPU mappings are really up to the driver, but for simpler GPUs -+ * the driver can usually use the placement offset @offset directly as the -+ * GPU virtual address. For drivers implementing multiple -+ * GPU memory manager contexts, the driver should manage the address space -+ * in these contexts separately and use these objects to get the correct -+ * placement and caching for these GPU maps. This makes it possible to use -+ * these objects for even quite elaborate memory management schemes. -+ * The destroy member, the API visibility of this object makes it possible -+ * to derive driver specific types. -+ */ -+ -+struct ttm_buffer_object { -+ struct ttm_bo_device *bdev; -+ struct kref kref; -+ struct kref list_kref; -+ -+ /* -+ * If there is a possibility that the usage variable is zero, -+ * then dev->struct_mutex should be locked before incrementing it. -+ */ -+ -+ uint32_t proposed_flags; -+ unsigned long buffer_start; -+ enum ttm_bo_type type; -+ unsigned long offset; -+ struct ttm_mem_reg mem; -+ uint32_t val_seq; -+ bool seq_valid; -+ -+ struct list_head lru; -+ struct list_head ddestroy; -+ struct list_head swap; -+ -+ struct file *persistant_swap_storage; -+ -+ void (*destroy) (struct ttm_buffer_object *); -+ -+ void *sync_obj_arg; -+ void *sync_obj; -+ -+ uint32_t priv_flags; -+ wait_queue_head_t event_queue; -+ struct mutex mutex; -+ unsigned long num_pages; -+ -+ struct ttm_tt *ttm; -+ struct rb_node vm_rb; -+ struct drm_mm_node *vm_node; -+ uint64_t addr_space_offset; -+ -+ atomic_t cpu_writers; -+ atomic_t reserved; -+ -+ size_t acc_size; -+}; -+ -+/** -+ * struct ttm_bo_kmap_obj -+ * -+ * @virtual: The current kernel virtual address. -+ * @page: The page when kmap'ing a single page. -+ * @bo_kmap_type: Type of bo_kmap. -+ * -+ * Object describing a kernel mapping. Since a TTM bo may be located -+ * in various memory types with various caching policies, the -+ * mapping can either be an ioremap, a vmap, a kmap or part of a -+ * premapped region. -+ */ -+ -+struct ttm_bo_kmap_obj { -+ void *virtual; -+ struct page *page; -+ enum { -+ ttm_bo_map_iomap, -+ ttm_bo_map_vmap, -+ ttm_bo_map_kmap, -+ ttm_bo_map_premapped, -+ } bo_kmap_type; -+}; -+ -+/** -+ * ttm_bo_reference - reference a struct ttm_buffer_object -+ * -+ * @bo: The buffer object. -+ * -+ * Returns a refcounted pointer to a buffer object. 
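-+ * -+ * Minimal usage sketch; every reference taken this way must be -+ * balanced with ttm_bo_unref(): -+ * -+ * struct ttm_buffer_object *ref = ttm_bo_reference(bo); -+ * ... -+ * ttm_bo_unref(&ref);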
-+ */ -+ -+static inline struct ttm_buffer_object *ttm_bo_reference(struct -+ ttm_buffer_object *bo) -+{ -+ kref_get(&bo->kref); -+ return bo; -+} -+ -+/** -+ * ttm_bo_wait - wait for buffer idle. -+ * -+ * @bo: The buffer object. -+ * @interruptible: Use interruptible wait. -+ * @no_wait: Return immediately if buffer is busy. -+ * -+ * This function must be called with the bo::mutex held, and makes -+ * sure any previous rendering to the buffer is completed. -+ * Note: It might be necessary to block validations before the -+ * wait by reserving the buffer. -+ * Returns -EBUSY if no_wait is true and the buffer is busy. -+ * Returns -ERESTART if interrupted by a signal. -+ */ -+extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, -+ bool interruptible, bool no_wait); -+/** -+ * ttm_buffer_object_validate -+ * -+ * @bo: The buffer object. -+ * @interruptible: Sleep interruptible if sleeping. -+ * @no_wait: Return immediately if the buffer is busy. -+ * -+ * Changes placement and caching policy of the buffer object -+ * according to bo::proposed_flags. -+ * Returns -+ * -EINVAL on invalid proposed_flags. -+ * -ENOMEM on out-of-memory condition. -+ * -EBUSY if no_wait is true and buffer busy. -+ * -ERESTART if interrupted by a signal. -+ */ -+extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo, -+ bool interruptible, bool no_wait); -+/** -+ * ttm_bo_unref -+ * -+ * @bo: The buffer object. -+ * -+ * Unreference and clear a pointer to a buffer object. -+ */ -+extern void ttm_bo_unref(struct ttm_buffer_object **bo); -+ -+/** -+ * ttm_bo_synccpu_write_grab -+ * -+ * @bo: The buffer object: -+ * @no_wait: Return immediately if buffer is busy. -+ * -+ * Synchronizes a buffer object for CPU RW access. This means -+ * blocking command submission that affects the buffer and -+ * waiting for buffer idle. This lock is recursive. -+ * Returns -+ * -EBUSY if the buffer is busy and no_wait is true. -+ * -ERESTART if interrupted by a signal. -+ */ -+ -+extern int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); -+/** -+ * ttm_bo_synccpu_write_release: -+ * -+ * @bo : The buffer object. -+ * -+ * Releases a synccpu lock. -+ */ -+extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); -+ -+/** -+ * ttm_buffer_object_init -+ * -+ * @bdev: Pointer to a ttm_bo_device struct. -+ * @bo: Pointer to a ttm_buffer_object to be initialized. -+ * @size: Requested size of buffer object. -+ * @type: Requested type of buffer object. -+ * @flags: Initial placement flags. -+ * @page_alignment: Data alignment in pages. -+ * @buffer_start: Virtual address of user space data backing a -+ * user buffer object. -+ * @interruptible: If needing to sleep to wait for GPU resources, -+ * sleep interruptible. -+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers -+ * pinned in physical memory. If this behaviour is not desired, this member -+ * holds a pointer to a persistant shmem object. Typically, this would -+ * point to the shmem object backing a GEM object if TTM is used to back a -+ * GEM user interface. -+ * @acc_size: Accounted size for this object. -+ * @destroy: Destroy function. Use NULL for kfree(). -+ * -+ * This function initializes a pre-allocated struct ttm_buffer_object. -+ * As this object may be part of a larger structure, this function, -+ * together with the @destroy function, -+ * enables driver-specific objects derived from a ttm_buffer_object. -+ * On successful return, the object kref and list_kref are set to 1. 
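-+ * -+ * Illustrative call sketch (the placement flags and accounted size are -+ * examples only, not values mandated by this API): -+ * -+ * ret = ttm_buffer_object_init(bdev, bo, PAGE_SIZE, ttm_bo_type_kernel, -+ * TTM_PL_FLAG_SYSTEM, 0, 0, false, NULL, -+ * acc_size, NULL); -+ *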
-+ * Returns -+ * -ENOMEM: Out of memory. -+ * -EINVAL: Invalid placement flags. -+ * -ERESTART: Interrupted by signal while sleeping waiting for resources. -+ */ -+ -+extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, -+ struct ttm_buffer_object *bo, -+ unsigned long size, -+ enum ttm_bo_type type, -+ uint32_t flags, -+ uint32_t page_alignment, -+ unsigned long buffer_start, -+ bool interrubtible, -+ struct file *persistant_swap_storage, -+ size_t acc_size, -+ void (*destroy) (struct ttm_buffer_object *)); -+/** -+ * ttm_bo_synccpu_object_init -+ * -+ * @bdev: Pointer to a ttm_bo_device struct. -+ * @bo: Pointer to a ttm_buffer_object to be initialized. -+ * @size: Requested size of buffer object. -+ * @type: Requested type of buffer object. -+ * @flags: Initial placement flags. -+ * @page_alignment: Data alignment in pages. -+ * @buffer_start: Virtual address of user space data backing a -+ * user buffer object. -+ * @interruptible: If needing to sleep while waiting for GPU resources, -+ * sleep interruptible. -+ * @persistant_swap_storage: Usually the swap storage is deleted for buffers -+ * pinned in physical memory. If this behaviour is not desired, this member -+ * holds a pointer to a persistant shmem object. Typically, this would -+ * point to the shmem object backing a GEM object if TTM is used to back a -+ * GEM user interface. -+ * @p_bo: On successful completion *p_bo points to the created object. -+ * -+ * This function allocates a ttm_buffer_object, and then calls -+ * ttm_buffer_object_init on that object. -+ * The destroy function is set to kfree(). -+ * Returns -+ * -ENOMEM: Out of memory. -+ * -EINVAL: Invalid placement flags. -+ * -ERESTART: Interrupted by signal while waiting for resources. -+ */ -+ -+extern int ttm_buffer_object_create(struct ttm_bo_device *bdev, -+ unsigned long size, -+ enum ttm_bo_type type, -+ uint32_t flags, -+ uint32_t page_alignment, -+ unsigned long buffer_start, -+ bool interruptible, -+ struct file *persistant_swap_storage, -+ struct ttm_buffer_object **p_bo); -+ -+/** -+ * ttm_bo_check_placement -+ * -+ * @bo: the buffer object. -+ * @set_flags: placement flags to set. -+ * @clr_flags: placement flags to clear. -+ * -+ * Performs minimal validity checking on an intended change of -+ * placement flags. -+ * Returns -+ * -EINVAL: Intended change is invalid or not allowed. -+ */ -+ -+extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, -+ uint32_t set_flags, uint32_t clr_flags); -+ -+/** -+ * ttm_bo_init_mm -+ * -+ * @bdev: Pointer to a ttm_bo_device struct. -+ * @mem_type: The memory type. -+ * @p_offset: offset for managed area in pages. -+ * @p_size: size managed area in pages. -+ * -+ * Initialize a manager for a given memory type. -+ * Note: if part of driver firstopen, it must be protected from a -+ * potentially racing lastclose. -+ * Returns: -+ * -EINVAL: invalid size or memory type. -+ * -ENOMEM: Not enough memory. -+ * May also return driver-specified errors. -+ */ -+ -+extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, -+ unsigned long p_offset, unsigned long p_size); -+/** -+ * ttm_bo_clean_mm -+ * -+ * @bdev: Pointer to a ttm_bo_device struct. -+ * @mem_type: The memory type. -+ * -+ * Take down a manager for a given memory type after first walking -+ * the LRU list to evict any buffers left alive. 
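-+ * ("Evict" here means the buffer contents are moved out of this memory -+ * type, normally back to system memory, before the manager is taken -+ * down.)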
-+ * -+ * Normally, this function is part of lastclose() or unload(), and at that -+ * point there shouldn't be any buffers left created by user-space, since -+ * there should've been removed by the file descriptor release() method. -+ * However, before this function is run, make sure to signal all sync objects, -+ * and verify that the delayed delete queue is empty. The driver must also -+ * make sure that there are no NO_EVICT buffers present in this memory type -+ * when the call is made. -+ * -+ * If this function is part of a VT switch, the caller must make sure that -+ * there are no appications currently validating buffers before this -+ * function is called. The caller can do that by first taking the -+ * struct ttm_bo_device::ttm_lock in write mode. -+ * -+ * Returns: -+ * -EINVAL: invalid or uninitialized memory type. -+ * -EBUSY: There are still buffers left in this memory type. -+ */ -+ -+extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); -+ -+/** -+ * ttm_bo_evict_mm -+ * -+ * @bdev: Pointer to a ttm_bo_device struct. -+ * @mem_type: The memory type. -+ * -+ * Evicts all buffers on the lru list of the memory type. -+ * This is normally part of a VT switch or an -+ * out-of-memory-space-due-to-fragmentation handler. -+ * The caller must make sure that there are no other processes -+ * currently validating buffers, and can do that by taking the -+ * struct ttm_bo_device::ttm_lock in write mode. -+ * -+ * Returns: -+ * -EINVAL: Invalid or uninitialized memory type. -+ * -ERESTART: The call was interrupted by a signal while waiting to -+ * evict a buffer. -+ */ -+ -+extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); -+ -+/** -+ * ttm_kmap_obj_virtual -+ * -+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap. -+ * @is_iomem: Pointer to an integer that on return indicates 1 if the -+ * virtual map is io memory, 0 if normal memory. -+ * -+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap. -+ * If *is_iomem is 1 on return, the virtual address points to an io memory area, -+ * that should strictly be accessed by the iowriteXX() and similar functions. -+ */ -+ -+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, -+ bool *is_iomem) -+{ -+ *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap || -+ map->bo_kmap_type == ttm_bo_map_premapped); -+ return map->virtual; -+} -+ -+/** -+ * ttm_bo_kmap -+ * -+ * @bo: The buffer object. -+ * @start_page: The first page to map. -+ * @num_pages: Number of pages to map. -+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map. -+ * -+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the -+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be -+ * used to obtain a virtual address to the data. -+ * -+ * Returns -+ * -ENOMEM: Out of memory. -+ * -EINVAL: Invalid range. -+ */ -+ -+extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, -+ unsigned long num_pages, struct ttm_bo_kmap_obj *map); -+ -+/** -+ * ttm_bo_kunmap -+ * -+ * @map: Object describing the map to unmap. -+ * -+ * Unmaps a kernel map set up by ttm_bo_kmap. -+ */ -+ -+extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); -+ -+#if 0 -+#endif -+ -+/** -+ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object. -+ * -+ * @vma: vma as input from the fbdev mmap method. -+ * @bo: The bo backing the address space. The address space will -+ * have the same size as the bo, and start at offset 0. 
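-+ * -+ * Sketch of an fbdev mmap hook built on this call, where my_fb_bo is a -+ * hypothetical buffer object backing the framebuffer: -+ * -+ * static int my_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) -+ * { -+ * return ttm_fbdev_mmap(vma, my_fb_bo); -+ * } -+ *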
-+ * -+ * This function is intended to be called by the fbdev mmap method -+ * if the fbdev address space is to be backed by a bo. -+ */ -+ -+extern int ttm_fbdev_mmap(struct vm_area_struct *vma, -+ struct ttm_buffer_object *bo); -+ -+/** -+ * ttm_bo_mmap - mmap out of the ttm device address space. -+ * -+ * @filp: filp as input from the mmap method. -+ * @vma: vma as input from the mmap method. -+ * @bdev: Pointer to the ttm_bo_device with the address space manager. -+ * -+ * This function is intended to be called by the device mmap method. -+ * if the device address space is to be backed by the bo manager. -+ */ -+ -+extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, -+ struct ttm_bo_device *bdev); -+ -+/** -+ * ttm_bo_io -+ * -+ * @bdev: Pointer to the struct ttm_bo_device. -+ * @filp: Pointer to the struct file attempting to read / write. -+ * @wbuf: User-space pointer to address of buffer to write. NULL on read. -+ * @rbuf: User-space pointer to address of buffer to read into. Null on write. -+ * @count: Number of bytes to read / write. -+ * @f_pos: Pointer to current file position. -+ * @write: 1 for read, 0 for write. -+ * -+ * This function implements read / write into ttm buffer objects, and is intended to -+ * be called from the fops::read and fops::write method. -+ * Returns: -+ * See man (2) write, man(2) read. In particular, the function may return -EINTR if -+ * interrupted by a signal. -+ */ -+ -+extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, -+ const char __user * wbuf, char __user * rbuf, -+ size_t count, loff_t * f_pos, bool write); -+ -+extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo.c b/drivers/gpu/drm/psb/ttm/ttm_bo.c ---- a/drivers/gpu/drm/psb/ttm/ttm_bo.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_bo.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,1716 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#include "ttm/ttm_bo_driver.h" -+#include "ttm/ttm_placement_common.h" -+#include <linux/jiffies.h> -+#include <linux/slab.h> -+#include <linux/sched.h> -+#include <linux/mm.h> -+#include <linux/file.h> -+ -+#define TTM_ASSERT_LOCKED(param) -+#define TTM_DEBUG(fmt, arg...) -+#define TTM_BO_HASH_ORDER 13 -+ -+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); -+static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); -+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); -+ -+static inline uint32_t ttm_bo_type_flags(unsigned type) -+{ -+ return (1 << (type)); -+} -+ -+static void ttm_bo_release_list(struct kref *list_kref) -+{ -+ struct ttm_buffer_object *bo = -+ container_of(list_kref, struct ttm_buffer_object, list_kref); -+ struct ttm_bo_device *bdev = bo->bdev; -+ -+ BUG_ON(atomic_read(&bo->list_kref.refcount)); -+ BUG_ON(atomic_read(&bo->kref.refcount)); -+ BUG_ON(atomic_read(&bo->cpu_writers)); -+ BUG_ON(bo->sync_obj != NULL); -+ BUG_ON(bo->mem.mm_node != NULL); -+ BUG_ON(!list_empty(&bo->lru)); -+ BUG_ON(!list_empty(&bo->ddestroy)); -+ -+ if (bo->ttm) -+ ttm_tt_destroy(bo->ttm); -+ if (bo->destroy) -+ bo->destroy(bo); -+ else { -+ ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false); -+ kfree(bo); -+ } -+} -+ -+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) -+{ -+ -+ if (interruptible) { -+ int ret = 0; -+ -+ ret = wait_event_interruptible(bo->event_queue, -+ atomic_read(&bo->reserved) == 0); -+ if (unlikely(ret != 0)) -+ return -ERESTART; -+ } else { -+ wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); -+ } -+ return 0; -+} -+ -+static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) -+{ -+ struct ttm_bo_device *bdev = bo->bdev; -+ struct ttm_mem_type_manager *man; -+ -+ BUG_ON(!atomic_read(&bo->reserved)); -+ -+ if (!(bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) { -+ -+ BUG_ON(!list_empty(&bo->lru)); -+ -+ man = &bdev->man[bo->mem.mem_type]; -+ list_add_tail(&bo->lru, &man->lru); -+ kref_get(&bo->list_kref); -+ -+ if (bo->ttm != NULL) { -+ list_add_tail(&bo->swap, &bdev->swap_lru); -+ kref_get(&bo->list_kref); -+ } -+ } -+} -+ -+/* -+ * Call with bdev->lru_lock and bdev->global->swap_lock held.. -+ */ -+ -+static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) -+{ -+ int put_count = 0; -+ -+ if (!list_empty(&bo->swap)) { -+ list_del_init(&bo->swap); -+ ++put_count; -+ } -+ if (!list_empty(&bo->lru)) { -+ list_del_init(&bo->lru); -+ ++put_count; -+ } -+ -+ /* -+ * TODO: Add a driver hook to delete from -+ * driver-specific LRU's here. 
-+	 */
-+
-+	return put_count;
-+}
-+
-+int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
-+			  bool interruptible,
-+			  bool no_wait, bool use_sequence, uint32_t sequence)
-+{
-+	struct ttm_bo_device *bdev = bo->bdev;
-+	int ret;
-+
-+	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
-+		if (use_sequence && bo->seq_valid &&
-+		    (sequence - bo->val_seq < (1 << 31))) {
-+			return -EAGAIN;
-+		}
-+
-+		if (no_wait)
-+			return -EBUSY;
-+
-+		spin_unlock(&bdev->lru_lock);
-+		ret = ttm_bo_wait_unreserved(bo, interruptible);
-+		spin_lock(&bdev->lru_lock);
-+
-+		if (unlikely(ret))
-+			return ret;
-+	}
-+
-+	if (use_sequence) {
-+		bo->val_seq = sequence;
-+		bo->seq_valid = true;
-+	} else {
-+		bo->seq_valid = false;
-+	}
-+
-+	return 0;
-+}
-+
-+static void ttm_bo_ref_bug(struct kref *list_kref)
-+{
-+	BUG();
-+}
-+
-+int ttm_bo_reserve(struct ttm_buffer_object *bo,
-+		   bool interruptible,
-+		   bool no_wait, bool use_sequence, uint32_t sequence)
-+{
-+	struct ttm_bo_device *bdev = bo->bdev;
-+	int put_count = 0;
-+	int ret;
-+
-+	spin_lock(&bdev->lru_lock);
-+	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
-+				    sequence);
-+	if (likely(ret == 0))
-+		put_count = ttm_bo_del_from_lru(bo);
-+	spin_unlock(&bdev->lru_lock);
-+
-+	while (put_count--)
-+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
-+
-+	return ret;
-+}
-+
-+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
-+{
-+	struct ttm_bo_device *bdev = bo->bdev;
-+
-+	spin_lock(&bdev->lru_lock);
-+	ttm_bo_add_to_lru(bo);
-+	atomic_set(&bo->reserved, 0);
-+	wake_up_all(&bo->event_queue);
-+	spin_unlock(&bdev->lru_lock);
-+}
-+
-+/*
-+ * Call bo->mutex locked.
-+ */
-+
-+static int ttm_bo_add_ttm(struct ttm_buffer_object *bo)
-+{
-+	struct ttm_bo_device *bdev = bo->bdev;
-+	int ret = 0;
-+	uint32_t page_flags = 0;
-+
-+	TTM_ASSERT_LOCKED(&bo->mutex);
-+	bo->ttm = NULL;
-+
-+	switch (bo->type) {
-+	case ttm_bo_type_device:
-+	case ttm_bo_type_kernel:
-+		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-+					page_flags, bdev->dummy_read_page);
-+		if (unlikely(bo->ttm == NULL))
-+			ret = -ENOMEM;
-+		break;
-+	case ttm_bo_type_user:
-+		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-+					page_flags | TTM_PAGE_FLAG_USER,
-+					bdev->dummy_read_page);
-+		/*
-+		 * Editor's fix: the original had an unconditional break
-+		 * here, which made the ttm_tt_set_user() call below
-+		 * unreachable. Only bail out early on allocation failure.
-+		 */
-+		if (unlikely(bo->ttm == NULL)) {
-+			ret = -ENOMEM;
-+			break;
-+		}
-+
-+		ret = ttm_tt_set_user(bo->ttm, current,
-+				      bo->buffer_start, bo->num_pages);
-+		if (unlikely(ret != 0))
-+			ttm_tt_destroy(bo->ttm);
-+		break;
-+	default:
-+		printk(KERN_ERR "Illegal buffer object type\n");
-+		ret = -EINVAL;
-+		break;
-+	}
-+
-+	return ret;
-+}
-+
-+static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
-+				  struct ttm_mem_reg *mem,
-+				  bool evict, bool interruptible, bool no_wait)
-+{
-+	struct ttm_bo_device *bdev = bo->bdev;
-+	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
-+	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
-+	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
-+	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
-+	int ret = 0;
-+
-+	if (old_is_pci || new_is_pci ||
-+	    ((mem->flags & bo->mem.flags & TTM_PL_MASK_CACHING) == 0))
-+		ttm_bo_unmap_virtual(bo);
-+
-+	/*
-+	 * Create and bind a ttm if required.
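-+	 * A ttm is only needed for placements backed by system pages,
-+	 * i.e. when the new memory type is not TTM_MEMTYPE_FLAG_FIXED
-+	 * (editor's note).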
-+ */ -+ -+ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) { -+ ret = ttm_bo_add_ttm(bo); -+ if (ret) -+ goto out_err; -+ -+ ret = ttm_tt_set_placement_caching(bo->ttm, mem->flags); -+ if (ret) -+ return ret; -+ -+ if (mem->mem_type != TTM_PL_SYSTEM) { -+ ret = ttm_tt_bind(bo->ttm, mem); -+ if (ret) -+ goto out_err; -+ } -+ -+ if (bo->mem.mem_type == TTM_PL_SYSTEM) { -+ -+ struct ttm_mem_reg *old_mem = &bo->mem; -+ uint32_t save_flags = old_mem->flags; -+ uint32_t save_proposed_flags = old_mem->proposed_flags; -+ -+ *old_mem = *mem; -+ mem->mm_node = NULL; -+ old_mem->proposed_flags = save_proposed_flags; -+ ttm_flag_masked(&save_flags, mem->flags, -+ TTM_PL_MASK_MEMTYPE); -+ goto moved; -+ } -+ -+ } -+ -+ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && -+ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) -+ ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); -+ else if (bdev->driver->move) -+ ret = bdev->driver->move(bo, evict, interruptible, -+ no_wait, mem); -+ else -+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem); -+ -+ if (ret) -+ goto out_err; -+ -+ moved: -+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_EVICTED) { -+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.flags); -+ if (ret) -+ printk(KERN_ERR "Can not flush read caches\n"); -+ } -+ -+ ttm_flag_masked(&bo->priv_flags, -+ (evict) ? TTM_BO_PRIV_FLAG_EVICTED : 0, -+ TTM_BO_PRIV_FLAG_EVICTED); -+ -+ if (bo->mem.mm_node) -+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + -+ bdev->man[bo->mem.mem_type].gpu_offset; -+ -+ return 0; -+ -+ out_err: -+ new_man = &bdev->man[bo->mem.mem_type]; -+ if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) { -+ ttm_tt_unbind(bo->ttm); -+ ttm_tt_destroy(bo->ttm); -+ bo->ttm = NULL; -+ } -+ -+ return ret; -+} -+ -+static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo, -+ bool allow_errors) -+{ -+ struct ttm_bo_device *bdev = bo->bdev; -+ struct ttm_bo_driver *driver = bdev->driver; -+ -+ if (bo->sync_obj) { -+ if (bdev->nice_mode) { -+ unsigned long _end = jiffies + 3 * HZ; -+ int ret; -+ do { -+ ret = ttm_bo_wait(bo, false, false, false); -+ if (ret && allow_errors) -+ return ret; -+ -+ } while (ret && !time_after_eq(jiffies, _end)); -+ -+ if (bo->sync_obj) { -+ bdev->nice_mode = false; -+ printk(KERN_ERR "Detected probable GPU lockup. " -+ "Evicting buffer.\n"); -+ } -+ } -+ if (bo->sync_obj) { -+ driver->sync_obj_unref(&bo->sync_obj); -+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING; -+ } -+ } -+ return 0; -+} -+ -+/** -+ * If bo idle, remove from delayed- and lru lists, and unref. -+ * If not idle, and already on delayed list, do nothing. -+ * If not idle, and not on delayed list, put on delayed list, -+ * up the list_kref and schedule a delayed list check. 
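-+ * In other words (editor's note), a still-busy buffer is parked on
-+ * bdev->ddestroy, and the delayed workqueue retries until its sync
-+ * object signals; only then is the final list reference dropped.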
-+ */ -+ -+static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) -+{ -+ struct ttm_bo_device *bdev = bo->bdev; -+ struct ttm_bo_driver *driver = bdev->driver; -+ -+ mutex_lock(&bo->mutex); -+ -+ if (bo->sync_obj && driver->sync_obj_signaled(bo->sync_obj, -+ bo->sync_obj_arg)) { -+ driver->sync_obj_unref(&bo->sync_obj); -+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING; -+ } -+ -+ if (bo->sync_obj && remove_all) -+ (void)ttm_bo_expire_sync_obj(bo, false); -+ -+ if (!bo->sync_obj) { -+ int put_count; -+ -+ if (bo->ttm) -+ ttm_tt_unbind(bo->ttm); -+ spin_lock(&bdev->lru_lock); -+ if (!list_empty(&bo->ddestroy)) { -+ list_del_init(&bo->ddestroy); -+ kref_put(&bo->list_kref, ttm_bo_ref_bug); -+ } -+ if (bo->mem.mm_node) { -+ drm_mm_put_block(bo->mem.mm_node); -+ bo->mem.mm_node = NULL; -+ } -+ put_count = ttm_bo_del_from_lru(bo); -+ spin_unlock(&bdev->lru_lock); -+ mutex_unlock(&bo->mutex); -+ while (put_count--) -+ kref_put(&bo->list_kref, ttm_bo_release_list); -+ -+ return; -+ } -+ -+ spin_lock(&bdev->lru_lock); -+ if (list_empty(&bo->ddestroy)) { -+ spin_unlock(&bdev->lru_lock); -+ driver->sync_obj_flush(bo->sync_obj, bo->sync_obj_arg); -+ spin_lock(&bdev->lru_lock); -+ if (list_empty(&bo->ddestroy)) { -+ kref_get(&bo->list_kref); -+ list_add_tail(&bo->ddestroy, &bdev->ddestroy); -+ } -+ spin_unlock(&bdev->lru_lock); -+ schedule_delayed_work(&bdev->wq, -+ ((HZ / 100) < 1) ? 1 : HZ / 100); -+ } else -+ spin_unlock(&bdev->lru_lock); -+ -+ mutex_unlock(&bo->mutex); -+ return; -+} -+ -+/** -+ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all -+ * encountered buffers. -+ */ -+ -+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) -+{ -+ struct ttm_buffer_object *entry, *nentry; -+ struct list_head *list, *next; -+ int ret; -+ -+ spin_lock(&bdev->lru_lock); -+ list_for_each_safe(list, next, &bdev->ddestroy) { -+ entry = list_entry(list, struct ttm_buffer_object, ddestroy); -+ nentry = NULL; -+ -+ /* -+ * Protect the next list entry from destruction while we -+ * unlock the lru_lock. -+ */ -+ -+ if (next != &bdev->ddestroy) { -+ nentry = list_entry(next, struct ttm_buffer_object, -+ ddestroy); -+ kref_get(&nentry->list_kref); -+ } -+ kref_get(&entry->list_kref); -+ -+ spin_unlock(&bdev->lru_lock); -+ ttm_bo_cleanup_refs(entry, remove_all); -+ kref_put(&entry->list_kref, ttm_bo_release_list); -+ spin_lock(&bdev->lru_lock); -+ -+ if (nentry) { -+ bool next_onlist = !list_empty(next); -+ kref_put(&nentry->list_kref, ttm_bo_release_list); -+ -+ /* -+ * Someone might have raced us and removed the -+ * next entry from the list. We don't bother restarting -+ * list traversal. -+ */ -+ -+ if (!next_onlist) -+ break; -+ } -+ } -+ ret = !list_empty(&bdev->ddestroy); -+ spin_unlock(&bdev->lru_lock); -+ -+ return ret; -+} -+ -+static void ttm_bo_delayed_workqueue(struct work_struct *work) -+{ -+ struct ttm_bo_device *bdev = -+ container_of(work, struct ttm_bo_device, wq.work); -+ -+ if (ttm_bo_delayed_delete(bdev, false)) { -+ schedule_delayed_work(&bdev->wq, -+ ((HZ / 100) < 1) ? 
1 : HZ / 100); -+ } -+} -+ -+static void ttm_bo_release(struct kref *kref) -+{ -+ struct ttm_buffer_object *bo = -+ container_of(kref, struct ttm_buffer_object, kref); -+ struct ttm_bo_device *bdev = bo->bdev; -+ -+ if (likely(bo->vm_node != NULL)) { -+ rb_erase(&bo->vm_rb, &bdev->addr_space_rb); -+ drm_mm_put_block(bo->vm_node); -+ } -+ write_unlock(&bdev->vm_lock); -+ ttm_bo_cleanup_refs(bo, false); -+ kref_put(&bo->list_kref, ttm_bo_release_list); -+ write_lock(&bdev->vm_lock); -+} -+ -+void ttm_bo_unref(struct ttm_buffer_object **p_bo) -+{ -+ struct ttm_buffer_object *bo = *p_bo; -+ struct ttm_bo_device *bdev = bo->bdev; -+ -+ *p_bo = NULL; -+ write_lock(&bdev->vm_lock); -+ kref_put(&bo->kref, ttm_bo_release); -+ write_unlock(&bdev->vm_lock); -+} -+ -+static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, -+ bool interruptible, bool no_wait) -+{ -+ int ret = 0; -+ struct ttm_bo_device *bdev = bo->bdev; -+ struct ttm_mem_reg evict_mem; -+ -+ if (bo->mem.mem_type != mem_type) -+ goto out; -+ -+ ret = ttm_bo_wait(bo, false, interruptible, no_wait); -+ if (ret && ret != -ERESTART) { -+ printk(KERN_ERR "Failed to expire sync object before " -+ "buffer eviction.\n"); -+ goto out; -+ } -+ -+ BUG_ON(!atomic_read(&bo->reserved)); -+ -+ evict_mem = bo->mem; -+ evict_mem.mm_node = NULL; -+ -+ evict_mem.proposed_flags = bdev->driver->evict_flags(bo); -+ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags); -+ -+ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait); -+ if (unlikely(ret != 0 && ret != -ERESTART)) { -+ evict_mem.proposed_flags = TTM_PL_FLAG_SYSTEM; -+ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags); -+ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait); -+ } -+ -+ if (ret) { -+ if (ret != -ERESTART) -+ printk(KERN_ERR "Failed to find memory space for " -+ "buffer 0x%p eviction.\n", bo); -+ goto out; -+ } -+ -+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, no_wait); -+ if (ret) { -+ if (ret != -ERESTART) -+ printk(KERN_ERR "Buffer eviction failed\n"); -+ goto out; -+ } -+ -+ spin_lock(&bdev->lru_lock); -+ if (evict_mem.mm_node) { -+ drm_mm_put_block(evict_mem.mm_node); -+ evict_mem.mm_node = NULL; -+ } -+ spin_unlock(&bdev->lru_lock); -+ -+ ttm_flag_masked(&bo->priv_flags, TTM_BO_PRIV_FLAG_EVICTED, -+ TTM_BO_PRIV_FLAG_EVICTED); -+ -+ out: -+ return ret; -+} -+ -+/** -+ * Repeatedly evict memory from the LRU for @mem_type until we create enough -+ * space, or we've evicted everything and there isn't enough space. 
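-+ * Each pass reserves the LRU head, evicts it with ttm_bo_evict() and
-+ * retries the free-space search, so the loop terminates either with a
-+ * suitable node or with an exhausted LRU (editor's note).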
-+ */ -+static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev, -+ struct ttm_mem_reg *mem, -+ uint32_t mem_type, -+ bool interruptible, bool no_wait) -+{ -+ struct drm_mm_node *node; -+ struct ttm_buffer_object *entry; -+ struct ttm_mem_type_manager *man = &bdev->man[mem_type]; -+ struct list_head *lru; -+ unsigned long num_pages = mem->num_pages; -+ int put_count = 0; -+ int ret; -+ -+ retry_pre_get: -+ ret = drm_mm_pre_get(&man->manager); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ spin_lock(&bdev->lru_lock); -+ do { -+ node = drm_mm_search_free(&man->manager, num_pages, -+ mem->page_alignment, 1); -+ if (node) -+ break; -+ -+ lru = &man->lru; -+ if (list_empty(lru)) -+ break; -+ -+ entry = list_first_entry(lru, struct ttm_buffer_object, lru); -+ kref_get(&entry->list_kref); -+ -+ ret = -+ ttm_bo_reserve_locked(entry, interruptible, no_wait, false, 0); -+ -+ if (likely(ret == 0)) -+ put_count = ttm_bo_del_from_lru(entry); -+ -+ spin_unlock(&bdev->lru_lock); -+ -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ while (put_count--) -+ kref_put(&entry->list_kref, ttm_bo_ref_bug); -+ -+ mutex_lock(&entry->mutex); -+ ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait); -+ mutex_unlock(&entry->mutex); -+ -+ ttm_bo_unreserve(entry); -+ -+ kref_put(&entry->list_kref, ttm_bo_release_list); -+ if (ret) -+ return ret; -+ -+ spin_lock(&bdev->lru_lock); -+ } while (1); -+ -+ if (!node) { -+ spin_unlock(&bdev->lru_lock); -+ return -ENOMEM; -+ } -+ -+ node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment); -+ if (unlikely(!node)) { -+ spin_unlock(&bdev->lru_lock); -+ goto retry_pre_get; -+ } -+ -+ spin_unlock(&bdev->lru_lock); -+ mem->mm_node = node; -+ mem->mem_type = mem_type; -+ return 0; -+} -+ -+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, -+ bool disallow_fixed, -+ uint32_t mem_type, -+ uint32_t mask, uint32_t * res_mask) -+{ -+ uint32_t cur_flags = ttm_bo_type_flags(mem_type); -+ -+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed) -+ return false; -+ -+ if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0) -+ return false; -+ -+ if ((mask & man->available_caching) == 0) -+ return false; -+ if (mask & man->default_caching) -+ cur_flags |= man->default_caching; -+ else if (mask & TTM_PL_FLAG_CACHED) -+ cur_flags |= TTM_PL_FLAG_CACHED; -+ else if (mask & TTM_PL_FLAG_WC) -+ cur_flags |= TTM_PL_FLAG_WC; -+ else -+ cur_flags |= TTM_PL_FLAG_UNCACHED; -+ -+ *res_mask = cur_flags; -+ return true; -+} -+ -+/** -+ * Creates space for memory region @mem according to its type. -+ * -+ * This function first searches for free space in compatible memory types in -+ * the priority order defined by the driver. If free space isn't found, then -+ * ttm_bo_mem_force_space is attempted in priority order to evict and find -+ * space. 
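-+ *
-+ * For example (editor's sketch, not part of the original patch), a
-+ * driver with on-card memory and an aperture might publish:
-+ *
-+ *	static const uint32_t mem_prios[] =
-+ *		{ TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM };
-+ *	static const uint32_t busy_prios[] =
-+ *		{ TTM_PL_TT, TTM_PL_SYSTEM };
-+ *
-+ * and point ttm_bo_driver::mem_type_prio / mem_busy_prio at these arrays.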
-+ */ -+int ttm_bo_mem_space(struct ttm_buffer_object *bo, -+ struct ttm_mem_reg *mem, bool interruptible, bool no_wait) -+{ -+ struct ttm_bo_device *bdev = bo->bdev; -+ struct ttm_mem_type_manager *man; -+ -+ uint32_t num_prios = bdev->driver->num_mem_type_prio; -+ const uint32_t *prios = bdev->driver->mem_type_prio; -+ uint32_t i; -+ uint32_t mem_type = TTM_PL_SYSTEM; -+ uint32_t cur_flags = 0; -+ bool type_found = false; -+ bool type_ok = false; -+ bool has_eagain = false; -+ struct drm_mm_node *node = NULL; -+ int ret; -+ -+ mem->mm_node = NULL; -+ for (i = 0; i < num_prios; ++i) { -+ mem_type = prios[i]; -+ man = &bdev->man[mem_type]; -+ -+ type_ok = ttm_bo_mt_compatible(man, -+ bo->type == ttm_bo_type_user, -+ mem_type, mem->proposed_flags, -+ &cur_flags); -+ -+ if (!type_ok) -+ continue; -+ -+ if (mem_type == TTM_PL_SYSTEM) -+ break; -+ -+ if (man->has_type && man->use_type) { -+ type_found = true; -+ do { -+ ret = drm_mm_pre_get(&man->manager); -+ if (unlikely(ret)) -+ return ret; -+ -+ spin_lock(&bdev->lru_lock); -+ node = drm_mm_search_free(&man->manager, -+ mem->num_pages, -+ mem->page_alignment, -+ 1); -+ if (unlikely(!node)) { -+ spin_unlock(&bdev->lru_lock); -+ break; -+ } -+ node = drm_mm_get_block_atomic(node, -+ mem->num_pages, -+ mem-> -+ page_alignment); -+ spin_unlock(&bdev->lru_lock); -+ } while (!node); -+ } -+ if (node) -+ break; -+ } -+ -+ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) { -+ mem->mm_node = node; -+ mem->mem_type = mem_type; -+ mem->flags = cur_flags; -+ return 0; -+ } -+ -+ if (!type_found) -+ return -EINVAL; -+ -+ num_prios = bdev->driver->num_mem_busy_prio; -+ prios = bdev->driver->mem_busy_prio; -+ -+ for (i = 0; i < num_prios; ++i) { -+ mem_type = prios[i]; -+ man = &bdev->man[mem_type]; -+ -+ if (!man->has_type) -+ continue; -+ -+ if (!ttm_bo_mt_compatible(man, -+ bo->type == ttm_bo_type_user, -+ mem_type, -+ mem->proposed_flags, &cur_flags)) -+ continue; -+ -+ ret = ttm_bo_mem_force_space(bdev, mem, mem_type, -+ interruptible, no_wait); -+ -+ if (ret == 0 && mem->mm_node) { -+ mem->flags = cur_flags; -+ return 0; -+ } -+ -+ if (ret == -ERESTART) -+ has_eagain = true; -+ } -+ -+ ret = (has_eagain) ? -ERESTART : -ENOMEM; -+ return ret; -+} -+ -+/* -+ * Call bo->mutex locked. -+ * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. -+ */ -+ -+static int ttm_bo_busy(struct ttm_buffer_object *bo) -+{ -+ void *sync_obj = bo->sync_obj; -+ struct ttm_bo_driver *driver = bo->bdev->driver; -+ -+ if (sync_obj) { -+ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) { -+ driver->sync_obj_unref(&bo->sync_obj); -+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING; -+ return 0; -+ } -+ driver->sync_obj_flush(sync_obj, bo->sync_obj_arg); -+ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) { -+ driver->sync_obj_unref(&bo->sync_obj); -+ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING; -+ return 0; -+ } -+ return 1; -+ } -+ return 0; -+} -+ -+int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) -+{ -+ int ret = 0; -+ -+ if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) -+ return -EBUSY; -+ -+ ret = wait_event_interruptible(bo->event_queue, -+ atomic_read(&bo->cpu_writers) == 0); -+ -+ if (ret == -ERESTARTSYS) -+ ret = -ERESTART; -+ -+ return ret; -+} -+ -+/* -+ * bo->mutex locked. -+ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags. 
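-+ * In this file the function is called from ttm_buffer_object_validate()
-+ * when the current placement is incompatible with bo->proposed_flags
-+ * (editor's note).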
-+ */ -+ -+int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags, -+ bool interruptible, bool no_wait) -+{ -+ struct ttm_bo_device *bdev = bo->bdev; -+ int ret = 0; -+ struct ttm_mem_reg mem; -+ -+ BUG_ON(!atomic_read(&bo->reserved)); -+ -+ /* -+ * FIXME: It's possible to pipeline buffer moves. -+ * Have the driver move function wait for idle when necessary, -+ * instead of doing it here. -+ */ -+ -+ ttm_bo_busy(bo); -+ ret = ttm_bo_wait(bo, false, interruptible, no_wait); -+ if (ret) -+ return ret; -+ -+ mem.num_pages = bo->num_pages; -+ mem.size = mem.num_pages << PAGE_SHIFT; -+ mem.proposed_flags = new_mem_flags; -+ mem.page_alignment = bo->mem.page_alignment; -+ -+ /* -+ * Determine where to move the buffer. -+ */ -+ -+ ret = ttm_bo_mem_space(bo, &mem, interruptible, no_wait); -+ if (ret) -+ goto out_unlock; -+ -+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); -+ -+ out_unlock: -+ if (ret && mem.mm_node) { -+ spin_lock(&bdev->lru_lock); -+ drm_mm_put_block(mem.mm_node); -+ spin_unlock(&bdev->lru_lock); -+ } -+ return ret; -+} -+ -+static int ttm_bo_mem_compat(struct ttm_mem_reg *mem) -+{ -+ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_MEM) == 0) -+ return 0; -+ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_CACHING) == 0) -+ return 0; -+ -+ return 1; -+} -+ -+int ttm_buffer_object_validate(struct ttm_buffer_object *bo, -+ bool interruptible, bool no_wait) -+{ -+ int ret; -+ -+ BUG_ON(!atomic_read(&bo->reserved)); -+ bo->mem.proposed_flags = bo->proposed_flags; -+ -+ TTM_DEBUG("Proposed flags 0x%08lx, Old flags 0x%08lx\n", -+ (unsigned long)bo->mem.proposed_flags, -+ (unsigned long)bo->mem.flags); -+ -+ /* -+ * Check whether we need to move buffer. -+ */ -+ -+ if (!ttm_bo_mem_compat(&bo->mem)) { -+ ret = ttm_bo_move_buffer(bo, bo->mem.proposed_flags, -+ interruptible, no_wait); -+ if (ret) { -+ if (ret != -ERESTART) -+ printk(KERN_ERR "Failed moving buffer. " -+ "Proposed placement 0x%08x\n", -+ bo->mem.proposed_flags); -+ if (ret == -ENOMEM) -+ printk(KERN_ERR "Out of aperture space or " -+ "DRM memory quota.\n"); -+ return ret; -+ } -+ } -+ -+ /* -+ * We might need to add a TTM. 
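-+	 * A buffer left in TTM_PL_SYSTEM needs a struct ttm_tt to hold
-+	 * its backing pages (editor's note).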
-+ */ -+ -+ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { -+ ret = ttm_bo_add_ttm(bo); -+ if (ret) -+ return ret; -+ } -+ /* -+ * Validation has succeeded, move the access and other -+ * non-mapping-related flag bits from the proposed flags to -+ * the active flags -+ */ -+ -+ ttm_flag_masked(&bo->mem.flags, bo->proposed_flags, -+ ~TTM_PL_MASK_MEMTYPE); -+ -+ return 0; -+} -+ -+int -+ttm_bo_check_placement(struct ttm_buffer_object *bo, -+ uint32_t set_flags, uint32_t clr_flags) -+{ -+ uint32_t new_mask = set_flags | clr_flags; -+ -+ if ((bo->type == ttm_bo_type_user) && (clr_flags & TTM_PL_FLAG_CACHED)) { -+ printk(KERN_ERR -+ "User buffers require cache-coherent memory.\n"); -+ return -EINVAL; -+ } -+ -+ if (!capable(CAP_SYS_ADMIN)) { -+ if (new_mask & TTM_PL_FLAG_NO_EVICT) { -+ printk(KERN_ERR "Need to be root to modify" -+ " NO_EVICT status.\n"); -+ return -EINVAL; -+ } -+ -+ if ((clr_flags & bo->mem.flags & TTM_PL_MASK_MEMTYPE) && -+ (bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) { -+ printk(KERN_ERR "Incompatible memory specification" -+ " for NO_EVICT buffer.\n"); -+ return -EINVAL; -+ } -+ } -+ return 0; -+} -+ -+int ttm_buffer_object_init(struct ttm_bo_device *bdev, -+ struct ttm_buffer_object *bo, -+ unsigned long size, -+ enum ttm_bo_type type, -+ uint32_t flags, -+ uint32_t page_alignment, -+ unsigned long buffer_start, -+ bool interruptible, -+ struct file *persistant_swap_storage, -+ size_t acc_size, -+ void (*destroy) (struct ttm_buffer_object *)) -+{ -+ int ret = 0; -+ unsigned long num_pages; -+ -+ size += buffer_start & ~PAGE_MASK; -+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; -+ if (num_pages == 0) { -+ printk(KERN_ERR "Illegal buffer object size.\n"); -+ return -EINVAL; -+ } -+ bo->destroy = destroy; -+ -+ mutex_init(&bo->mutex); -+ mutex_lock(&bo->mutex); -+ kref_init(&bo->kref); -+ kref_init(&bo->list_kref); -+ atomic_set(&bo->cpu_writers, 0); -+ atomic_set(&bo->reserved, 1); -+ init_waitqueue_head(&bo->event_queue); -+ INIT_LIST_HEAD(&bo->lru); -+ INIT_LIST_HEAD(&bo->ddestroy); -+ INIT_LIST_HEAD(&bo->swap); -+ bo->bdev = bdev; -+ bo->type = type; -+ bo->num_pages = num_pages; -+ bo->mem.mem_type = TTM_PL_SYSTEM; -+ bo->mem.num_pages = bo->num_pages; -+ bo->mem.mm_node = NULL; -+ bo->mem.page_alignment = page_alignment; -+ bo->buffer_start = buffer_start & PAGE_MASK; -+ bo->priv_flags = 0; -+ bo->mem.flags = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); -+ bo->seq_valid = false; -+ bo->persistant_swap_storage = persistant_swap_storage; -+ bo->acc_size = acc_size; -+ -+ ret = ttm_bo_check_placement(bo, flags, 0ULL); -+ if (unlikely(ret != 0)) -+ goto out_err; -+ -+ /* -+ * If no caching attributes are set, accept any form of caching. -+ */ -+ -+ if ((flags & TTM_PL_MASK_CACHING) == 0) -+ flags |= TTM_PL_MASK_CACHING; -+ -+ bo->proposed_flags = flags; -+ bo->mem.proposed_flags = flags; -+ -+ /* -+ * For ttm_bo_type_device buffers, allocate -+ * address space from the device. 
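-+	 * The offset allocated here, bo->addr_space_offset, is the mmap
-+	 * offset user-space later uses to map the buffer (editor's note).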
-+ */ -+ -+ if (bo->type == ttm_bo_type_device) { -+ ret = ttm_bo_setup_vm(bo); -+ if (ret) -+ goto out_err; -+ } -+ -+ ret = ttm_buffer_object_validate(bo, interruptible, false); -+ if (ret) -+ goto out_err; -+ -+ mutex_unlock(&bo->mutex); -+ ttm_bo_unreserve(bo); -+ return 0; -+ -+ out_err: -+ mutex_unlock(&bo->mutex); -+ ttm_bo_unreserve(bo); -+ ttm_bo_unref(&bo); -+ -+ return ret; -+} -+ -+static inline size_t ttm_bo_size(struct ttm_bo_device *bdev, -+ unsigned long num_pages) -+{ -+ size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) & -+ PAGE_MASK; -+ -+ return bdev->ttm_bo_size + 2 * page_array_size; -+} -+ -+int ttm_buffer_object_create(struct ttm_bo_device *bdev, -+ unsigned long size, -+ enum ttm_bo_type type, -+ uint32_t flags, -+ uint32_t page_alignment, -+ unsigned long buffer_start, -+ bool interruptible, -+ struct file *persistant_swap_storage, -+ struct ttm_buffer_object **p_bo) -+{ -+ struct ttm_buffer_object *bo; -+ int ret; -+ struct ttm_mem_global *mem_glob = bdev->mem_glob; -+ -+ size_t acc_size = -+ ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); -+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ bo = kzalloc(sizeof(*bo), GFP_KERNEL); -+ -+ if (unlikely(bo == NULL)) { -+ ttm_mem_global_free(mem_glob, acc_size, false); -+ return -ENOMEM; -+ } -+ -+ ret = ttm_buffer_object_init(bdev, bo, size, type, flags, -+ page_alignment, buffer_start, -+ interruptible, -+ persistant_swap_storage, acc_size, NULL); -+ if (likely(ret == 0)) -+ *p_bo = bo; -+ -+ return ret; -+} -+ -+static int ttm_bo_leave_list(struct ttm_buffer_object *bo, -+ uint32_t mem_type, bool allow_errors) -+{ -+ int ret; -+ -+ mutex_lock(&bo->mutex); -+ -+ ret = ttm_bo_expire_sync_obj(bo, allow_errors); -+ if (ret) -+ goto out; -+ -+ if (bo->mem.mem_type == mem_type) -+ ret = ttm_bo_evict(bo, mem_type, false, false); -+ -+ if (ret) { -+ if (allow_errors) { -+ goto out; -+ } else { -+ ret = 0; -+ printk(KERN_ERR "Cleanup eviction failed\n"); -+ } -+ } -+ -+ out: -+ mutex_unlock(&bo->mutex); -+ return ret; -+} -+ -+static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, -+ struct list_head *head, -+ unsigned mem_type, bool allow_errors) -+{ -+ struct ttm_buffer_object *entry; -+ int ret; -+ int put_count; -+ -+ /* -+ * Can't use standard list traversal since we're unlocking. 
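-+	 * Each entry is therefore pinned with its list_kref before the
-+	 * lru_lock is dropped, so it cannot be freed while we sleep
-+	 * (editor's note).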
-+ */ -+ -+ spin_lock(&bdev->lru_lock); -+ -+ while (!list_empty(head)) { -+ entry = list_first_entry(head, struct ttm_buffer_object, lru); -+ kref_get(&entry->list_kref); -+ ret = ttm_bo_reserve_locked(entry, false, false, false, 0); -+ put_count = ttm_bo_del_from_lru(entry); -+ spin_unlock(&bdev->lru_lock); -+ while (put_count--) -+ kref_put(&entry->list_kref, ttm_bo_ref_bug); -+ BUG_ON(ret); -+ ret = ttm_bo_leave_list(entry, mem_type, allow_errors); -+ ttm_bo_unreserve(entry); -+ kref_put(&entry->list_kref, ttm_bo_release_list); -+ spin_lock(&bdev->lru_lock); -+ } -+ -+ spin_unlock(&bdev->lru_lock); -+ -+ return 0; -+} -+ -+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) -+{ -+ struct ttm_mem_type_manager *man = &bdev->man[mem_type]; -+ int ret = -EINVAL; -+ -+ if (mem_type >= TTM_NUM_MEM_TYPES) { -+ printk(KERN_ERR "Illegal memory type %d\n", mem_type); -+ return ret; -+ } -+ -+ if (!man->has_type) { -+ printk(KERN_ERR "Trying to take down uninitialized " -+ "memory manager type %u\n", mem_type); -+ return ret; -+ } -+ -+ man->use_type = false; -+ man->has_type = false; -+ -+ ret = 0; -+ if (mem_type > 0) { -+ ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false); -+ -+ spin_lock(&bdev->lru_lock); -+ if (drm_mm_clean(&man->manager)) { -+ drm_mm_takedown(&man->manager); -+ } else { -+ ret = -EBUSY; -+ } -+ spin_unlock(&bdev->lru_lock); -+ } -+ -+ return ret; -+} -+ -+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) -+{ -+ struct ttm_mem_type_manager *man = &bdev->man[mem_type]; -+ -+ if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { -+ printk(KERN_ERR "Illegal memory manager memory type %u.\n", -+ mem_type); -+ return -EINVAL; -+ } -+ -+ if (!man->has_type) { -+ printk(KERN_ERR "Memory type %u has not been initialized.\n", -+ mem_type); -+ return 0; -+ } -+ -+ return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true); -+} -+ -+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, -+ unsigned long p_offset, unsigned long p_size) -+{ -+ int ret = -EINVAL; -+ struct ttm_mem_type_manager *man; -+ -+ if (type >= TTM_NUM_MEM_TYPES) { -+ printk(KERN_ERR "Illegal memory type %d\n", type); -+ return ret; -+ } -+ -+ man = &bdev->man[type]; -+ if (man->has_type) { -+ printk(KERN_ERR -+ "Memory manager already initialized for type %d\n", -+ type); -+ return ret; -+ } -+ -+ ret = bdev->driver->init_mem_type(bdev, type, man); -+ if (ret) -+ return ret; -+ -+ ret = 0; -+ if (type != TTM_PL_SYSTEM) { -+ if (!p_size) { -+ printk(KERN_ERR "Zero size memory manager type %d\n", -+ type); -+ return ret; -+ } -+ ret = drm_mm_init(&man->manager, p_offset, p_size); -+ if (ret) -+ return ret; -+ } -+ man->has_type = true; -+ man->use_type = true; -+ man->size = p_size; -+ -+ INIT_LIST_HEAD(&man->lru); -+ -+ return 0; -+} -+ -+int ttm_bo_device_release(struct ttm_bo_device *bdev) -+{ -+ int ret = 0; -+ unsigned i = TTM_NUM_MEM_TYPES; -+ struct ttm_mem_type_manager *man; -+ -+ while (i--) { -+ man = &bdev->man[i]; -+ if (man->has_type) { -+ man->use_type = false; -+ if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { -+ ret = -EBUSY; -+ printk(KERN_ERR "DRM memory manager type %d " -+ "is not clean.\n", i); -+ } -+ man->has_type = false; -+ } -+ } -+ -+ if (!cancel_delayed_work(&bdev->wq)) -+ flush_scheduled_work(); -+ -+ while (ttm_bo_delayed_delete(bdev, true)) ; -+ -+ spin_lock(&bdev->lru_lock); -+ if (list_empty(&bdev->ddestroy)) -+ TTM_DEBUG("Delayed destroy list was clean\n"); -+ -+ if (list_empty(&bdev->man[0].lru)) -+ TTM_DEBUG("Swap list was 
clean\n"); -+ spin_unlock(&bdev->lru_lock); -+ -+ ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink); -+ BUG_ON(!drm_mm_clean(&bdev->addr_space_mm)); -+ write_lock(&bdev->vm_lock); -+ drm_mm_takedown(&bdev->addr_space_mm); -+ write_unlock(&bdev->vm_lock); -+ -+ __free_page(bdev->dummy_read_page); -+ return ret; -+} -+ -+/* -+ * This function is intended to be called on drm driver load. -+ * If you decide to call it from firstopen, you must protect the call -+ * from a potentially racing ttm_bo_driver_finish in lastclose. -+ * (This may happen on X server restart). -+ */ -+ -+int ttm_bo_device_init(struct ttm_bo_device *bdev, -+ struct ttm_mem_global *mem_glob, -+ struct ttm_bo_driver *driver, uint64_t file_page_offset) -+{ -+ int ret = -EINVAL; -+ -+ bdev->dummy_read_page = NULL; -+ rwlock_init(&bdev->vm_lock); -+ spin_lock_init(&bdev->lru_lock); -+ -+ bdev->driver = driver; -+ bdev->mem_glob = mem_glob; -+ -+ memset(bdev->man, 0, sizeof(bdev->man)); -+ -+ bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); -+ if (unlikely(bdev->dummy_read_page == NULL)) { -+ ret = -ENOMEM; -+ goto out_err0; -+ } -+ -+ /* -+ * Initialize the system memory buffer type. -+ * Other types need to be driver / IOCTL initialized. -+ */ -+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0); -+ if (unlikely(ret != 0)) -+ goto out_err1; -+ -+ bdev->addr_space_rb = RB_ROOT; -+ ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000); -+ if (unlikely(ret != 0)) -+ goto out_err2; -+ -+ INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); -+ bdev->nice_mode = true; -+ INIT_LIST_HEAD(&bdev->ddestroy); -+ INIT_LIST_HEAD(&bdev->swap_lru); -+ bdev->dev_mapping = NULL; -+ ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); -+ ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); -+ if (unlikely(ret != 0)) { -+ printk(KERN_ERR "Could not register buffer object swapout.\n"); -+ goto out_err2; -+ } -+ return 0; -+ out_err2: -+ ttm_bo_clean_mm(bdev, 0); -+ out_err1: -+ __free_page(bdev->dummy_read_page); -+ out_err0: -+ return ret; -+} -+ -+/* -+ * buffer object vm functions. -+ */ -+ -+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) -+{ -+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; -+ -+ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) { -+ if (mem->mem_type == TTM_PL_SYSTEM) -+ return false; -+ -+ if (man->flags & TTM_MEMTYPE_FLAG_CMA) -+ return false; -+ -+ if (mem->flags & TTM_PL_FLAG_CACHED) -+ return false; -+ } -+ return true; -+} -+ -+int ttm_bo_pci_offset(struct ttm_bo_device *bdev, -+ struct ttm_mem_reg *mem, -+ unsigned long *bus_base, -+ unsigned long *bus_offset, unsigned long *bus_size) -+{ -+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; -+ -+ *bus_size = 0; -+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) -+ return -EINVAL; -+ -+ if (ttm_mem_reg_is_pci(bdev, mem)) { -+ *bus_offset = mem->mm_node->start << PAGE_SHIFT; -+ *bus_size = mem->num_pages << PAGE_SHIFT; -+ *bus_base = man->io_offset; -+ } -+ -+ return 0; -+} -+ -+/** -+ * \c Kill all user-space virtual mappings of this buffer object. -+ * -+ * \param bo The buffer object. -+ * -+ * Call bo->mutex locked. 
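-+ *
-+ * Editor's note: this zaps the CPU page-table entries covering the
-+ * buffer via unmap_mapping_range(), so user-space mappings fault in
-+ * again on the next access.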
-+ */
-+
-+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
-+{
-+	struct ttm_bo_device *bdev = bo->bdev;
-+	loff_t offset = (loff_t) bo->addr_space_offset;
-+	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
-+
-+	if (!bdev->dev_mapping)
-+		return;
-+
-+	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
-+}
-+
-+static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-+{
-+	struct ttm_bo_device *bdev = bo->bdev;
-+	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
-+	struct rb_node *parent = NULL;
-+	struct ttm_buffer_object *cur_bo;
-+	unsigned long offset = bo->vm_node->start;
-+	unsigned long cur_offset;
-+
-+	while (*cur) {
-+		parent = *cur;
-+		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
-+		cur_offset = cur_bo->vm_node->start;
-+		if (offset < cur_offset)
-+			cur = &parent->rb_left;
-+		else if (offset > cur_offset)
-+			cur = &parent->rb_right;
-+		else
-+			BUG();
-+	}
-+
-+	rb_link_node(&bo->vm_rb, parent, cur);
-+	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-+}
-+
-+/**
-+ * ttm_bo_setup_vm:
-+ *
-+ * @bo: the buffer to allocate address space for
-+ *
-+ * Allocate address space in the drm device so that applications
-+ * can mmap the buffer and access the contents. This only
-+ * applies to ttm_bo_type_device objects as others are not
-+ * placed in the drm device address space.
-+ */
-+
-+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
-+{
-+	struct ttm_bo_device *bdev = bo->bdev;
-+	int ret;
-+
-+      retry_pre_get:
-+	ret = drm_mm_pre_get(&bdev->addr_space_mm);
-+	if (unlikely(ret != 0))
-+		return ret;
-+
-+	write_lock(&bdev->vm_lock);
-+	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
-+					 bo->mem.num_pages, 0, 0);
-+
-+	if (unlikely(bo->vm_node == NULL)) {
-+		ret = -ENOMEM;
-+		goto out_unlock;
-+	}
-+
-+	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
-+					      bo->mem.num_pages, 0);
-+
-+	if (unlikely(bo->vm_node == NULL)) {
-+		write_unlock(&bdev->vm_lock);
-+		goto retry_pre_get;
-+	}
-+
-+	ttm_bo_vm_insert_rb(bo);
-+	write_unlock(&bdev->vm_lock);
-+	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-+
-+	return 0;
-+      out_unlock:
-+	write_unlock(&bdev->vm_lock);
-+	return ret;
-+}
-+
-+int ttm_bo_wait(struct ttm_buffer_object *bo,
-+		bool lazy, bool interruptible, bool no_wait)
-+{
-+	struct ttm_bo_driver *driver = bo->bdev->driver;
-+	void *sync_obj;
-+	void *sync_obj_arg;
-+	int ret = 0;
-+
-+	while (bo->sync_obj) {
-+		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
-+			driver->sync_obj_unref(&bo->sync_obj);
-+			bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
-+			goto out;
-+		}
-+		if (no_wait) {
-+			ret = -EBUSY;
-+			goto out;
-+		}
-+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
-+		sync_obj_arg = bo->sync_obj_arg;
-+		mutex_unlock(&bo->mutex);
-+		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
-+					    lazy, interruptible);
-+
-+		mutex_lock(&bo->mutex);
-+		if (unlikely(ret != 0)) {
-+			driver->sync_obj_unref(&sync_obj);
-+			return ret;
-+		}
-+
-+		if (bo->sync_obj == sync_obj) {
-+			driver->sync_obj_unref(&bo->sync_obj);
-+			bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
-+		}
-+		driver->sync_obj_unref(&sync_obj);
-+	}
-+      out:
-+	/*
-+	 * Editor's fix: the original returned 0 here unconditionally,
-+	 * silently dropping the -EBUSY set for the no_wait case above.
-+	 */
-+	return ret;
-+}
-+
-+void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
-+{
-+	atomic_set(&bo->reserved, 0);
-+	wake_up_all(&bo->event_queue);
-+}
-+
-+int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
-+			     bool no_wait)
-+{
-+	int ret;
-+
-+	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
-+		if (no_wait)
-+			return
-EBUSY; -+ else if (interruptible) { -+ ret = wait_event_interruptible -+ (bo->event_queue, atomic_read(&bo->reserved) == 0); -+ if (unlikely(ret != 0)) -+ return -ERESTART; -+ } else { -+ wait_event(bo->event_queue, -+ atomic_read(&bo->reserved) == 0); -+ } -+ } -+ return 0; -+} -+ -+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) -+{ -+ int ret = 0; -+ -+ /* -+ * Using ttm_bo_reserve instead of ttm_bo_block_reservation -+ * makes sure the lru lists are updated. -+ */ -+ -+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0); -+ if (unlikely(ret != 0)) -+ return ret; -+ mutex_lock(&bo->mutex); -+ ret = ttm_bo_wait(bo, false, true, no_wait); -+ if (unlikely(ret != 0)) -+ goto out_err0; -+ atomic_inc(&bo->cpu_writers); -+ out_err0: -+ mutex_unlock(&bo->mutex); -+ ttm_bo_unreserve(bo); -+ return ret; -+} -+ -+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) -+{ -+ if (atomic_dec_and_test(&bo->cpu_writers)) -+ wake_up_all(&bo->event_queue); -+} -+ -+/** -+ * A buffer object shrink method that tries to swap out the first -+ * buffer object on the bo_global::swap_lru list. -+ */ -+ -+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) -+{ -+ struct ttm_bo_device *bdev = -+ container_of(shrink, struct ttm_bo_device, shrink); -+ struct ttm_buffer_object *bo; -+ int ret = -EBUSY; -+ int put_count; -+ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); -+ -+ spin_lock(&bdev->lru_lock); -+ while (ret == -EBUSY) { -+ if (unlikely(list_empty(&bdev->swap_lru))) { -+ spin_unlock(&bdev->lru_lock); -+ return -EBUSY; -+ } -+ -+ bo = list_first_entry(&bdev->swap_lru, -+ struct ttm_buffer_object, swap); -+ kref_get(&bo->list_kref); -+ -+ /** -+ * Reserve buffer. Since we unlock while sleeping, we need -+ * to re-check that nobody removed us from the swap-list while -+ * we slept. -+ */ -+ -+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0); -+ if (unlikely(ret == -EBUSY)) { -+ spin_unlock(&bdev->lru_lock); -+ ttm_bo_wait_unreserved(bo, false); -+ kref_put(&bo->list_kref, ttm_bo_release_list); -+ spin_lock(&bdev->lru_lock); -+ } -+ } -+ -+ BUG_ON(ret != 0); -+ put_count = ttm_bo_del_from_lru(bo); -+ spin_unlock(&bdev->lru_lock); -+ -+ while (put_count--) -+ kref_put(&bo->list_kref, ttm_bo_ref_bug); -+ -+ /** -+ * Wait for GPU, then move to system cached. -+ */ -+ -+ mutex_lock(&bo->mutex); -+ ret = ttm_bo_wait(bo, false, false, false); -+ if (unlikely(ret != 0)) -+ goto out; -+ -+ if ((bo->mem.flags & swap_placement) != swap_placement) { -+ struct ttm_mem_reg evict_mem; -+ -+ evict_mem = bo->mem; -+ evict_mem.mm_node = NULL; -+ evict_mem.proposed_flags = -+ TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; -+ evict_mem.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; -+ evict_mem.mem_type = TTM_PL_SYSTEM; -+ -+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, false, false); -+ if (unlikely(ret != 0)) -+ goto out; -+ } -+ -+ ttm_bo_unmap_virtual(bo); -+ -+ /** -+ * Swap out. Buffer will be swapped in again as soon as -+ * anyone tries to access a ttm page. -+ */ -+ -+ ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); -+ out: -+ mutex_unlock(&bo->mutex); -+ -+ /** -+ * -+ * Unreserve without putting on LRU to avoid swapping out an -+ * already swapped buffer. 
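-+	 * (ttm_bo_unreserve() would put the buffer back on the LRU lists,
-+	 * making it immediately eligible for swapout again -- editor's
-+	 * note.)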
-+ */ -+ -+ atomic_set(&bo->reserved, 0); -+ wake_up_all(&bo->event_queue); -+ kref_put(&bo->list_kref, ttm_bo_release_list); -+ return ret; -+} -+ -+void ttm_bo_swapout_all(struct ttm_bo_device *bdev) -+{ -+ while (ttm_bo_swapout(&bdev->shrink) == 0) ; -+} -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h ---- a/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,859 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 Vmware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+#ifndef _TTM_BO_DRIVER_H_ -+#define _TTM_BO_DRIVER_H_ -+ -+#include "ttm/ttm_bo_api.h" -+#include "ttm/ttm_memory.h" -+#include <drm/drm_mm.h> -+#include "linux/workqueue.h" -+#include "linux/fs.h" -+#include "linux/spinlock.h" -+ -+struct ttm_backend; -+ -+struct ttm_backend_func { -+ /** -+ * struct ttm_backend_func member populate -+ * -+ * @backend: Pointer to a struct ttm_backend. -+ * @num_pages: Number of pages to populate. -+ * @pages: Array of pointers to ttm pages. -+ * @dummy_read_page: Page to be used instead of NULL pages in the -+ * array @pages. -+ * -+ * Populate the backend with ttm pages. Depending on the backend, -+ * it may or may not copy the @pages array. -+ */ -+ int (*populate) (struct ttm_backend * backend, -+ unsigned long num_pages, struct page ** pages, -+ struct page * dummy_read_page); -+ /** -+ * struct ttm_backend_func member clear -+ * -+ * @backend: Pointer to a struct ttm_backend. -+ * -+ * This is an "unpopulate" function. Release all resources -+ * allocated with populate. -+ */ -+ void (*clear) (struct ttm_backend * backend); -+ -+ /** -+ * struct ttm_backend_func member bind -+ * -+ * @backend: Pointer to a struct ttm_backend. -+ * @bo_mem: Pointer to a struct ttm_mem_reg describing the -+ * memory type and location for binding. -+ * -+ * Bind the backend pages into the aperture in the location -+ * indicated by @bo_mem. 
This function should be able to handle -+ * differences between aperture- and system page sizes. -+ */ -+ int (*bind) (struct ttm_backend * backend, struct ttm_mem_reg * bo_mem); -+ -+ /** -+ * struct ttm_backend_func member unbind -+ * -+ * @backend: Pointer to a struct ttm_backend. -+ * -+ * Unbind previously bound backend pages. This function should be -+ * able to handle differences between aperture- and system page sizes. -+ */ -+ int (*unbind) (struct ttm_backend * backend); -+ -+ /** -+ * struct ttm_backend_func member destroy -+ * -+ * @backend: Pointer to a struct ttm_backend. -+ * -+ * Destroy the backend. -+ */ -+ void (*destroy) (struct ttm_backend * backend); -+}; -+ -+/** -+ * struct ttm_backend -+ * -+ * @bdev: Pointer to a struct ttm_bo_device. -+ * @flags: For driver use. -+ * @func: Pointer to a struct ttm_backend_func that describes -+ * the backend methods. -+ * -+ */ -+ -+struct ttm_backend { -+ struct ttm_bo_device *bdev; -+ uint32_t flags; -+ struct ttm_backend_func *func; -+}; -+ -+#define TTM_PAGE_FLAG_VMALLOC (1 << 0) -+#define TTM_PAGE_FLAG_USER (1 << 1) -+#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2) -+#define TTM_PAGE_FLAG_WRITE (1 << 3) -+#define TTM_PAGE_FLAG_SWAPPED (1 << 4) -+#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5) -+ -+enum ttm_caching_state { -+ tt_uncached, -+ tt_wc, -+ tt_cached -+}; -+ -+/** -+ * struct ttm_tt -+ * -+ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL -+ * pointer. -+ * @pages: Array of pages backing the data. -+ * @first_himem_page: Himem pages are put last in the page array, which -+ * enables us to run caching attribute changes on only the first part -+ * of the page array containing lomem pages. This is the index of the -+ * first himem page. -+ * @last_lomem_page: Index of the last lomem page in the page array. -+ * @num_pages: Number of pages in the page array. -+ * @bdev: Pointer to the current struct ttm_bo_device. -+ * @be: Pointer to the ttm backend. -+ * @tsk: The task for user ttm. -+ * @start: virtual address for user ttm. -+ * @swap_storage: Pointer to shmem struct file for swap storage. -+ * @caching_state: The current caching state of the pages. -+ * @state: The current binding state of the pages. -+ * -+ * This is a structure holding the pages, caching- and aperture binding -+ * status for a buffer object that isn't backed by fixed (VRAM / AGP) -+ * memory. -+ */ -+ -+struct ttm_tt { -+ struct page *dummy_read_page; -+ struct page **pages; -+ long first_himem_page; -+ long last_lomem_page; -+ uint32_t page_flags; -+ unsigned long num_pages; -+ struct ttm_bo_device *bdev; -+ struct ttm_backend *be; -+ struct task_struct *tsk; -+ unsigned long start; -+ struct file *swap_storage; -+ enum ttm_caching_state caching_state; -+ enum { -+ tt_bound, -+ tt_unbound, -+ tt_unpopulated, -+ } state; -+}; -+ -+#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */ -+#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ -+#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap -+ before kernel access. */ -+#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ -+ -+/** -+ * struct ttm_mem_type_manager -+ * -+ * @has_type: The memory type has been initialized. -+ * @use_type: The memory type is enabled. -+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory -+ * managed by this memory type. -+ * @gpu_offset: If used, the GPU offset of the first managed page of -+ * fixed memory or the first managed location in an aperture. 
-+ * @io_offset: The io_offset of the first managed page of IO memory or
-+ * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
-+ * memory, this should be set to 0.
-+ * @io_size: The size of a managed IO region (fixed memory or aperture).
-+ * @io_addr: Virtual kernel address if the io region is pre-mapped. For
-+ * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
-+ * @io_addr should be set to NULL.
-+ * @size: Size of the managed region.
-+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
-+ * as defined in ttm_placement_common.h.
-+ * @default_caching: The default caching policy used for a buffer object
-+ * placed in this memory type if the user doesn't provide one.
-+ * @manager: The range manager used for this memory type. FIXME: If the aperture
-+ * has a page size different from the underlying system, the granularity
-+ * of this manager should take care of this. But the range allocating code
-+ * in ttm_bo.c needs to be modified for this.
-+ * @lru: The lru list for this memory type.
-+ *
-+ * This structure is used to identify and manage memory types for a device.
-+ * It's set up by the ttm_bo_driver::init_mem_type method.
-+ */
-+
-+struct ttm_mem_type_manager {
-+
-+	/*
-+	 * No protection. Constant from start.
-+	 */
-+
-+	bool has_type;
-+	bool use_type;
-+	uint32_t flags;
-+	unsigned long gpu_offset;
-+	unsigned long io_offset;
-+	unsigned long io_size;
-+	void *io_addr;
-+	uint64_t size;
-+	uint32_t available_caching;
-+	uint32_t default_caching;
-+
-+	/*
-+	 * Protected by the bdev->lru_lock.
-+	 * TODO: Consider one lru_lock per ttm_mem_type_manager.
-+	 * Plays ill with list removal, though.
-+	 */
-+
-+	struct drm_mm manager;
-+	struct list_head lru;
-+};
-+
-+/**
-+ * struct ttm_bo_driver
-+ *
-+ * @mem_type_prio: Priority array of memory types to place a buffer object in
-+ * if it fits without evicting buffers from any of these memory types.
-+ * @mem_busy_prio: Priority array of memory types to place a buffer object in
-+ * if it needs to evict buffers to make room.
-+ * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
-+ * @num_mem_busy_prio: Number of elements in the @mem_busy_prio array.
-+ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
-+ * @invalidate_caches: Callback to invalidate read caches when a buffer object
-+ * has been evicted.
-+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager structure.
-+ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
-+ * @move: Callback for a driver to hook in accelerated functions to move a buffer.
-+ * If set to NULL, a potentially slow memcpy() move is used.
-+ * @sync_obj_signaled: See ttm_fence_api.h
-+ * @sync_obj_wait: See ttm_fence_api.h
-+ * @sync_obj_flush: See ttm_fence_api.h
-+ * @sync_obj_unref: See ttm_fence_api.h
-+ * @sync_obj_ref: See ttm_fence_api.h
-+ */
-+
-+struct ttm_bo_driver {
-+	const uint32_t *mem_type_prio;
-+	const uint32_t *mem_busy_prio;
-+	uint32_t num_mem_type_prio;
-+	uint32_t num_mem_busy_prio;
-+
-+	/**
-+	 * struct ttm_bo_driver member create_ttm_backend_entry
-+	 *
-+	 * @bdev: The buffer object device.
-+	 *
-+	 * Create a driver specific struct ttm_backend.
-+	 */
-+
-+	struct ttm_backend *(*create_ttm_backend_entry)
-+	 (struct ttm_bo_device * bdev);
-+
-+	/**
-+	 * struct ttm_bo_driver member invalidate_caches
-+	 *
-+	 * @bdev: the buffer object device.
-+	 * @flags: new placement of the rebound buffer object.
-+	 *
-+	 * A previously evicted buffer has been rebound in a
-+	 * potentially new location. Tell the driver that it might
-+	 * consider invalidating read (texture) caches on the next command
-+	 * submission as a consequence.
-+	 */
-+
-+	int (*invalidate_caches) (struct ttm_bo_device * bdev, uint32_t flags);
-+	int (*init_mem_type) (struct ttm_bo_device * bdev, uint32_t type,
-+			      struct ttm_mem_type_manager * man);
-+	/**
-+	 * struct ttm_bo_driver member evict_flags:
-+	 *
-+	 * @bo: the buffer object to be evicted
-+	 *
-+	 * Return the bo flags for a buffer which is not mapped to the hardware.
-+	 * These will be placed in proposed_flags so that when the move is
-+	 * finished, they'll end up in bo->mem.flags
-+	 */
-+
-+	 uint32_t(*evict_flags) (struct ttm_buffer_object * bo);
-+	/**
-+	 * struct ttm_bo_driver member move:
-+	 *
-+	 * @bo: the buffer to move
-+	 * @evict: whether this motion is evicting the buffer from
-+	 * the graphics address space
-+	 * @interruptible: Use interruptible sleeps if possible when sleeping.
-+	 * @no_wait: whether this should give up and return -EBUSY
-+	 * if this move would require sleeping
-+	 * @new_mem: the new memory region receiving the buffer
-+	 *
-+	 * Move a buffer between two memory regions.
-+	 */
-+	int (*move) (struct ttm_buffer_object * bo,
-+		     bool evict, bool interruptible,
-+		     bool no_wait, struct ttm_mem_reg * new_mem);
-+
-+	/**
-+	 * struct ttm_bo_driver member verify_access
-+	 *
-+	 * @bo: Pointer to a buffer object.
-+	 * @filp: Pointer to a struct file trying to access the object.
-+	 *
-+	 * Called from the map / write / read methods to verify that the
-+	 * caller is permitted to access the buffer object.
-+	 * This member may be set to NULL, which will refuse this kind of
-+	 * access for all buffer objects.
-+	 * This function should return 0 if access is granted, -EPERM otherwise.
-+	 */
-+	int (*verify_access) (struct ttm_buffer_object * bo,
-+			      struct file * filp);
-+
-+	/**
-+	 * In case a driver writer dislikes the TTM fence objects,
-+	 * the driver writer can replace those with sync objects of
-+	 * his / her own. If it turns out that no driver writer is
-+	 * using these, I suggest we remove these hooks and plug in
-+	 * fences directly. The bo driver needs the following functionality:
-+	 * See the corresponding functions in the fence object API
-+	 * documentation.
-+	 */
-+
-+	bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
-+	int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
-+			      bool lazy, bool interruptible);
-+	int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
-+	void (*sync_obj_unref) (void **sync_obj);
-+	void *(*sync_obj_ref) (void *sync_obj);
-+};
-+
-+#define TTM_NUM_MEM_TYPES 10
-+
-+#define TTM_BO_PRIV_FLAG_EVICTED  (1 << 0) /* Buffer object is evicted. */
-+#define TTM_BO_PRIV_FLAG_MOVING   (1 << 1) /* Buffer object is moving and needs
-+					      idling before CPU mapping */
-+/**
-+ * struct ttm_bo_device - Buffer object driver device-specific data.
-+ *
-+ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
-+ * @driver: Pointer to a struct ttm_bo_driver struct set up by the driver.
-+ * @count: Current number of buffer objects.
-+ * @pages: Current number of pinned pages.
-+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
-+ * of unpopulated pages.
-+ * @shrink: A shrink callback object used for buffer object swap.
-+ * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
-+ * used by a buffer object. This is excluding page arrays and backing pages.
-+/**
-+ * struct ttm_bo_device - Buffer object driver device-specific data.
-+ *
-+ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
-+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
-+ * @count: Current number of buffer objects.
-+ * @pages: Current number of pinned pages.
-+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
-+ * of unpopulated pages.
-+ * @shrink: A shrink callback object used for buffer object swap.
-+ * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
-+ * used by a buffer object. This is excluding page arrays and backing pages.
-+ * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
-+ * @man: An array of mem_type_managers.
-+ * @addr_space_mm: Range manager for the device address space.
-+ * @lru_lock: Spinlock that protects the buffer+device lru lists and
-+ * ddestroy lists.
-+ * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
-+ * If a GPU lockup has been detected, this is forced to 0.
-+ * @dev_mapping: A pointer to the struct address_space representing the
-+ * device address space.
-+ * @wq: Work queue structure for the delayed delete workqueue.
-+ *
-+ */
-+
-+struct ttm_bo_device {
-+
-+	/*
-+	 * Constant after bo device init / atomic.
-+	 */
-+
-+	struct ttm_mem_global *mem_glob;
-+	struct ttm_bo_driver *driver;
-+	struct page *dummy_read_page;
-+	struct ttm_mem_shrink shrink;
-+
-+	size_t ttm_bo_extra_size;
-+	size_t ttm_bo_size;
-+
-+	rwlock_t vm_lock;
-+	/*
-+	 * Protected by the vm lock.
-+	 */
-+	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
-+	struct rb_root addr_space_rb;
-+	struct drm_mm addr_space_mm;
-+
-+	/*
-+	 * Might want to change this to one lock per manager.
-+	 */
-+	spinlock_t lru_lock;
-+	/*
-+	 * Protected by the lru lock.
-+	 */
-+	struct list_head ddestroy;
-+	struct list_head swap_lru;
-+
-+	/*
-+	 * Protected by load / firstopen / lastclose / unload sync.
-+	 */
-+
-+	bool nice_mode;
-+	struct address_space *dev_mapping;
-+
-+	/*
-+	 * Internal protection.
-+	 */
-+
-+	struct delayed_work wq;
-+};
-+
-+/**
-+ * ttm_flag_masked
-+ *
-+ * @old: Pointer to the result and original value.
-+ * @new: New value of bits.
-+ * @mask: Mask of bits to change.
-+ *
-+ * Convenience function to change a number of bits identified by a mask.
-+ */
-+
-+static inline uint32_t
-+ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
-+{
-+	*old ^= (*old ^ new) & mask;
-+	return *old;
-+}
-+
-+/**
-+ * ttm_tt_create
-+ *
-+ * @bdev: pointer to a struct ttm_bo_device.
-+ * @size: Size of the data needed to be backed.
-+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
-+ * @dummy_read_page: See struct ttm_bo_device.
-+ *
-+ * Create a struct ttm_tt to back data with system memory pages.
-+ * No pages are actually allocated.
-+ * Returns:
-+ * NULL: Out of memory.
-+ */
-+extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
-+				    unsigned long size,
-+				    uint32_t page_flags,
-+				    struct page *dummy_read_page);
-+
-+/**
-+ * ttm_tt_set_user:
-+ *
-+ * @ttm: The struct ttm_tt to populate.
-+ * @tsk: A struct task_struct for which @start is a valid user-space address.
-+ * @start: A valid user-space address.
-+ * @num_pages: Size in pages of the user memory area.
-+ *
-+ * Populate a struct ttm_tt with a user-space memory area after first pinning
-+ * the pages backing it.
-+ * Returns:
-+ * !0: Error.
-+ */
-+
-+extern int ttm_tt_set_user(struct ttm_tt *ttm,
-+			   struct task_struct *tsk,
-+			   unsigned long start, unsigned long num_pages);
-+
-+/**
-+ * ttm_tt_bind:
-+ *
-+ * @ttm: The struct ttm_tt containing backing pages.
-+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
-+ *
-+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
-+ */
-+extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
-+
-+/**
-+ * ttm_tt_destroy:
-+ *
-+ * @ttm: The struct ttm_tt.
-+ *
-+ * Unbind, unpopulate and destroy a struct ttm_tt.
-+ */
-+extern void ttm_tt_destroy(struct ttm_tt *ttm);
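Taken together, the ttm_tt_* calls above form a simple lifecycle: create, bind into an aperture, unbind, destroy. A hedged sketch (bdev, bo_mem and num_pages are assumed to come from the surrounding driver; @size is in bytes per the ttm_tt_create documentation):

    struct ttm_tt *ttm;
    int ret;

    /* Create the backing object; pages are populated lazily. */
    ttm = ttm_tt_create(bdev, num_pages << PAGE_SHIFT, 0,
                        bdev->dummy_read_page);
    if (unlikely(ttm == NULL))
        return -ENOMEM;

    /* Bind the pages into the aperture slot described by bo_mem. */
    ret = ttm_tt_bind(ttm, bo_mem);
    if (ret) {
        ttm_tt_destroy(ttm);
        return ret;
    }

    /* ... the GPU uses the mapping ... */

    ttm_tt_unbind(ttm);     /* optional: ttm_tt_destroy() also unbinds */
    ttm_tt_destroy(ttm);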
-+
-+/**
-+ * ttm_tt_unbind:
-+ *
-+ * @ttm: The struct ttm_tt.
-+ *
-+ * Unbind a struct ttm_tt.
-+ */
-+extern void ttm_tt_unbind(struct ttm_tt *ttm);
-+
-+/**
-+ * ttm_tt_get_page:
-+ *
-+ * @ttm: The struct ttm_tt.
-+ * @index: Index of the desired page.
-+ *
-+ * Return a pointer to the struct page backing @ttm at page
-+ * index @index. If the page is unpopulated, one will be allocated to
-+ * populate that index.
-+ *
-+ * Returns:
-+ * NULL on OOM.
-+ */
-+extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
-+
-+/**
-+ * ttm_tt_cache_flush:
-+ *
-+ * @pages: An array of pointers to struct page:s to flush.
-+ * @num_pages: Number of pages to flush.
-+ *
-+ * Flush the data of the indicated pages from the cpu caches.
-+ * This is used when changing caching attributes of the pages from
-+ * cache-coherent.
-+ */
-+extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
-+
-+/**
-+ * ttm_tt_set_placement_caching:
-+ *
-+ * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
-+ * @placement: Flag indicating the desired caching policy.
-+ *
-+ * This function will change caching policy of any default kernel mappings of
-+ * the pages backing @ttm. If changing from cached to uncached or write-combined,
-+ * all CPU caches will first be flushed to make sure the data of the pages
-+ * hit RAM. This function may be very costly as it involves global TLB
-+ * and cache flushes and potential page splitting / combining.
-+ */
-+extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
-+extern int ttm_tt_swapout(struct ttm_tt *ttm,
-+			  struct file *persistant_swap_storage);
-+
-+/*
-+ * ttm_bo.c
-+ */
-+
-+/**
-+ * ttm_mem_reg_is_pci
-+ *
-+ * @bdev: Pointer to a struct ttm_bo_device.
-+ * @mem: A valid struct ttm_mem_reg.
-+ *
-+ * Returns true if the memory described by @mem is PCI memory,
-+ * false otherwise.
-+ */
-+extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
-+			       struct ttm_mem_reg *mem);
-+
-+/**
-+ * ttm_bo_mem_space
-+ *
-+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
-+ * we want to allocate space for.
-+ * @mem: A struct ttm_mem_reg with the struct ttm_mem_reg::proposed_flags set
-+ * up.
-+ * @interruptible: Sleep interruptible when sleeping.
-+ * @no_wait: Don't sleep waiting for space to become available.
-+ *
-+ * Allocate memory space for the buffer object pointed to by @bo, using
-+ * the placement flags in @mem, potentially evicting other idle buffer objects.
-+ * This function may sleep while waiting for space to become available.
-+ * Returns:
-+ * -EBUSY: No space available (only if no_wait == 1).
-+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
-+ * fragmentation or concurrent allocators.
-+ * -ERESTART: An interruptible sleep was interrupted by a signal.
-+ */
-+extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
-+			    struct ttm_mem_reg *mem,
-+			    bool interruptible, bool no_wait);
-+/**
-+ * ttm_bo_wait_cpu
-+ *
-+ * @bo: Pointer to a struct ttm_buffer_object.
-+ * @no_wait: Don't sleep while waiting.
-+ *
-+ * Wait until a buffer object is no longer sync'ed for CPU access.
-+ * Returns:
-+ * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
-+ * -ERESTART: An interruptible sleep was interrupted by a signal.
-+ */
-+
-+extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
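A sketch of the calling pattern for ttm_bo_mem_space() as documented above: propose a placement, let TTM find or evict space, and propagate the documented error codes. This is an illustrative fragment; my_desired_flags is a hypothetical stand-in for the driver's placement policy.

    struct ttm_mem_reg mem;
    int ret;

    mem = bo->mem;                          /* start from current placement */
    mem.proposed_flags = my_desired_flags;  /* hypothetical driver policy */

    /* Sleep interruptibly; evict idle buffers if needed. */
    ret = ttm_bo_mem_space(bo, &mem, true, false);
    if (ret)
        return ret;   /* -ERESTART, -EBUSY or -ENOMEM per the doc above */

    /* On success, mem describes the new space; hand it to a move. */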
-+
-+/**
-+ * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
-+ *
-+ * @bdev: Pointer to a struct ttm_bo_device.
-+ * @mem: A valid struct ttm_mem_reg describing the buffer object memory.
-+ * @bus_base: On return the base of the PCI region
-+ * @bus_offset: On return the byte offset into the PCI region
-+ * @bus_size: On return the byte size of the buffer object or zero if
-+ * the buffer object memory is not accessible through a PCI region.
-+ *
-+ * Returns:
-+ * -EINVAL if the buffer object is currently not mappable.
-+ * 0 otherwise.
-+ */
-+
-+extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-+			     struct ttm_mem_reg *mem,
-+			     unsigned long *bus_base,
-+			     unsigned long *bus_offset,
-+			     unsigned long *bus_size);
-+
-+extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
-+
-+/**
-+ * ttm_bo_device_init
-+ *
-+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
-+ * @mem_glob: A pointer to an initialized struct ttm_mem_global.
-+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
-+ * @file_page_offset: Offset into the device address space that is available
-+ * for buffer data. This ensures compatibility with other users of the
-+ * address space.
-+ *
-+ * Initializes a struct ttm_bo_device:
-+ * Returns:
-+ * !0: Failure.
-+ */
-+extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
-+			      struct ttm_mem_global *mem_glob,
-+			      struct ttm_bo_driver *driver,
-+			      uint64_t file_page_offset);
-+
-+/**
-+ * ttm_bo_reserve:
-+ *
-+ * @bo: A pointer to a struct ttm_buffer_object.
-+ * @interruptible: Sleep interruptible if waiting.
-+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
-+ * @use_sequence: If @bo is already reserved, only sleep waiting for
-+ * it to become unreserved if @sequence < (@bo)->sequence.
-+ *
-+ * Locks a buffer object for validation. (Or prevents other processes from
-+ * locking it for validation) and removes it from lru lists, while taking
-+ * a number of measures to prevent deadlocks.
-+ *
-+ * Deadlocks may occur when two processes try to reserve multiple buffers in
-+ * different order, either by will or as a result of a buffer being evicted
-+ * to make room for a buffer already reserved. (Buffers are reserved before
-+ * they are evicted). The following algorithm prevents such deadlocks from
-+ * occurring:
-+ * 1) Buffers are reserved with the lru spinlock held. Upon successful
-+ * reservation they are removed from the lru list. This stops a reserved buffer
-+ * from being evicted. However the lru spinlock is released between the time
-+ * a buffer is selected for eviction and the time it is reserved.
-+ * Therefore a check is made when a buffer is reserved for eviction, that it
-+ * is still the first buffer in the lru list, before it is removed from the
-+ * list. @check_lru == 1 forces this check. If it fails, the function returns
-+ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
-+ * the procedure.
-+ * 2) Processes attempting to reserve multiple buffers other than for eviction,
-+ * (typically execbuf), should first obtain a unique 32-bit
-+ * validation sequence number,
-+ * and call this function with @use_sequence == 1 and @sequence == the unique
-+ * sequence number. If upon call of this function, the buffer object is already
-+ * reserved, the validation sequence is checked against the validation
-+ * sequence of the process currently reserving the buffer,
-+ * and if the current validation sequence is greater than that of the process
-+ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
-+ * waiting for the buffer to become unreserved, after which it retries reserving.
-+ * The caller should, when receiving an -EAGAIN error,
-+ * release all its buffer reservations, wait for @bo to become unreserved, and
-+ * then rerun the validation with the same validation sequence. This procedure
-+ * will always guarantee that the process with the lowest validation sequence
-+ * will eventually succeed, preventing both deadlocks and starvation.
-+ *
-+ * Returns:
-+ * -EAGAIN: The reservation may cause a deadlock. Release all buffer reservations,
-+ * wait for @bo to become unreserved and try again. (only if use_sequence == 1).
-+ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
-+ * a signal. Release all buffer reservations and return to user-space.
-+ */
-+extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
-+			  bool interruptible,
-+			  bool no_wait, bool use_sequence, uint32_t sequence);
-+
-+/**
-+ * ttm_bo_unreserve
-+ *
-+ * @bo: A pointer to a struct ttm_buffer_object.
-+ *
-+ * Unreserve a previous reservation of @bo.
-+ */
-+extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
-+
-+/**
-+ * ttm_bo_wait_unreserved
-+ *
-+ * @bo: A pointer to a struct ttm_buffer_object.
-+ *
-+ * Wait for a struct ttm_buffer_object to become unreserved.
-+ * This is typically used in the execbuf code to relax cpu-usage while
-+ * backing off from a potential deadlock condition.
-+ */
-+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
-+				  bool interruptible);
-+
-+/**
-+ * ttm_bo_block_reservation
-+ *
-+ * @bo: A pointer to a struct ttm_buffer_object.
-+ * @interruptible: Use interruptible sleep when waiting.
-+ * @no_wait: Don't sleep, but rather return -EBUSY.
-+ *
-+ * Block reservation for validation by simply reserving the buffer. This is intended
-+ * for single buffer use only without eviction, and thus needs no deadlock protection.
-+ *
-+ * Returns:
-+ * -EBUSY: If no_wait == 1 and the buffer is already reserved.
-+ * -ERESTART: If interruptible == 1 and the process received a signal while sleeping.
-+ */
-+extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
-+				    bool interruptible, bool no_wait);
-+
-+/**
-+ * ttm_bo_unblock_reservation
-+ *
-+ * @bo: A pointer to a struct ttm_buffer_object.
-+ *
-+ * Unblocks reservation leaving lru lists untouched.
-+ */
-+extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
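The deadlock-avoidance protocol in the ttm_bo_reserve() documentation above translates into a retry loop on the caller's side. A minimal sketch under the assumption that val_seq is the caller's unique validation sequence and my_release_all_reservations() is a hypothetical helper that drops every reservation the caller holds (ttm_execbuf_util.c later in this patch implements the full, list-based version):

    int ret;

    retry:
    ret = ttm_bo_reserve(bo, true, false, true, val_seq);
    if (ret == -EAGAIN) {
        /*
         * Deadlock risk: back off completely, wait for the contended
         * buffer, then redo the validation with the same sequence.
         */
        my_release_all_reservations();          /* hypothetical */
        ret = ttm_bo_wait_unreserved(bo, true);
        if (ret != 0)
            return ret;
        goto retry;
    }
    if (ret != 0)
        return ret;                             /* e.g. -ERESTART */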
-+
-+/*
-+ * ttm_bo_util.c
-+ */
-+
-+/**
-+ * ttm_bo_move_ttm
-+ *
-+ * @bo: A pointer to a struct ttm_buffer_object.
-+ * @evict: 1: This is an eviction. Don't try to pipeline.
-+ * @no_wait: Never sleep, but rather return with -EBUSY.
-+ * @new_mem: struct ttm_mem_reg indicating where to move.
-+ *
-+ * Optimized move function for a buffer object with both old and
-+ * new placement backed by a TTM. The function will, if successful,
-+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
-+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
-+ * data remains untouched, and it's up to the caller to free the
-+ * memory space indicated by @new_mem.
-+ * Returns:
-+ * !0: Failure.
-+ */
-+
-+extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-+			   bool evict, bool no_wait,
-+			   struct ttm_mem_reg *new_mem);
-+
-+/**
-+ * ttm_bo_move_memcpy
-+ *
-+ * @bo: A pointer to a struct ttm_buffer_object.
-+ * @evict: 1: This is an eviction. Don't try to pipeline.
-+ * @no_wait: Never sleep, but rather return with -EBUSY.
-+ * @new_mem: struct ttm_mem_reg indicating where to move.
-+ *
-+ * Fallback move function for a mappable buffer object in mappable memory.
-+ * The function will, if successful,
-+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
-+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
-+ * data remains untouched, and it's up to the caller to free the
-+ * memory space indicated by @new_mem.
-+ * Returns:
-+ * !0: Failure.
-+ */
-+
-+extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-+			      bool evict,
-+			      bool no_wait, struct ttm_mem_reg *new_mem);
-+
-+/**
-+ * ttm_bo_free_old_node
-+ *
-+ * @bo: A pointer to a struct ttm_buffer_object.
-+ *
-+ * Utility function to free an old placement after a successful move.
-+ */
-+extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
-+
-+/**
-+ * ttm_bo_move_accel_cleanup
-+ *
-+ * @bo: A pointer to a struct ttm_buffer_object.
-+ * @sync_obj: A sync object that signals when moving is complete.
-+ * @sync_obj_arg: An argument to pass to the sync object idle / wait
-+ * functions.
-+ * @evict: This is an evict move. Don't return until the buffer is idle.
-+ * @no_wait: Never sleep, but rather return with -EBUSY.
-+ * @new_mem: struct ttm_mem_reg indicating where to move.
-+ *
-+ * Accelerated move function to be called when an accelerated move
-+ * has been scheduled. The function will create a new temporary buffer object
-+ * representing the old placement, and put the sync object on both buffer
-+ * objects. After that the newly created buffer object is unref'd to be
-+ * destroyed when the move is complete. This will help pipeline
-+ * buffer moves.
-+ */
-+
-+extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-+				     void *sync_obj,
-+				     void *sync_obj_arg,
-+				     bool evict, bool no_wait,
-+				     struct ttm_mem_reg *new_mem);
-+/**
-+ * ttm_io_prot
-+ *
-+ * @caching_flags: Caching flags, TTM_PL_FLAG_XX.
-+ * @tmp: Page protection flag for a normal, cached mapping.
-+ *
-+ * Utility function that returns the pgprot_t that should be used for
-+ * setting up a PTE with the caching model indicated by @caching_flags.
-+ */
-+extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
-+
-+#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
-+#define TTM_HAS_AGP
-+#include <linux/agp_backend.h>
-+
-+/**
-+ * ttm_agp_backend_init
-+ *
-+ * @bdev: Pointer to a struct ttm_bo_device.
-+ * @bridge: The agp bridge this device is sitting on.
-+ *
-+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
-+ * for TT memory. This function uses the linux agpgart interface to
-+ * bind and unbind memory backing a ttm_tt.
-+ */
-+extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
-+						struct agp_bridge_data *bridge);
-+#endif
-+
-+#endif
-diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_util.c b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c
---- a/drivers/gpu/drm/psb/ttm/ttm_bo_util.c	1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c	2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,529 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
-+ * All Rights Reserved.
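The three move helpers declared in the header above are meant to be composed inside a driver's move() callback. A rough sketch of the usual dispatch, with my_move_blit() standing in as a hypothetical driver DMA path:

    static int my_bo_move(struct ttm_buffer_object *bo,
                          bool evict, bool interruptible, bool no_wait,
                          struct ttm_mem_reg *new_mem)
    {
        struct ttm_mem_reg *old_mem = &bo->mem;

        /* Moves into or out of system memory only rebind page tables. */
        if (old_mem->mem_type == TTM_PL_SYSTEM ||
            new_mem->mem_type == TTM_PL_SYSTEM)
            return ttm_bo_move_ttm(bo, evict, no_wait, new_mem);

        /* Try the hardware blitter first... */
        if (my_move_blit(bo, evict, no_wait, new_mem) == 0)  /* hypothetical */
            return 0;

        /* ...and fall back to the slow CPU copy. */
        return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
    }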
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#include "ttm/ttm_bo_driver.h" -+#include "ttm/ttm_placement_common.h" -+#include "ttm/ttm_pat_compat.h" -+#include <linux/io.h> -+#include <linux/highmem.h> -+#include <linux/wait.h> -+#include <linux/version.h> -+ -+void ttm_bo_free_old_node(struct ttm_buffer_object *bo) -+{ -+ struct ttm_mem_reg *old_mem = &bo->mem; -+ -+ if (old_mem->mm_node) { -+ spin_lock(&bo->bdev->lru_lock); -+ drm_mm_put_block(old_mem->mm_node); -+ spin_unlock(&bo->bdev->lru_lock); -+ } -+ old_mem->mm_node = NULL; -+} -+ -+int ttm_bo_move_ttm(struct ttm_buffer_object *bo, -+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem) -+{ -+ struct ttm_tt *ttm = bo->ttm; -+ struct ttm_mem_reg *old_mem = &bo->mem; -+ uint32_t save_flags = old_mem->flags; -+ uint32_t save_proposed_flags = old_mem->proposed_flags; -+ int ret; -+ -+ if (old_mem->mem_type != TTM_PL_SYSTEM) { -+ ttm_tt_unbind(ttm); -+ ttm_bo_free_old_node(bo); -+ ttm_flag_masked(&old_mem->flags, TTM_PL_FLAG_SYSTEM, -+ TTM_PL_MASK_MEM); -+ old_mem->mem_type = TTM_PL_SYSTEM; -+ save_flags = old_mem->flags; -+ } -+ -+ ret = ttm_tt_set_placement_caching(ttm, new_mem->flags); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ if (new_mem->mem_type != TTM_PL_SYSTEM) { -+ ret = ttm_tt_bind(ttm, new_mem); -+ if (unlikely(ret != 0)) -+ return ret; -+ } -+ -+ *old_mem = *new_mem; -+ new_mem->mm_node = NULL; -+ old_mem->proposed_flags = save_proposed_flags; -+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE); -+ return 0; -+} -+ -+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, -+ void **virtual) -+{ -+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; -+ unsigned long bus_offset; -+ unsigned long bus_size; -+ unsigned long bus_base; -+ int ret; -+ void *addr; -+ -+ *virtual = NULL; -+ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size); -+ if (ret || bus_size == 0) -+ return ret; -+ -+ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) -+ addr = (void *)(((u8 *) man->io_addr) + bus_offset); -+ else { -+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)) -+ if (mem->flags & TTM_PL_FLAG_WC) -+ addr = ioremap_wc(bus_base + bus_offset, bus_size); -+ else -+ addr = 
ioremap_nocache(bus_base + bus_offset, bus_size); -+#else -+ addr = ioremap_nocache(bus_base + bus_offset, bus_size); -+#endif -+ if (!addr) -+ return -ENOMEM; -+ } -+ *virtual = addr; -+ return 0; -+} -+ -+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, -+ void *virtual) -+{ -+ struct ttm_mem_type_manager *man; -+ -+ man = &bdev->man[mem->mem_type]; -+ -+ if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) -+ iounmap(virtual); -+} -+ -+static int ttm_copy_io_page(void *dst, void *src, unsigned long page) -+{ -+ uint32_t *dstP = -+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT)); -+ uint32_t *srcP = -+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT)); -+ -+ int i; -+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i) -+ iowrite32(ioread32(srcP++), dstP++); -+ return 0; -+} -+ -+static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, -+ unsigned long page) -+{ -+ struct page *d = ttm_tt_get_page(ttm, page); -+ void *dst; -+ -+ if (!d) -+ return -ENOMEM; -+ -+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); -+ dst = kmap(d); -+ if (!dst) -+ return -ENOMEM; -+ -+ memcpy_fromio(dst, src, PAGE_SIZE); -+ kunmap(d); -+ return 0; -+} -+ -+static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, -+ unsigned long page) -+{ -+ struct page *s = ttm_tt_get_page(ttm, page); -+ void *src; -+ -+ if (!s) -+ return -ENOMEM; -+ -+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); -+ src = kmap(s); -+ if (!src) -+ return -ENOMEM; -+ -+ memcpy_toio(dst, src, PAGE_SIZE); -+ kunmap(s); -+ return 0; -+} -+ -+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, -+ bool evict, bool no_wait, struct ttm_mem_reg *new_mem) -+{ -+ struct ttm_bo_device *bdev = bo->bdev; -+ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; -+ struct ttm_tt *ttm = bo->ttm; -+ struct ttm_mem_reg *old_mem = &bo->mem; -+ struct ttm_mem_reg old_copy = *old_mem; -+ void *old_iomap; -+ void *new_iomap; -+ int ret; -+ uint32_t save_flags = old_mem->flags; -+ uint32_t save_proposed_flags = old_mem->proposed_flags; -+ unsigned long i; -+ unsigned long page; -+ unsigned long add = 0; -+ int dir; -+ -+ ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap); -+ if (ret) -+ return ret; -+ ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap); -+ if (ret) -+ goto out; -+ -+ if (old_iomap == NULL && new_iomap == NULL) -+ goto out2; -+ if (old_iomap == NULL && ttm == NULL) -+ goto out2; -+ -+ add = 0; -+ dir = 1; -+ -+ if ((old_mem->mem_type == new_mem->mem_type) && -+ (new_mem->mm_node->start < -+ old_mem->mm_node->start + old_mem->mm_node->size)) { -+ dir = -1; -+ add = new_mem->num_pages - 1; -+ } -+ -+ for (i = 0; i < new_mem->num_pages; ++i) { -+ page = i * dir + add; -+ if (old_iomap == NULL) -+ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page); -+ else if (new_iomap == NULL) -+ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page); -+ else -+ ret = ttm_copy_io_page(new_iomap, old_iomap, page); -+ if (ret) -+ goto out1; -+ } -+ mb(); -+ out2: -+ ttm_bo_free_old_node(bo); -+ -+ *old_mem = *new_mem; -+ new_mem->mm_node = NULL; -+ old_mem->proposed_flags = save_proposed_flags; -+ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE); -+ -+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) { -+ ttm_tt_unbind(ttm); -+ ttm_tt_destroy(ttm); -+ bo->ttm = NULL; -+ } -+ -+ out1: -+ ttm_mem_reg_iounmap(bdev, new_mem, new_iomap); -+ out: -+ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); -+ return ret; -+} -+ -+/** -+ * 
ttm_buffer_object_transfer
-+ *
-+ * @bo: A pointer to a struct ttm_buffer_object.
-+ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
-+ * holding the data of @bo with the old placement.
-+ *
-+ * This is a utility function that may be called after an accelerated move
-+ * has been scheduled. A new buffer object is created as a placeholder for
-+ * the old data while it's being copied. When that buffer object is idle,
-+ * it can be destroyed, releasing the space of the old placement.
-+ * Returns:
-+ * !0: Failure.
-+ */
-+
-+static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
-+				      struct ttm_buffer_object **new_obj)
-+{
-+	struct ttm_buffer_object *fbo;
-+	struct ttm_bo_device *bdev = bo->bdev;
-+	struct ttm_bo_driver *driver = bdev->driver;
-+
-+	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
-+	if (!fbo)
-+		return -ENOMEM;
-+
-+	*fbo = *bo;
-+	mutex_init(&fbo->mutex);
-+	mutex_lock(&fbo->mutex);
-+
-+	init_waitqueue_head(&fbo->event_queue);
-+	INIT_LIST_HEAD(&fbo->ddestroy);
-+	INIT_LIST_HEAD(&fbo->lru);
-+
-+	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
-+	if (fbo->mem.mm_node)
-+		fbo->mem.mm_node->private = (void *)fbo;
-+	kref_init(&fbo->list_kref);
-+	kref_init(&fbo->kref);
-+
-+	mutex_unlock(&fbo->mutex);
-+
-+	*new_obj = fbo;
-+	return 0;
-+}
-+
-+pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
-+{
-+#if defined(__i386__) || defined(__x86_64__)
-+	if (caching_flags & TTM_PL_FLAG_WC)
-+		tmp = pgprot_ttm_x86_wc(tmp);
-+	else if (boot_cpu_data.x86 > 3)
-+		tmp = pgprot_noncached(tmp);
-+#elif defined(__powerpc__)
-+	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
-+		pgprot_val(tmp) |= _PAGE_NO_CACHE;
-+		if (caching_flags & TTM_PL_FLAG_UNCACHED)
-+			pgprot_val(tmp) |= _PAGE_GUARDED;
-+	}
-+#endif
-+#if defined(__ia64__)
-+	if (caching_flags & TTM_PL_FLAG_WC)
-+		tmp = pgprot_writecombine(tmp);
-+	else
-+		tmp = pgprot_noncached(tmp);
-+#endif
-+#if defined(__sparc__)
-+	if (!(caching_flags & TTM_PL_FLAG_CACHED))
-+		tmp = pgprot_noncached(tmp);
-+#endif
-+	return tmp;
-+}
-+
-+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
-+			  unsigned long bus_base,
-+			  unsigned long bus_offset,
-+			  unsigned long bus_size,
-+			  struct ttm_bo_kmap_obj *map)
-+{
-+	struct ttm_bo_device *bdev = bo->bdev;
-+	struct ttm_mem_reg *mem = &bo->mem;
-+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-+
-+	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
-+		map->bo_kmap_type = ttm_bo_map_premapped;
-+		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
-+	} else {
-+		map->bo_kmap_type = ttm_bo_map_iomap;
-+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
-+		if (mem->flags & TTM_PL_FLAG_WC)
-+			map->virtual = ioremap_wc(bus_base + bus_offset,
-+						  bus_size);
-+		else
-+			map->virtual = ioremap_nocache(bus_base + bus_offset,
-+						       bus_size);
-+#else
-+		map->virtual = ioremap_nocache(bus_base + bus_offset,
-+					       bus_size);
-+#endif
-+	}
-+	return (!map->virtual) ? -ENOMEM : 0;
-+}
-+
-+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
-+			   unsigned long start_page,
-+			   unsigned long num_pages,
-+			   struct ttm_bo_kmap_obj *map)
-+{
-+	struct ttm_mem_reg *mem = &bo->mem;
-+	pgprot_t prot;
-+	struct ttm_tt *ttm = bo->ttm;
-+	struct page *d;
-+	int i;
-+
-+	BUG_ON(!ttm);
-+	if (num_pages == 1 && (mem->flags & TTM_PL_FLAG_CACHED)) {
-+		/*
-+		 * We're mapping a single page, and the desired
-+		 * page protection is consistent with the bo.
-+		 */
-+		map->bo_kmap_type = ttm_bo_map_kmap;
-+		map->page = ttm_tt_get_page(ttm, start_page);
-+		map->virtual = kmap(map->page);
-+	} else {
-+		/*
-+		 * Populate the part we're mapping.
-+		 */
-+		for (i = start_page; i < start_page + num_pages; ++i) {
-+			d = ttm_tt_get_page(ttm, i);
-+			if (!d)
-+				return -ENOMEM;
-+		}
-+
-+		/*
-+		 * We need to use vmap to get the desired page protection
-+		 * or to make the buffer object look contiguous.
-+		 */
-+		prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
-+			PAGE_KERNEL :
-+			ttm_io_prot(mem->flags, PAGE_KERNEL);
-+		map->bo_kmap_type = ttm_bo_map_vmap;
-+		map->virtual = vmap(ttm->pages + start_page, num_pages,
-+				    0, prot);
-+	}
-+	return (!map->virtual) ? -ENOMEM : 0;
-+}
-+
-+int ttm_bo_kmap(struct ttm_buffer_object *bo,
-+		unsigned long start_page, unsigned long num_pages,
-+		struct ttm_bo_kmap_obj *map)
-+{
-+	int ret;
-+	unsigned long bus_base;
-+	unsigned long bus_offset;
-+	unsigned long bus_size;
-+
-+	BUG_ON(!list_empty(&bo->swap));
-+	map->virtual = NULL;
-+	if (num_pages > bo->num_pages)
-+		return -EINVAL;
-+	if (start_page > bo->num_pages)
-+		return -EINVAL;
-+#if 0
-+	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
-+		return -EPERM;
-+#endif
-+	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-+				&bus_offset, &bus_size);
-+	if (ret)
-+		return ret;
-+	if (bus_size == 0) {
-+		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
-+	} else {
-+		bus_offset += start_page << PAGE_SHIFT;
-+		bus_size = num_pages << PAGE_SHIFT;
-+		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
-+	}
-+}
-+
-+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
-+{
-+	if (!map->virtual)
-+		return;
-+	switch (map->bo_kmap_type) {
-+	case ttm_bo_map_iomap:
-+		iounmap(map->virtual);
-+		break;
-+	case ttm_bo_map_vmap:
-+		vunmap(map->virtual);
-+		break;
-+	case ttm_bo_map_kmap:
-+		kunmap(map->page);
-+		break;
-+	case ttm_bo_map_premapped:
-+		break;
-+	default:
-+		BUG();
-+	}
-+	map->virtual = NULL;
-+	map->page = NULL;
-+}
-+
-+int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-+		    unsigned long dst_offset,
-+		    unsigned long *pfn, pgprot_t *prot)
-+{
-+	struct ttm_mem_reg *mem = &bo->mem;
-+	struct ttm_bo_device *bdev = bo->bdev;
-+	unsigned long bus_offset;
-+	unsigned long bus_size;
-+	unsigned long bus_base;
-+	int ret;
-+
-+	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-+				&bus_size);
-+	if (ret)
-+		return -EINVAL;
-+	if (bus_size != 0)
-+		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-+	else if (!bo->ttm)
-+		return -EINVAL;
-+	else
-+		*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-+						   dst_offset >> PAGE_SHIFT));
-+	*prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
-+		PAGE_KERNEL : ttm_io_prot(mem->flags, PAGE_KERNEL);
-+	return 0;
-+}
-+
-+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-+			      void *sync_obj,
-+			      void *sync_obj_arg,
-+			      bool evict, bool no_wait,
-+			      struct ttm_mem_reg *new_mem)
-+{
-+	struct ttm_bo_device *bdev = bo->bdev;
-+	struct ttm_bo_driver *driver = bdev->driver;
-+	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
-+	struct ttm_mem_reg *old_mem = &bo->mem;
-+	int ret;
-+	uint32_t save_flags = old_mem->flags;
-+	uint32_t save_proposed_flags = old_mem->proposed_flags;
-+	struct ttm_buffer_object *old_obj;
-+
-+	if (bo->sync_obj)
-+		driver->sync_obj_unref(&bo->sync_obj);
-+	bo->sync_obj = driver->sync_obj_ref(sync_obj);
-+	bo->sync_obj_arg = sync_obj_arg;
-+	if (evict) {
-+		ret = ttm_bo_wait(bo, false, false, false);
-+		if (ret)
-+			return ret;
-+		ttm_bo_free_old_node(bo);
-+		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
-+		    (bo->ttm != NULL)) {
-+			ttm_tt_unbind(bo->ttm);
-+			ttm_tt_destroy(bo->ttm);
-+			bo->ttm = NULL;
-+		}
-+	} else {
-+
-+		/* This should help pipeline ordinary buffer moves.
-+		 *
-+		 * Hang old buffer memory on a new buffer object,
-+		 * and leave it to be released when the GPU
-+		 * operation has completed.
-+		 */
-+		ret = ttm_buffer_object_transfer(bo, &old_obj);
-+		if (ret)
-+			return ret;
-+		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
-+			old_obj->ttm = NULL;
-+		else
-+			bo->ttm = NULL;
-+		bo->priv_flags |= TTM_BO_PRIV_FLAG_MOVING;
-+		ttm_bo_unreserve(old_obj);
-+	}
-+
-+	*old_mem = *new_mem;
-+	new_mem->mm_node = NULL;
-+	old_mem->proposed_flags = save_proposed_flags;
-+	ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
-+	return 0;
-+}
-diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
---- a/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c	1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c	2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,596 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
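For CPU access, the ttm_bo_kmap()/ttm_bo_kunmap() pair implemented above is used roughly as in this sketch; the buffer object is assumed to be already reserved by the caller, and ttm_kmap_obj_virtual() reports whether the mapping points at IO memory:

    struct ttm_bo_kmap_obj map;
    bool is_iomem;
    void *virtual;
    int ret;

    /* Map the whole object. */
    ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
    if (ret)
        return ret;

    virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
    if (is_iomem)
        memset_io((void __iomem *)virtual, 0,
                  bo->num_pages << PAGE_SHIFT);
    else
        memset(virtual, 0, bo->num_pages << PAGE_SHIFT);

    ttm_bo_kunmap(&map);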
-+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+ -+#include "ttm/ttm_bo_driver.h" -+#include "ttm/ttm_placement_common.h" -+#include <linux/mm.h> -+#include <linux/version.h> -+#include <linux/rbtree.h> -+#include <asm/uaccess.h> -+ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)) -+#error "TTM doesn't build on kernel versions below 2.6.25." -+#endif -+ -+#define TTM_BO_VM_NUM_PREFAULT 16 -+ -+static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev, -+ unsigned long page_start, -+ unsigned long num_pages) -+{ -+ struct rb_node *cur = bdev->addr_space_rb.rb_node; -+ unsigned long cur_offset; -+ struct ttm_buffer_object *bo; -+ struct ttm_buffer_object *best_bo = NULL; -+ -+ while (likely(cur != NULL)) { -+ bo = rb_entry(cur, struct ttm_buffer_object, vm_rb); -+ cur_offset = bo->vm_node->start; -+ if (page_start >= cur_offset) { -+ cur = cur->rb_right; -+ best_bo = bo; -+ if (page_start == cur_offset) -+ break; -+ } else -+ cur = cur->rb_left; -+ } -+ -+ if (unlikely(best_bo == NULL)) -+ return NULL; -+ -+ if (unlikely((best_bo->vm_node->start + best_bo->num_pages) < -+ (page_start + num_pages))) -+ return NULL; -+ -+ return best_bo; -+} -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) -+static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -+{ -+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *) -+ vma->vm_private_data; -+ struct ttm_bo_device *bdev = bo->bdev; -+ unsigned long bus_base; -+ unsigned long bus_offset; -+ unsigned long bus_size; -+ unsigned long page_offset; -+ unsigned long page_last; -+ unsigned long pfn; -+ struct ttm_tt *ttm = NULL; -+ struct page *page; -+ int ret; -+ int i; -+ bool is_iomem; -+ unsigned long address = (unsigned long)vmf->virtual_address; -+ int retval = VM_FAULT_NOPAGE; -+ -+ ret = ttm_bo_reserve(bo, true, false, false, 0); -+ if (unlikely(ret != 0)) -+ return VM_FAULT_NOPAGE; -+ -+ mutex_lock(&bo->mutex); -+ -+ /* -+ * Wait for buffer data in transit, due to a pipelined -+ * move. -+ */ -+ -+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) { -+ ret = ttm_bo_wait(bo, false, true, false); -+ if (unlikely(ret != 0)) { -+ retval = (ret != -ERESTART) ? -+ VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; -+ goto out_unlock; -+ } -+ } -+ -+ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset, -+ &bus_size); -+ if (unlikely(ret != 0)) { -+ retval = VM_FAULT_SIGBUS; -+ goto out_unlock; -+ } -+ -+ is_iomem = (bus_size != 0); -+ -+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + -+ bo->vm_node->start - vma->vm_pgoff; -+ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + -+ bo->vm_node->start - vma->vm_pgoff; -+ -+ if (unlikely(page_offset >= bo->num_pages)) { -+ retval = VM_FAULT_SIGBUS; -+ goto out_unlock; -+ } -+ -+ /* -+ * Strictly, we're not allowed to modify vma->vm_page_prot here, -+ * since the mmap_sem is only held in read mode. However, we -+ * modify only the caching bits of vma->vm_page_prot and -+ * consider those bits protected by -+ * the bo->mutex, as we should be the only writers. -+ * There shouldn't really be any readers of these bits except -+ * within vm_insert_mixed()? fork? -+ * -+ * TODO: Add a list of vmas to the bo, and change the -+ * vma->vm_page_prot when the object changes caching policy, with -+ * the correct locks held. 
-+ */ -+ -+ if (is_iomem) { -+ vma->vm_page_prot = ttm_io_prot(bo->mem.flags, -+ vma->vm_page_prot); -+ } else { -+ ttm = bo->ttm; -+ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ? -+ vm_get_page_prot(vma->vm_flags) : -+ ttm_io_prot(bo->mem.flags, vma->vm_page_prot); -+ } -+ -+ /* -+ * Speculatively prefault a number of pages. Only error on -+ * first page. -+ */ -+ -+ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { -+ -+ if (is_iomem) -+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + -+ page_offset; -+ else { -+ page = ttm_tt_get_page(ttm, page_offset); -+ if (unlikely(!page && i == 0)) { -+ retval = VM_FAULT_OOM; -+ goto out_unlock; -+ } else if (unlikely(!page)) { -+ break; -+ } -+ pfn = page_to_pfn(page); -+ } -+ -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) -+ ret = vm_insert_mixed(vma, address, pfn); -+#else -+ ret = vm_insert_pfn(vma, address, pfn); -+#endif -+ /* -+ * Somebody beat us to this PTE or prefaulting to -+ * an already populated PTE, or prefaulting error. -+ */ -+ -+ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0))) -+ break; -+ else if (unlikely(ret != 0)) { -+ retval = -+ (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; -+ goto out_unlock; -+ -+ } -+ -+ address += PAGE_SIZE; -+ if (unlikely(++page_offset >= page_last)) -+ break; -+ } -+ -+ out_unlock: -+ mutex_unlock(&bo->mutex); -+ ttm_bo_unreserve(bo); -+ return retval; -+} -+ -+#else -+ -+static unsigned long ttm_bo_vm_nopfn(struct vm_area_struct *vma, -+ unsigned long address) -+{ -+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *) -+ vma->vm_private_data; -+ struct ttm_bo_device *bdev = bo->bdev; -+ unsigned long bus_base; -+ unsigned long bus_offset; -+ unsigned long bus_size; -+ unsigned long page_offset; -+ unsigned long page_last; -+ unsigned long pfn; -+ struct ttm_tt *ttm = NULL; -+ struct page *page; -+ int ret; -+ int i; -+ bool is_iomem; -+ unsigned long retval = NOPFN_REFAULT; -+ -+ ret = ttm_bo_reserve(bo, true, false, false, 0); -+ if (unlikely(ret != 0)) -+ return NOPFN_REFAULT; -+ -+ mutex_lock(&bo->mutex); -+ -+ /* -+ * Wait for buffer data in transit, due to a pipelined -+ * move. -+ */ -+ -+ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) { -+ ret = ttm_bo_wait(bo, false, true, false); -+ if (unlikely(ret != 0)) { -+ retval = (ret != -ERESTART) ? -+ NOPFN_SIGBUS : NOPFN_REFAULT; -+ goto out_unlock; -+ } -+ } -+ -+ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset, -+ &bus_size); -+ if (unlikely(ret != 0)) { -+ printk(KERN_ERR "Attempted buffer object access " -+ "of unmappable object.\n"); -+ retval = NOPFN_SIGBUS; -+ goto out_unlock; -+ } -+ -+ is_iomem = (bus_size != 0); -+ -+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + -+ bo->vm_node->start - vma->vm_pgoff; -+ -+ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + -+ bo->vm_node->start - vma->vm_pgoff; -+ -+ if (unlikely(page_offset >= bo->num_pages)) { -+ printk(KERN_ERR "Attempted buffer object access " -+ "outside object.\n"); -+ retval = NOPFN_SIGBUS; -+ goto out_unlock; -+ } -+ -+ /* -+ * Strictly, we're not allowed to modify vma->vm_page_prot here, -+ * since the mmap_sem is only held in read mode. However, we -+ * modify only the caching bits of vma->vm_page_prot and -+ * consider those bits protected by -+ * the bo->mutex, as we should be the only writers. -+ * There shouldn't really be any readers of these bits except -+ * within vm_insert_mixed()? fork? 
-+ *
-+ * TODO: Add a list of vmas to the bo, and change the
-+ * vma->vm_page_prot when the object changes caching policy, with
-+ * the correct locks held.
-+ */
-+
-+	if (is_iomem) {
-+		vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
-+						vma->vm_page_prot);
-+	} else {
-+		ttm = bo->ttm;
-+		vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
-+			vm_get_page_prot(vma->vm_flags) :
-+			ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
-+	}
-+
-+	/*
-+	 * Speculatively prefault a number of pages. Only error on
-+	 * first page.
-+	 */
-+
-+	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-+
-+		if (is_iomem)
-+			pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
-+			    page_offset;
-+		else {
-+			page = ttm_tt_get_page(ttm, page_offset);
-+			if (unlikely(!page && i == 0)) {
-+				retval = NOPFN_OOM;
-+				goto out_unlock;
-+			} else if (unlikely(!page)) {
-+				break;
-+			}
-+			pfn = page_to_pfn(page);
-+		}
-+
-+		ret = vm_insert_pfn(vma, address, pfn);
-+
-+		/*
-+		 * Somebody beat us to this PTE or prefaulting to
-+		 * an already populated PTE, or prefaulting error.
-+		 */
-+
-+		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
-+			break;
-+		else if (unlikely(ret != 0)) {
-+			retval =
-+			    (ret == -ENOMEM) ? NOPFN_OOM : NOPFN_SIGBUS;
-+			goto out_unlock;
-+		}
-+
-+		address += PAGE_SIZE;
-+		if (unlikely(++page_offset >= page_last))
-+			break;
-+	}
-+
-+  out_unlock:
-+	mutex_unlock(&bo->mutex);
-+	ttm_bo_unreserve(bo);
-+	return retval;
-+}
-+#endif
-+
-+static void ttm_bo_vm_open(struct vm_area_struct *vma)
-+{
-+	struct ttm_buffer_object *bo =
-+	    (struct ttm_buffer_object *)vma->vm_private_data;
-+
-+	(void)ttm_bo_reference(bo);
-+}
-+
-+static void ttm_bo_vm_close(struct vm_area_struct *vma)
-+{
-+	struct ttm_buffer_object *bo =
-+	    (struct ttm_buffer_object *)vma->vm_private_data;
-+
-+	ttm_bo_unref(&bo);
-+	vma->vm_private_data = NULL;
-+}
-+
-+static struct vm_operations_struct ttm_bo_vm_ops = {
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
-+	.fault = ttm_bo_vm_fault,
-+#else
-+	.nopfn = ttm_bo_vm_nopfn,
-+#endif
-+	.open = ttm_bo_vm_open,
-+	.close = ttm_bo_vm_close
-+};
-+
-+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
-+		struct ttm_bo_device *bdev)
-+{
-+	struct ttm_bo_driver *driver;
-+	struct ttm_buffer_object *bo;
-+	int ret;
-+
-+	read_lock(&bdev->vm_lock);
-+	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
-+				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
-+	if (likely(bo != NULL))
-+		ttm_bo_reference(bo);
-+	read_unlock(&bdev->vm_lock);
-+
-+	if (unlikely(bo == NULL)) {
-+		printk(KERN_ERR "Could not find buffer object to map.\n");
-+		return -EINVAL;
-+	}
-+
-+	driver = bo->bdev->driver;
-+	if (unlikely(!driver->verify_access)) {
-+		ret = -EPERM;
-+		goto out_unref;
-+	}
-+	ret = driver->verify_access(bo, filp);
-+	if (unlikely(ret != 0))
-+		goto out_unref;
-+
-+	vma->vm_ops = &ttm_bo_vm_ops;
-+
-+	/*
-+	 * Note: We're transferring the bo reference to
-+	 * vma->vm_private_data here.
-+	 */
-+
-+	vma->vm_private_data = bo;
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
-+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
-+#else
-+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
-+#endif
-+	return 0;
-+ out_unref:
-+	ttm_bo_unref(&bo);
-+	return ret;
-+}
-+
-+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
-+{
-+	if (vma->vm_pgoff != 0)
-+		return -EACCES;
-+
-+	vma->vm_ops = &ttm_bo_vm_ops;
-+	vma->vm_private_data = ttm_bo_reference(bo);
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
-+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
-+#else
-+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
-+#endif
-+	return 0;
-+}
-+
-+ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
-+		  const char __user *wbuf, char __user *rbuf, size_t count,
-+		  loff_t *f_pos, bool write)
-+{
-+	struct ttm_buffer_object *bo;
-+	struct ttm_bo_driver *driver;
-+	struct ttm_bo_kmap_obj map;
-+	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
-+	unsigned long kmap_offset;
-+	unsigned long kmap_end;
-+	unsigned long kmap_num;
-+	size_t io_size;
-+	unsigned int page_offset;
-+	char *virtual;
-+	int ret;
-+	bool no_wait = false;
-+	bool dummy;
-+
-+	read_lock(&bdev->vm_lock);
-+	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
-+	if (likely(bo != NULL))
-+		ttm_bo_reference(bo);
-+	read_unlock(&bdev->vm_lock);
-+
-+	if (unlikely(bo == NULL))
-+		return -EFAULT;
-+
-+	driver = bo->bdev->driver;
-+	if (unlikely(!driver->verify_access)) {
-+		ret = -EPERM;
-+		goto out_unref;
-+	}
-+
-+	ret = driver->verify_access(bo, filp);
-+	if (unlikely(ret != 0))
-+		goto out_unref;
-+
-+	kmap_offset = dev_offset - bo->vm_node->start;
-+	if (unlikely(kmap_offset >= bo->num_pages)) {
-+		ret = -EFBIG;
-+		goto out_unref;
-+	}
-+
-+	page_offset = *f_pos & ~PAGE_MASK;
-+	io_size = bo->num_pages - kmap_offset;
-+	io_size = (io_size << PAGE_SHIFT) - page_offset;
-+	if (count < io_size)
-+		io_size = count;
-+
-+	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
-+	kmap_num = kmap_end - kmap_offset + 1;
-+
-+	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-+
-+	switch (ret) {
-+	case 0:
-+		break;
-+	case -ERESTART:
-+		ret = -EINTR;
-+		goto out_unref;
-+	case -EBUSY:
-+		ret = -EAGAIN;
-+		goto out_unref;
-+	default:
-+		goto out_unref;
-+	}
-+
-+	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
-+	if (unlikely(ret != 0)) {
-+		ttm_bo_unreserve(bo);
-+		goto out_unref;
-+	}
-+
-+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
-+	virtual += page_offset;
-+
-+	if (write)
-+		ret = copy_from_user(virtual, wbuf, io_size);
-+	else
-+		ret = copy_to_user(rbuf, virtual, io_size);
-+
-+	ttm_bo_kunmap(&map);
-+	ttm_bo_unreserve(bo);
-+	ttm_bo_unref(&bo);
-+
-+	if (unlikely(ret != 0))
-+		return -EFAULT;
-+
-+	*f_pos += io_size;
-+
-+	return io_size;
-+ out_unref:
-+	ttm_bo_unref(&bo);
-+	return ret;
-+}
-+
-+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
-+			char __user *rbuf, size_t count, loff_t *f_pos,
-+			bool write)
-+{
-+	struct ttm_bo_kmap_obj map;
-+	unsigned long kmap_offset;
-+	unsigned long kmap_end;
-+	unsigned long kmap_num;
-+	size_t io_size;
-+	unsigned int page_offset;
-+	char *virtual;
-+	int ret;
-+	bool no_wait = false;
-+	bool dummy;
-+
-+	kmap_offset = (*f_pos >> PAGE_SHIFT);
-+	if (unlikely(kmap_offset >= bo->num_pages))
-+		return -EFBIG;
-+
-+	page_offset = *f_pos & ~PAGE_MASK;
-+	io_size = bo->num_pages - kmap_offset;
-+	io_size = (io_size << PAGE_SHIFT) - page_offset;
-+	if (count < io_size)
-+		io_size = count;
-+
-+	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
-+	kmap_num = kmap_end - kmap_offset + 1;
-+
-+	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-+
-+	switch (ret) {
-+	case 0:
-+		break;
-+	case -ERESTART:
-+		return -EINTR;
-+	case -EBUSY:
-+		return -EAGAIN;
-+	default:
-+		return ret;
-+	}
-+
-+	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
-+	if (unlikely(ret != 0)) {
-+		ttm_bo_unreserve(bo);
-+		return ret;
-+	}
-+
-+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
-+	virtual += page_offset;
-+
-+	if (write)
-+		ret = copy_from_user(virtual, wbuf, io_size);
-+	else
-+		ret = copy_to_user(rbuf, virtual, io_size);
-+
-+	ttm_bo_kunmap(&map);
-+	ttm_bo_unreserve(bo);
-+
-+	if (unlikely(ret != 0))
-+		return -EFAULT;
-+
-+	*f_pos += io_size;
-+
-+	return io_size;
-+}
-diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c
---- a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c	1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c	2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,115 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+
-+#include "ttm/ttm_execbuf_util.h"
-+#include "ttm/ttm_bo_driver.h"
-+#include "ttm/ttm_placement_common.h"
-+#include <linux/wait.h>
-+#include <linux/sched.h>
-+
-+void ttm_eu_backoff_reservation(struct list_head *list)
-+{
-+	struct ttm_validate_buffer *entry;
-+
-+	list_for_each_entry(entry, list, head) {
-+		struct ttm_buffer_object *bo = entry->bo;
-+		if (!entry->reserved)
-+			continue;
-+
-+		entry->reserved = false;
-+		ttm_bo_unreserve(bo);
-+	}
-+}
-+
-+/*
-+ * Reserve buffers for validation.
-+ *
-+ * If a buffer in the list is marked for CPU access, we back off and
-+ * wait for that buffer to become free for GPU access.
-+ *
-+ * If a buffer is reserved for another validation, the validator with
-+ * the highest validation sequence backs off and waits for that buffer
-+ * to become unreserved. This prevents deadlocks when validating multiple
-+ * buffers in different orders.
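From the caller's point of view these execbuf helpers bracket command submission: reserve everything, submit, then fence or back off. A condensed sketch; my_next_val_seq() and my_submit_commands() are hypothetical driver helpers, and fence is assumed to be a driver sync object:

    struct ttm_validate_buffer val_buf;
    LIST_HEAD(val_list);
    int ret;

    val_buf.bo = bo;
    val_buf.new_sync_obj_arg = NULL;
    list_add_tail(&val_buf.head, &val_list);

    ret = ttm_eu_reserve_buffers(&val_list, my_next_val_seq());
    if (ret)
        return ret;

    ret = my_submit_commands();
    if (ret) {
        /* Submission failed: drop the reservations without fencing. */
        ttm_eu_backoff_reservation(&val_list);
        return ret;
    }

    /* Attach the new sync object and unreserve in one go. */
    ttm_eu_fence_buffer_objects(&val_list, fence);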
-+ */ -+ -+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq) -+{ -+ struct ttm_validate_buffer *entry; -+ int ret; -+ -+ retry: -+ list_for_each_entry(entry, list, head) { -+ struct ttm_buffer_object *bo = entry->bo; -+ -+ entry->reserved = false; -+ ret = ttm_bo_reserve(bo, true, false, true, val_seq); -+ if (ret != 0) { -+ ttm_eu_backoff_reservation(list); -+ if (ret == -EAGAIN) { -+ ret = ttm_bo_wait_unreserved(bo, true); -+ if (unlikely(ret != 0)) -+ return ret; -+ goto retry; -+ } else -+ return ret; -+ } -+ -+ entry->reserved = true; -+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { -+ ttm_eu_backoff_reservation(list); -+ ret = ttm_bo_wait_cpu(bo, false); -+ if (ret) -+ return ret; -+ goto retry; -+ } -+ } -+ return 0; -+} -+ -+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) -+{ -+ struct ttm_validate_buffer *entry; -+ -+ list_for_each_entry(entry, list, head) { -+ struct ttm_buffer_object *bo = entry->bo; -+ struct ttm_bo_driver *driver = bo->bdev->driver; -+ void *old_sync_obj; -+ -+ mutex_lock(&bo->mutex); -+ old_sync_obj = bo->sync_obj; -+ bo->sync_obj = driver->sync_obj_ref(sync_obj); -+ bo->sync_obj_arg = entry->new_sync_obj_arg; -+ mutex_unlock(&bo->mutex); -+ ttm_bo_unreserve(bo); -+ entry->reserved = false; -+ if (old_sync_obj) -+ driver->sync_obj_unref(&old_sync_obj); -+ } -+} -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h ---- a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,110 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#ifndef _TTM_EXECBUF_UTIL_H_ -+#define _TTM_EXECBUF_UTIL_H_ -+ -+#include "ttm/ttm_bo_api.h" -+#include "ttm/ttm_fence_api.h" -+#include <linux/list.h> -+ -+/** -+ * struct ttm_validate_buffer -+ * -+ * @head: list head for thread-private list. -+ * @bo: refcounted buffer object pointer. 
-+ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used when
-+ * adding a new sync object.
-+ * @reserved: Indicates whether @bo has been reserved for validation.
-+ */
-+
-+struct ttm_validate_buffer {
-+	struct list_head head;
-+	struct ttm_buffer_object *bo;
-+	void *new_sync_obj_arg;
-+	bool reserved;
-+};
-+
-+/**
-+ * function ttm_eu_backoff_reservation
-+ *
-+ * @list: thread private list of ttm_validate_buffer structs.
-+ *
-+ * Undoes all buffer validation reservations for bos pointed to by
-+ * the list entries.
-+ */
-+
-+extern void ttm_eu_backoff_reservation(struct list_head *list);
-+
-+/**
-+ * function ttm_eu_reserve_buffers
-+ *
-+ * @list: thread private list of ttm_validate_buffer structs.
-+ * @val_seq: A unique sequence number.
-+ *
-+ * Tries to reserve bos pointed to by the list entries for validation.
-+ * If the function returns 0, all buffers are marked as "unfenced",
-+ * taken off the lru lists and are not synced for write CPU usage.
-+ *
-+ * If the function detects a deadlock due to multiple threads trying to
-+ * reserve the same buffers in reverse order, all threads except one will
-+ * back off and retry. This function may sleep while waiting for
-+ * CPU write reservations to be cleared, and for other threads to
-+ * unreserve their buffers.
-+ *
-+ * This function may return -ERESTART or -EAGAIN if the calling process
-+ * receives a signal while waiting. In that case, no buffers on the list
-+ * will be reserved upon return.
-+ *
-+ * Buffers reserved by this function should be unreserved by
-+ * a call to either ttm_eu_backoff_reservation() or
-+ * ttm_eu_fence_buffer_objects() when command submission is complete or
-+ * has failed.
-+ */
-+
-+extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
-+
-+/**
-+ * function ttm_eu_fence_buffer_objects
-+ *
-+ * @list: thread private list of ttm_validate_buffer structs.
-+ * @sync_obj: The new sync object for the buffers.
-+ *
-+ * This function should be called when command submission is complete, and
-+ * it will add a new sync object to bos pointed to by entries on @list.
-+ * It also unreserves all buffers, putting them on lru lists.
-+ *
-+ */
-+
-+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
-+
-+#endif
-diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_api.h b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h
---- a/drivers/gpu/drm/psb/ttm/ttm_fence_api.h	1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h	2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,277 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
-+ */
-+#ifndef _TTM_FENCE_API_H_
-+#define _TTM_FENCE_API_H_
-+
-+#include <linux/list.h>
-+#include <linux/kref.h>
-+
-+#define TTM_FENCE_FLAG_EMIT (1 << 0)
-+#define TTM_FENCE_TYPE_EXE (1 << 0)
-+
-+struct ttm_fence_device;
-+
-+/**
-+ * struct ttm_fence_info
-+ *
-+ * @signaled_types: Bitfield indicating which types are signaled.
-+ * @error: Last error reported from the device.
-+ *
-+ * Used as output from ttm_fence_get_info().
-+ */
-+
-+struct ttm_fence_info {
-+ uint32_t signaled_types;
-+ uint32_t error;
-+};
-+
-+/**
-+ * struct ttm_fence_object
-+ *
-+ * @fdev: Pointer to the fence device struct.
-+ * @kref: Holds the reference count of this fence object.
-+ * @fence_class: The fence class of this fence.
-+ * @fence_type: Bitfield indicating types for this fence.
-+ * @ring: List head used for the circular list of not-completely
-+ * signaled fences.
-+ * @info: Data for fast retrieval using the ttm_fence_get_info()
-+ * function.
-+ * @timeout_jiffies: Absolute jiffies value indicating when this fence
-+ * object times out and, if waited on, calls ttm_fence_lockup
-+ * to check for and resolve a GPU lockup.
-+ * @sequence: Fence sequence number.
-+ * @waiting_types: Types currently waited on.
-+ * @destroy: Called to free the fence object, when its refcount has
-+ * reached zero. If NULL, kfree is used.
-+ *
-+ * This struct is provided in the driver interface so that drivers can
-+ * derive from it and create their own fence implementation. All members
-+ * are private to the fence implementation and the fence driver callbacks.
-+ * Otherwise a driver may access the derived object using container_of().
-+ */
-+
-+struct ttm_fence_object {
-+ struct ttm_fence_device *fdev;
-+ struct kref kref;
-+ uint32_t fence_class;
-+ uint32_t fence_type;
-+
-+ /*
-+ * The below fields are protected by the fence class
-+ * manager spinlock.
-+ */
-+
-+ struct list_head ring;
-+ struct ttm_fence_info info;
-+ unsigned long timeout_jiffies;
-+ uint32_t sequence;
-+ uint32_t waiting_types;
-+ void (*destroy) (struct ttm_fence_object *);
-+};
-+
-+/**
-+ * ttm_fence_object_init
-+ *
-+ * @fdev: Pointer to a struct ttm_fence_device.
-+ * @fence_class: Fence class for this fence.
-+ * @type: Fence type for this fence.
-+ * @create_flags: Flags indicating various actions at init time. At this point
-+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
-+ * the command stream.
-+ * @destroy: Destroy function. If NULL, kfree() is used.
-+ * @fence: The struct ttm_fence_object to initialize.
-+ *
-+ * Initialize a pre-allocated fence object. This function, together with the
-+ * destroy function makes it possible to derive driver-specific fence objects.
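-+ *
-+ * A minimal usage sketch (illustrative only; struct my_fence and
-+ * my_fence_destroy are hypothetical driver-side names, not part of
-+ * this API):
-+ *
-+ *   struct my_fence {
-+ *           struct ttm_fence_object base;
-+ *   };
-+ *
-+ *   static void my_fence_destroy(struct ttm_fence_object *fence)
-+ *   {
-+ *           kfree(container_of(fence, struct my_fence, base));
-+ *   }
-+ *
-+ *   struct my_fence *f = kmalloc(sizeof(*f), GFP_KERNEL);
-+ *   ret = ttm_fence_object_init(fdev, fence_class, TTM_FENCE_TYPE_EXE,
-+ *                               TTM_FENCE_FLAG_EMIT, &my_fence_destroy,
-+ *                               &f->base);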
-+ */
-+
-+extern int
-+ttm_fence_object_init(struct ttm_fence_device *fdev,
-+ uint32_t fence_class,
-+ uint32_t type,
-+ uint32_t create_flags,
-+ void (*destroy) (struct ttm_fence_object * fence),
-+ struct ttm_fence_object *fence);
-+
-+/**
-+ * ttm_fence_object_create
-+ *
-+ * @fdev: Pointer to a struct ttm_fence_device.
-+ * @fence_class: Fence class for this fence.
-+ * @type: Fence type for this fence.
-+ * @create_flags: Flags indicating various actions at init time. At this point
-+ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
-+ * the command stream.
-+ * @c_fence: On successful termination, *(@c_fence) will point to the created
-+ * fence object.
-+ *
-+ * Create and initialize a struct ttm_fence_object. The destroy function will
-+ * be set to kfree().
-+ */
-+
-+extern int
-+ttm_fence_object_create(struct ttm_fence_device *fdev,
-+ uint32_t fence_class,
-+ uint32_t type,
-+ uint32_t create_flags,
-+ struct ttm_fence_object **c_fence);
-+
-+/**
-+ * ttm_fence_object_wait
-+ *
-+ * @fence: The fence object to wait on.
-+ * @lazy: Allow sleeps to reduce the cpu-usage if polling.
-+ * @interruptible: Sleep interruptible when waiting.
-+ * @type_mask: Wait for the given type_mask to signal.
-+ *
-+ * Wait for a fence to signal the given type_mask. The function will
-+ * perform a fence_flush using type_mask. (See ttm_fence_object_flush).
-+ *
-+ * Returns
-+ * -ERESTART if interrupted by a signal.
-+ * May return driver-specific error codes if timed out.
-+ */
-+
-+extern int
-+ttm_fence_object_wait(struct ttm_fence_object *fence,
-+ bool lazy, bool interruptible, uint32_t type_mask);
-+
-+/**
-+ * ttm_fence_object_flush
-+ *
-+ * @fence: The fence object to flush.
-+ * @flush_mask: Fence types to flush.
-+ *
-+ * Make sure that the given fence eventually signals the
-+ * types indicated by @flush_mask. Note that this may or may not
-+ * map to a CPU or GPU flush.
-+ */
-+
-+extern int
-+ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t flush_mask);
-+
-+/**
-+ * ttm_fence_get_info
-+ *
-+ * @fence: The fence object.
-+ *
-+ * Copy the info block from the fence while holding relevant locks.
-+ */
-+
-+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence);
-+
-+/**
-+ * ttm_fence_object_ref
-+ *
-+ * @fence: The fence object.
-+ *
-+ * Return a ref-counted pointer to the fence object indicated by @fence.
-+ */
-+
-+static inline struct ttm_fence_object *ttm_fence_object_ref(struct
-+ ttm_fence_object
-+ *fence)
-+{
-+ kref_get(&fence->kref);
-+ return fence;
-+}
-+
-+/**
-+ * ttm_fence_object_unref
-+ *
-+ * @p_fence: Pointer to a ref-counted pointer to a struct ttm_fence_object.
-+ *
-+ * Unreference the fence object pointed to by *(@p_fence), clearing
-+ * *(@p_fence).
-+ */
-+
-+extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence);
-+
-+/**
-+ * ttm_fence_object_signaled
-+ *
-+ * @fence: Pointer to the struct ttm_fence_object.
-+ * @mask: Type mask to check whether signaled.
-+ *
-+ * This function checks (without waiting) whether the fence object
-+ * pointed to by @fence has signaled the types indicated by @mask,
-+ * and returns 1 if true, 0 if false. This function does NOT perform
-+ * an implicit fence flush.
-+ */
-+
-+extern bool
-+ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask);
-+
-+/**
-+ * ttm_fence_class
-+ *
-+ * @fence: Pointer to the struct ttm_fence_object.
-+ *
-+ * Convenience function that returns the fence class of a struct ttm_fence_object.
-+ */
-+
-+static inline uint32_t ttm_fence_class(const struct ttm_fence_object *fence)
-+{
-+ return fence->fence_class;
-+}
-+
-+/**
-+ * ttm_fence_types
-+ *
-+ * @fence: Pointer to the struct ttm_fence_object.
-+ *
-+ * Convenience function that returns the fence types of a struct ttm_fence_object.
-+ */
-+
-+static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence)
-+{
-+ return fence->fence_type;
-+}
-+
-+/*
-+ * The functions below are wrappers to the above functions, with
-+ * similar names but with "fence_object" replaced by "sync_obj". These
-+ * wrappers are intended to be plugged directly into the buffer object
-+ * driver's sync object API, if the driver chooses to use
-+ * ttm_fence_objects as buffer object sync objects. In the prototypes
-+ * below, a sync_obj is cast to a struct ttm_fence_object, whereas a
-+ * sync_arg is cast to a uint32_t representing a fence_type argument.
-+ */
-+
-+extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
-+extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
-+ bool lazy, bool interruptible);
-+extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg);
-+extern void ttm_fence_sync_obj_unref(void **sync_obj);
-+extern void *ttm_fence_sync_obj_ref(void *sync_obj);
-+
-+#endif
-diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence.c b/drivers/gpu/drm/psb/ttm/ttm_fence.c
---- a/drivers/gpu/drm/psb/ttm/ttm_fence.c	1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/ttm/ttm_fence.c	2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,607 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "ttm/ttm_fence_api.h"
-+#include "ttm/ttm_fence_driver.h"
-+#include <linux/wait.h>
-+#include <linux/sched.h>
-+
-+#include <drm/drmP.h>
-+
-+/*
-+ * Simple implementation for now.
-+ */
-+
-+static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
-+{
-+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
-+
-+ printk(KERN_ERR "GPU lockup detected on engine %u "
-+ "fence type 0x%08x\n",
-+ (unsigned int)fence->fence_class, (unsigned int)mask);
-+ /*
-+ * Give engines some time to idle?
-+ */
-+
-+ write_lock(&fc->lock);
-+ ttm_fence_handler(fence->fdev, fence->fence_class,
-+ fence->sequence, mask, -EBUSY);
-+ write_unlock(&fc->lock);
-+}
-+
-+/*
-+ * Convenience function to be called by fence::wait methods that
-+ * need polling.
-+ */
-+
-+int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
-+ bool interruptible, uint32_t mask)
-+{
-+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
-+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
-+ uint32_t count = 0;
-+ int ret;
-+ unsigned long end_jiffies = fence->timeout_jiffies;
-+
-+ DECLARE_WAITQUEUE(entry, current);
-+ add_wait_queue(&fc->fence_queue, &entry);
-+
-+ ret = 0;
-+
-+ for (;;) {
-+ __set_current_state((interruptible) ?
-+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-+ if (ttm_fence_object_signaled(fence, mask))
-+ break;
-+ if (time_after_eq(jiffies, end_jiffies)) {
-+ if (driver->lockup)
-+ driver->lockup(fence, mask);
-+ else
-+ ttm_fence_lockup(fence, mask);
-+ continue;
-+ }
-+ if (lazy)
-+ schedule_timeout(1);
-+ else if ((++count & 0x0F) == 0) {
-+ __set_current_state(TASK_RUNNING);
-+ schedule();
-+ __set_current_state((interruptible) ?
-+ TASK_INTERRUPTIBLE :
-+ TASK_UNINTERRUPTIBLE);
-+ }
-+ if (interruptible && signal_pending(current)) {
-+ ret = -ERESTART;
-+ break;
-+ }
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&fc->fence_queue, &entry);
-+ return ret;
-+}
-+
-+/*
-+ * Typically called by the IRQ handler.
-+ */
-+
-+void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
-+ uint32_t sequence, uint32_t type, uint32_t error)
-+{
-+ int wake = 0;
-+ uint32_t diff;
-+ uint32_t relevant_type;
-+ uint32_t new_type;
-+ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
-+ const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
-+ struct list_head *head;
-+ struct ttm_fence_object *fence, *next;
-+ bool found = false;
-+
-+ if (list_empty(&fc->ring))
-+ return;
-+
-+ list_for_each_entry(fence, &fc->ring, ring) {
-+ diff = (sequence - fence->sequence) & fc->sequence_mask;
-+ if (diff > fc->wrap_diff) {
-+ found = true;
-+ break;
-+ }
-+ }
-+
-+ fc->waiting_types &= ~type;
-+ head = (found) ?
&fence->ring : &fc->ring; -+ -+ list_for_each_entry_safe_reverse(fence, next, head, ring) { -+ if (&fence->ring == &fc->ring) -+ break; -+ -+ DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n", -+ (unsigned long)fence, fence->sequence, -+ fence->fence_type); -+ -+ if (error) { -+ fence->info.error = error; -+ fence->info.signaled_types = fence->fence_type; -+ list_del_init(&fence->ring); -+ wake = 1; -+ break; -+ } -+ -+ relevant_type = type & fence->fence_type; -+ new_type = (fence->info.signaled_types | relevant_type) ^ -+ fence->info.signaled_types; -+ -+ if (new_type) { -+ fence->info.signaled_types |= new_type; -+ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n", -+ (unsigned long)fence, -+ fence->info.signaled_types); -+ -+ if (unlikely(driver->signaled)) -+ driver->signaled(fence); -+ -+ if (driver->needed_flush) -+ fc->pending_flush |= -+ driver->needed_flush(fence); -+ -+ if (new_type & fence->waiting_types) -+ wake = 1; -+ } -+ -+ fc->waiting_types |= -+ fence->waiting_types & ~fence->info.signaled_types; -+ -+ if (!(fence->fence_type & ~fence->info.signaled_types)) { -+ DRM_DEBUG("Fence completely signaled 0x%08lx\n", -+ (unsigned long)fence); -+ list_del_init(&fence->ring); -+ } -+ } -+ -+ /* -+ * Reinstate lost waiting types. -+ */ -+ -+ if ((fc->waiting_types & type) != type) { -+ head = head->prev; -+ list_for_each_entry(fence, head, ring) { -+ if (&fence->ring == &fc->ring) -+ break; -+ diff = -+ (fc->highest_waiting_sequence - -+ fence->sequence) & fc->sequence_mask; -+ if (diff > fc->wrap_diff) -+ break; -+ -+ fc->waiting_types |= -+ fence->waiting_types & ~fence->info.signaled_types; -+ } -+ } -+ -+ if (wake) -+ wake_up_all(&fc->fence_queue); -+} -+ -+static void ttm_fence_unring(struct ttm_fence_object *fence) -+{ -+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); -+ unsigned long irq_flags; -+ -+ write_lock_irqsave(&fc->lock, irq_flags); -+ list_del_init(&fence->ring); -+ write_unlock_irqrestore(&fc->lock, irq_flags); -+} -+ -+bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask) -+{ -+ unsigned long flags; -+ bool signaled; -+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence); -+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); -+ -+ mask &= fence->fence_type; -+ read_lock_irqsave(&fc->lock, flags); -+ signaled = (mask & fence->info.signaled_types) == mask; -+ read_unlock_irqrestore(&fc->lock, flags); -+ if (!signaled && driver->poll) { -+ write_lock_irqsave(&fc->lock, flags); -+ driver->poll(fence->fdev, fence->fence_class, mask); -+ signaled = (mask & fence->info.signaled_types) == mask; -+ write_unlock_irqrestore(&fc->lock, flags); -+ } -+ return signaled; -+} -+ -+int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type) -+{ -+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence); -+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); -+ unsigned long irq_flags; -+ uint32_t saved_pending_flush; -+ uint32_t diff; -+ bool call_flush; -+ -+ if (type & ~fence->fence_type) { -+ DRM_ERROR("Flush trying to extend fence type, " -+ "0x%x, 0x%x\n", type, fence->fence_type); -+ return -EINVAL; -+ } -+ -+ write_lock_irqsave(&fc->lock, irq_flags); -+ fence->waiting_types |= type; -+ fc->waiting_types |= fence->waiting_types; -+ diff = (fence->sequence - fc->highest_waiting_sequence) & -+ fc->sequence_mask; -+ -+ if (diff < fc->wrap_diff) -+ fc->highest_waiting_sequence = fence->sequence; -+ -+ /* -+ * fence->waiting_types has changed. 
Determine whether
-+ * we need to initiate some kind of flush as a result of this.
-+ */
-+
-+ saved_pending_flush = fc->pending_flush;
-+ if (driver->needed_flush)
-+ fc->pending_flush |= driver->needed_flush(fence);
-+
-+ if (driver->poll)
-+ driver->poll(fence->fdev, fence->fence_class,
-+ fence->waiting_types);
-+
-+ call_flush = (fc->pending_flush != 0);
-+ write_unlock_irqrestore(&fc->lock, irq_flags);
-+
-+ if (call_flush && driver->flush)
-+ driver->flush(fence->fdev, fence->fence_class);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Make sure old fence objects are signaled before their fence sequences are
-+ * wrapped around and reused.
-+ */
-+
-+void ttm_fence_flush_old(struct ttm_fence_device *fdev,
-+ uint32_t fence_class, uint32_t sequence)
-+{
-+ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
-+ struct ttm_fence_object *fence;
-+ unsigned long irq_flags;
-+ const struct ttm_fence_driver *driver = fdev->driver;
-+ bool call_flush;
-+
-+ uint32_t diff;
-+
-+ write_lock_irqsave(&fc->lock, irq_flags);
-+
-+ list_for_each_entry_reverse(fence, &fc->ring, ring) {
-+ diff = (sequence - fence->sequence) & fc->sequence_mask;
-+ if (diff <= fc->flush_diff)
-+ break;
-+
-+ fence->waiting_types = fence->fence_type;
-+ fc->waiting_types |= fence->fence_type;
-+
-+ if (driver->needed_flush)
-+ fc->pending_flush |= driver->needed_flush(fence);
-+ }
-+
-+ if (driver->poll)
-+ driver->poll(fdev, fence_class, fc->waiting_types);
-+
-+ call_flush = (fc->pending_flush != 0);
-+ write_unlock_irqrestore(&fc->lock, irq_flags);
-+
-+ if (call_flush && driver->flush)
-+ driver->flush(fdev, fence_class);
-+
-+ /*
-+ * FIXME: Should we implement a wait here for really old fences?
-+ */
-+
-+}
-+
-+int ttm_fence_object_wait(struct ttm_fence_object *fence,
-+ bool lazy, bool interruptible, uint32_t mask)
-+{
-+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
-+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
-+ int ret = 0;
-+ unsigned long timeout;
-+ unsigned long cur_jiffies;
-+ unsigned long to_jiffies;
-+
-+ if (mask & ~fence->fence_type) {
-+ DRM_ERROR("Wait trying to extend fence type"
-+ " 0x%08x 0x%08x\n", mask, fence->fence_type);
-+ BUG();
-+ return -EINVAL;
-+ }
-+
-+ if (driver->wait)
-+ return driver->wait(fence, lazy, interruptible, mask);
-+
-+ ttm_fence_object_flush(fence, mask);
-+ retry:
-+ if (!driver->has_irq ||
-+ driver->has_irq(fence->fdev, fence->fence_class, mask)) {
-+
-+ cur_jiffies = jiffies;
-+ to_jiffies = fence->timeout_jiffies;
-+
-+ timeout = (time_after(to_jiffies, cur_jiffies)) ?
-+ to_jiffies - cur_jiffies : 1; -+ -+ if (interruptible) -+ ret = wait_event_interruptible_timeout -+ (fc->fence_queue, -+ ttm_fence_object_signaled(fence, mask), timeout); -+ else -+ ret = wait_event_timeout -+ (fc->fence_queue, -+ ttm_fence_object_signaled(fence, mask), timeout); -+ -+ if (unlikely(ret == -ERESTARTSYS)) -+ return -ERESTART; -+ -+ if (unlikely(ret == 0)) { -+ if (driver->lockup) -+ driver->lockup(fence, mask); -+ else -+ ttm_fence_lockup(fence, mask); -+ goto retry; -+ } -+ -+ return 0; -+ } -+ -+ return ttm_fence_wait_polling(fence, lazy, interruptible, mask); -+} -+ -+int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags, -+ uint32_t fence_class, uint32_t type) -+{ -+ const struct ttm_fence_driver *driver = ttm_fence_driver(fence); -+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); -+ unsigned long flags; -+ uint32_t sequence; -+ unsigned long timeout; -+ int ret; -+ -+ ttm_fence_unring(fence); -+ ret = driver->emit(fence->fdev, -+ fence_class, fence_flags, &sequence, &timeout); -+ if (ret) -+ return ret; -+ -+ write_lock_irqsave(&fc->lock, flags); -+ fence->fence_class = fence_class; -+ fence->fence_type = type; -+ fence->waiting_types = 0; -+ fence->info.signaled_types = 0; -+ fence->info.error = 0; -+ fence->sequence = sequence; -+ fence->timeout_jiffies = timeout; -+ if (list_empty(&fc->ring)) -+ fc->highest_waiting_sequence = sequence - 1; -+ list_add_tail(&fence->ring, &fc->ring); -+ fc->latest_queued_sequence = sequence; -+ write_unlock_irqrestore(&fc->lock, flags); -+ return 0; -+} -+ -+int ttm_fence_object_init(struct ttm_fence_device *fdev, -+ uint32_t fence_class, -+ uint32_t type, -+ uint32_t create_flags, -+ void (*destroy) (struct ttm_fence_object *), -+ struct ttm_fence_object *fence) -+{ -+ int ret = 0; -+ -+ kref_init(&fence->kref); -+ fence->fence_class = fence_class; -+ fence->fence_type = type; -+ fence->info.signaled_types = 0; -+ fence->waiting_types = 0; -+ fence->sequence = 0; -+ fence->info.error = 0; -+ fence->fdev = fdev; -+ fence->destroy = destroy; -+ INIT_LIST_HEAD(&fence->ring); -+ atomic_inc(&fdev->count); -+ -+ if (create_flags & TTM_FENCE_FLAG_EMIT) { -+ ret = ttm_fence_object_emit(fence, create_flags, -+ fence->fence_class, type); -+ } -+ -+ return ret; -+} -+ -+int ttm_fence_object_create(struct ttm_fence_device *fdev, -+ uint32_t fence_class, -+ uint32_t type, -+ uint32_t create_flags, -+ struct ttm_fence_object **c_fence) -+{ -+ struct ttm_fence_object *fence; -+ int ret; -+ -+ ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*fence), false, false, false); -+ if (unlikely(ret != 0)) { -+ printk(KERN_ERR "Out of memory creating fence object\n"); -+ return ret; -+ } -+ -+ fence = kmalloc(sizeof(*fence), GFP_KERNEL); -+ if (!fence) { -+ printk(KERN_ERR "Out of memory creating fence object\n"); -+ ttm_mem_global_free(fdev->mem_glob, sizeof(*fence), false); -+ return -ENOMEM; -+ } -+ -+ ret = ttm_fence_object_init(fdev, fence_class, type, -+ create_flags, NULL, fence); -+ if (ret) { -+ ttm_fence_object_unref(&fence); -+ return ret; -+ } -+ *c_fence = fence; -+ -+ return 0; -+} -+ -+static void ttm_fence_object_destroy(struct kref *kref) -+{ -+ struct ttm_fence_object *fence = -+ container_of(kref, struct ttm_fence_object, kref); -+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); -+ unsigned long irq_flags; -+ -+ write_lock_irqsave(&fc->lock, irq_flags); -+ list_del_init(&fence->ring); -+ write_unlock_irqrestore(&fc->lock, irq_flags); -+ -+ atomic_dec(&fence->fdev->count); -+ if 
(fence->destroy) -+ fence->destroy(fence); -+ else { -+ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*fence), false); -+ kfree(fence); -+ } -+} -+ -+void ttm_fence_device_release(struct ttm_fence_device *fdev) -+{ -+ kfree(fdev->fence_class); -+} -+ -+int -+ttm_fence_device_init(int num_classes, -+ struct ttm_mem_global *mem_glob, -+ struct ttm_fence_device *fdev, -+ const struct ttm_fence_class_init *init, -+ bool replicate_init, const struct ttm_fence_driver *driver) -+{ -+ struct ttm_fence_class_manager *fc; -+ const struct ttm_fence_class_init *fci; -+ int i; -+ -+ fdev->mem_glob = mem_glob; -+ fdev->fence_class = kzalloc(num_classes * -+ sizeof(*fdev->fence_class), GFP_KERNEL); -+ -+ if (unlikely(!fdev->fence_class)) -+ return -ENOMEM; -+ -+ fdev->num_classes = num_classes; -+ atomic_set(&fdev->count, 0); -+ fdev->driver = driver; -+ -+ for (i = 0; i < fdev->num_classes; ++i) { -+ fc = &fdev->fence_class[i]; -+ fci = &init[(replicate_init) ? 0 : i]; -+ -+ fc->wrap_diff = fci->wrap_diff; -+ fc->flush_diff = fci->flush_diff; -+ fc->sequence_mask = fci->sequence_mask; -+ -+ rwlock_init(&fc->lock); -+ INIT_LIST_HEAD(&fc->ring); -+ init_waitqueue_head(&fc->fence_queue); -+ } -+ -+ return 0; -+} -+ -+struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence) -+{ -+ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence); -+ struct ttm_fence_info tmp; -+ unsigned long irq_flags; -+ -+ read_lock_irqsave(&fc->lock, irq_flags); -+ tmp = fence->info; -+ read_unlock_irqrestore(&fc->lock, irq_flags); -+ -+ return tmp; -+} -+ -+void ttm_fence_object_unref(struct ttm_fence_object **p_fence) -+{ -+ struct ttm_fence_object *fence = *p_fence; -+ -+ *p_fence = NULL; -+ (void)kref_put(&fence->kref, &ttm_fence_object_destroy); -+} -+ -+/* -+ * Placement / BO sync object glue. -+ */ -+ -+bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg) -+{ -+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj; -+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg; -+ -+ return ttm_fence_object_signaled(fence, fence_types); -+} -+ -+int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg, -+ bool lazy, bool interruptible) -+{ -+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj; -+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg; -+ -+ return ttm_fence_object_wait(fence, lazy, interruptible, fence_types); -+} -+ -+int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg) -+{ -+ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj; -+ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg; -+ -+ return ttm_fence_object_flush(fence, fence_types); -+} -+ -+void ttm_fence_sync_obj_unref(void **sync_obj) -+{ -+ ttm_fence_object_unref((struct ttm_fence_object **)sync_obj); -+} -+ -+void *ttm_fence_sync_obj_ref(void *sync_obj) -+{ -+ return (void *) -+ ttm_fence_object_ref((struct ttm_fence_object *)sync_obj); -+} -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h ---- a/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,309 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. 
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
-+ */
-+#ifndef _TTM_FENCE_DRIVER_H_
-+#define _TTM_FENCE_DRIVER_H_
-+
-+#include <linux/kref.h>
-+#include <linux/spinlock.h>
-+#include <linux/wait.h>
-+#include "ttm_fence_api.h"
-+#include "ttm_memory.h"
-+
-+/** @file ttm_fence_driver.h
-+ *
-+ * Definitions needed for a driver implementing the
-+ * ttm_fence subsystem.
-+ */
-+
-+/**
-+ * struct ttm_fence_class_manager:
-+ *
-+ * @wrap_diff: Sequence difference to catch 32-bit wrapping.
-+ * if (seqa - seqb) > @wrap_diff, then seqa < seqb.
-+ * @flush_diff: Sequence difference to trigger fence flush.
-+ * if (cur_seq - seqa) > @flush_diff, then consider fence object with
-+ * seqa as old and needing a flush.
-+ * @sequence_mask: Mask of valid bits in a fence sequence.
-+ * @lock: Lock protecting this struct as well as fence objects
-+ * associated with this struct.
-+ * @ring: Circular sequence-ordered list of fence objects.
-+ * @pending_flush: Fence types currently needing a flush.
-+ * @waiting_types: Fence types that are currently waited for.
-+ * @fence_queue: Queue of waiters on fences belonging to this fence class.
-+ * @highest_waiting_sequence: Sequence number of the fence with the highest
-+ * sequence number that is waited for.
-+ * @latest_queued_sequence: Sequence number of the latest fence queued on the ring.
-+ */
-+
-+struct ttm_fence_class_manager {
-+
-+ /*
-+ * Unprotected constant members.
-+ */
-+
-+ uint32_t wrap_diff;
-+ uint32_t flush_diff;
-+ uint32_t sequence_mask;
-+
-+ /*
-+ * The rwlock protects this structure as well as
-+ * the data in all fence objects belonging to this
-+ * class. This should be OK as most fence objects are
-+ * only read from once they're created.
-+ */
-+
-+ rwlock_t lock;
-+ struct list_head ring;
-+ uint32_t pending_flush;
-+ uint32_t waiting_types;
-+ wait_queue_head_t fence_queue;
-+ uint32_t highest_waiting_sequence;
-+ uint32_t latest_queued_sequence;
-+};
-+
-+/**
-+ * struct ttm_fence_device
-+ *
-+ * @mem_glob: Pointer to the global memory accounting object.
-+ * @fence_class: Array of fence class managers.
-+ * @num_classes: Array dimension of @fence_class.
-+ * @count: Current number of fence objects for statistics.
-+ * @driver: Driver struct.
-+ *
-+ * Provided in the driver interface so that the driver can derive
-+ * from this struct for its driver_private, and accordingly
-+ * access the driver_private from the fence driver callbacks.
-+ *
-+ * All members except "count" are initialized at creation and
-+ * never touched after that. No protection needed.
-+ *
-+ * This struct is private to the fence implementation and to the fence
-+ * driver callbacks, and may otherwise be used by drivers only to
-+ * obtain the derived device_private object using container_of().
-+ */
-+
-+struct ttm_fence_device {
-+ struct ttm_mem_global *mem_glob;
-+ struct ttm_fence_class_manager *fence_class;
-+ uint32_t num_classes;
-+ atomic_t count;
-+ const struct ttm_fence_driver *driver;
-+};
-+
-+/**
-+ * struct ttm_fence_class_init
-+ *
-+ * @wrap_diff: Fence sequence number wrap indicator. If
-+ * (sequence1 - sequence2) > @wrap_diff, then sequence1 is
-+ * considered to be older than sequence2.
-+ * @flush_diff: Fence sequence number flush indicator.
-+ * If a non-completely-signaled fence has a fence sequence number
-+ * sequence1 and (sequence1 - current_emit_sequence) > @flush_diff,
-+ * the fence is considered too old and it will be flushed upon the
-+ * next call of ttm_fence_flush_old(), to make sure no fences with
-+ * stale sequence numbers remain unsignaled. @flush_diff should
-+ * be sufficiently less than @wrap_diff.
-+ * @sequence_mask: Mask with valid bits of the fence sequence
-+ * number set to 1.
-+ *
-+ * This struct is used as input to ttm_fence_device_init.
-+ */
-+
-+struct ttm_fence_class_init {
-+ uint32_t wrap_diff;
-+ uint32_t flush_diff;
-+ uint32_t sequence_mask;
-+};
-+
-+/**
-+ * struct ttm_fence_driver
-+ *
-+ * @has_irq: Called by a potential waiter. Should return 1 if a
-+ * fence object with indicated parameters is expected to signal
-+ * automatically, and 0 if the fence implementation needs to
-+ * repeatedly call @poll to make it signal.
-+ * @emit: Make sure a fence with the given parameters is
-+ * present in the indicated command stream. Return its sequence number
-+ * in "breadcrumb".
-+ * @poll: Check and report sequences of the given "fence_class"
-+ * that have signaled "types".
-+ * @flush: Make sure that the types indicated by the bitfield
-+ * ttm_fence_class_manager::pending_flush will eventually
-+ * signal. These bits have been put together using the
-+ * result from the needed_flush function described below.
-+ * @needed_flush: Given the fence_class and fence_types indicated by
-+ * "fence", and the last received fence sequence of this
-+ * fence class, indicate what types need a fence flush to
-+ * signal. Return as a bitfield.
-+ * @wait: Set to non-NULL if the driver wants to override the fence
-+ * wait implementation. Return 0 on success, -EBUSY on failure,
-+ * and -ERESTART if interruptible and a signal is pending.
-+ * @signaled: Driver callback that is called whenever a
-+ * ttm_fence_object::signaled_types has changed status.
-+ * This function is called from atomic context,
-+ * with the ttm_fence_class_manager::lock held in write mode.
-+ * @lockup: Driver callback that is called whenever a wait has exceeded
-+ * the lifetime of a fence object.
-+ * If there is a GPU lockup,
-+ * this function should, if possible, reset the GPU,
-+ * call the ttm_fence_handler with an error status, and
-+ * return. If no lockup was detected, simply extend the
-+ * fence timeout_jiffies and return.
The driver might
-+ * want to protect the lockup check with a mutex and cache a
-+ * non-locked-up status for a while to avoid an excessive
-+ * amount of lockup checks from every waiting thread.
-+ */
-+
-+struct ttm_fence_driver {
-+ bool (*has_irq) (struct ttm_fence_device * fdev,
-+ uint32_t fence_class, uint32_t flags);
-+ int (*emit) (struct ttm_fence_device * fdev,
-+ uint32_t fence_class,
-+ uint32_t flags,
-+ uint32_t * breadcrumb, unsigned long *timeout_jiffies);
-+ void (*flush) (struct ttm_fence_device * fdev, uint32_t fence_class);
-+ void (*poll) (struct ttm_fence_device * fdev,
-+ uint32_t fence_class, uint32_t types);
-+ uint32_t(*needed_flush)
-+ (struct ttm_fence_object * fence);
-+ int (*wait) (struct ttm_fence_object * fence, bool lazy,
-+ bool interruptible, uint32_t mask);
-+ void (*signaled) (struct ttm_fence_object * fence);
-+ void (*lockup) (struct ttm_fence_object * fence, uint32_t fence_types);
-+};
-+
-+/**
-+ * function ttm_fence_device_init
-+ *
-+ * @num_classes: Number of fence classes for this fence implementation.
-+ * @mem_glob: Pointer to the global memory accounting info.
-+ * @fdev: Pointer to an uninitialised struct ttm_fence_device.
-+ * @init: Array of initialization info for each fence class.
-+ * @replicate_init: Use the first @init initialization info for all classes.
-+ * @driver: Driver callbacks.
-+ *
-+ * Initialize a struct ttm_fence_device structure. Returns -ENOMEM if
-+ * out-of-memory. Otherwise returns 0.
-+ */
-+extern int
-+ttm_fence_device_init(int num_classes,
-+ struct ttm_mem_global *mem_glob,
-+ struct ttm_fence_device *fdev,
-+ const struct ttm_fence_class_init *init,
-+ bool replicate_init,
-+ const struct ttm_fence_driver *driver);
-+
-+/**
-+ * function ttm_fence_device_release
-+ *
-+ * @fdev: Pointer to the fence device.
-+ *
-+ * Release all resources held by a fence device. Note that before
-+ * this function is called, the caller must have made sure all fence
-+ * objects belonging to this fence device are completely signaled.
-+ */
-+
-+extern void ttm_fence_device_release(struct ttm_fence_device *fdev);
-+
-+/**
-+ * ttm_fence_handler - the fence handler.
-+ *
-+ * @fdev: Pointer to the fence device.
-+ * @fence_class: Fence class that signals.
-+ * @sequence: Signaled sequence.
-+ * @type: Types that signal.
-+ * @error: Error from the engine.
-+ *
-+ * This function signals all fences with a sequence previous to the
-+ * @sequence argument, and belonging to @fence_class. The signaled fence
-+ * types are provided in @type. If error is non-zero, the error member
-+ * of the fence with sequence = @sequence is set to @error. This value
-+ * may be reported back to user-space, indicating, for example, an illegal
-+ * 3D command or illegal mpeg data.
-+ *
-+ * This function is typically called from the driver::poll method when the
-+ * command sequence preceding the fence marker has executed. It should be
-+ * called with the ttm_fence_class_manager::lock held in write mode and
-+ * may be called from interrupt context.
-+ */
-+
-+extern void
-+ttm_fence_handler(struct ttm_fence_device *fdev,
-+ uint32_t fence_class,
-+ uint32_t sequence, uint32_t type, uint32_t error);
-+
-+/**
-+ * ttm_fence_driver_from_dev
-+ *
-+ * @fdev: The ttm fence device.
-+ *
-+ * Returns a pointer to the fence driver struct.
-+ */ -+ -+static inline const struct ttm_fence_driver *ttm_fence_driver_from_dev(struct -+ ttm_fence_device -+ *fdev) -+{ -+ return fdev->driver; -+} -+ -+/** -+ * ttm_fence_driver -+ * -+ * @fence: Pointer to a ttm fence object. -+ * -+ * Returns a pointer to the fence driver struct. -+ */ -+ -+static inline const struct ttm_fence_driver *ttm_fence_driver(struct -+ ttm_fence_object -+ *fence) -+{ -+ return ttm_fence_driver_from_dev(fence->fdev); -+} -+ -+/** -+ * ttm_fence_fc -+ * -+ * @fence: Pointer to a ttm fence object. -+ * -+ * Returns a pointer to the struct ttm_fence_class_manager for the -+ * fence class of @fence. -+ */ -+ -+static inline struct ttm_fence_class_manager *ttm_fence_fc(struct -+ ttm_fence_object -+ *fence) -+{ -+ return &fence->fdev->fence_class[fence->fence_class]; -+} -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_user.c b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c ---- a/drivers/gpu/drm/psb/ttm/ttm_fence_user.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,242 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#include <drm/drmP.h> -+#include "ttm/ttm_fence_user.h" -+#include "ttm/ttm_object.h" -+#include "ttm/ttm_fence_driver.h" -+#include "ttm/ttm_userobj_api.h" -+ -+/** -+ * struct ttm_fence_user_object -+ * -+ * @base: The base object used for user-space visibility and refcounting. -+ * -+ * @fence: The fence object itself. 
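-+ *
-+ * A user-space handle maps to @base, and the containing structure is
-+ * recovered with container_of(base, struct ttm_fence_user_object, base),
-+ * as done in ttm_fence_user_object_lookup() below.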
-+ *
-+ */
-+
-+struct ttm_fence_user_object {
-+ struct ttm_base_object base;
-+ struct ttm_fence_object fence;
-+};
-+
-+static struct ttm_fence_user_object *ttm_fence_user_object_lookup(struct
-+ ttm_object_file
-+ *tfile,
-+ uint32_t
-+ handle)
-+{
-+ struct ttm_base_object *base;
-+
-+ base = ttm_base_object_lookup(tfile, handle);
-+ if (unlikely(base == NULL)) {
-+ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
-+ (unsigned long)handle);
-+ return NULL;
-+ }
-+
-+ if (unlikely(base->object_type != ttm_fence_type)) {
-+ ttm_base_object_unref(&base);
-+ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
-+ (unsigned long)handle);
-+ return NULL;
-+ }
-+
-+ return container_of(base, struct ttm_fence_user_object, base);
-+}
-+
-+/*
-+ * The fence object destructor.
-+ */
-+
-+static void ttm_fence_user_destroy(struct ttm_fence_object *fence)
-+{
-+ struct ttm_fence_user_object *ufence =
-+ container_of(fence, struct ttm_fence_user_object, fence);
-+
-+ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence), false);
-+ kfree(ufence);
-+}
-+
-+/*
-+ * The base object destructor. We basically only unreference the
-+ * attached fence object.
-+ */
-+
-+static void ttm_fence_user_release(struct ttm_base_object **p_base)
-+{
-+ struct ttm_fence_user_object *ufence;
-+ struct ttm_base_object *base = *p_base;
-+ struct ttm_fence_object *fence;
-+
-+ *p_base = NULL;
-+
-+ if (unlikely(base == NULL))
-+ return;
-+
-+ ufence = container_of(base, struct ttm_fence_user_object, base);
-+ fence = &ufence->fence;
-+ ttm_fence_object_unref(&fence);
-+}
-+
-+int
-+ttm_fence_user_create(struct ttm_fence_device *fdev,
-+ struct ttm_object_file *tfile,
-+ uint32_t fence_class,
-+ uint32_t fence_types,
-+ uint32_t create_flags,
-+ struct ttm_fence_object **fence, uint32_t * user_handle)
-+{
-+ int ret;
-+ struct ttm_fence_object *tmp;
-+ struct ttm_fence_user_object *ufence;
-+
-+ ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*ufence), false, false, false);
-+ if (unlikely(ret != 0))
-+ return -ENOMEM;
-+
-+ ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
-+ if (unlikely(ufence == NULL)) {
-+ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
-+ return -ENOMEM;
-+ }
-+
-+ ret = ttm_fence_object_init(fdev,
-+ fence_class,
-+ fence_types, create_flags,
-+ &ttm_fence_user_destroy, &ufence->fence);
-+
-+ if (unlikely(ret != 0))
-+ goto out_err0;
-+
-+ /*
-+ * One fence ref is held by the fence ptr we return.
-+ * The other one by the base object. Need to up the
-+ * fence refcount before we publish this object to
-+ * user-space.
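-+ * On the error path below (out_err1), both references are
-+ * dropped again.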
-+ */ -+ -+ tmp = ttm_fence_object_ref(&ufence->fence); -+ ret = ttm_base_object_init(tfile, &ufence->base, -+ false, ttm_fence_type, -+ &ttm_fence_user_release, NULL); -+ -+ if (unlikely(ret != 0)) -+ goto out_err1; -+ -+ *fence = &ufence->fence; -+ *user_handle = ufence->base.hash.key; -+ -+ return 0; -+ out_err1: -+ ttm_fence_object_unref(&tmp); -+ tmp = &ufence->fence; -+ ttm_fence_object_unref(&tmp); -+ return ret; -+ out_err0: -+ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false); -+ kfree(ufence); -+ return ret; -+} -+ -+int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data) -+{ -+ int ret; -+ union ttm_fence_signaled_arg *arg = data; -+ struct ttm_fence_object *fence; -+ struct ttm_fence_info info; -+ struct ttm_fence_user_object *ufence; -+ struct ttm_base_object *base; -+ ret = 0; -+ -+ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle); -+ if (unlikely(ufence == NULL)) -+ return -EINVAL; -+ -+ fence = &ufence->fence; -+ -+ if (arg->req.flush) { -+ ret = ttm_fence_object_flush(fence, arg->req.fence_type); -+ if (unlikely(ret != 0)) -+ goto out; -+ } -+ -+ info = ttm_fence_get_info(fence); -+ arg->rep.signaled_types = info.signaled_types; -+ arg->rep.fence_error = info.error; -+ -+ out: -+ base = &ufence->base; -+ ttm_base_object_unref(&base); -+ return ret; -+} -+ -+int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data) -+{ -+ int ret; -+ union ttm_fence_finish_arg *arg = data; -+ struct ttm_fence_user_object *ufence; -+ struct ttm_base_object *base; -+ struct ttm_fence_object *fence; -+ ret = 0; -+ -+ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle); -+ if (unlikely(ufence == NULL)) -+ return -EINVAL; -+ -+ fence = &ufence->fence; -+ -+ ret = ttm_fence_object_wait(fence, -+ arg->req.mode & TTM_FENCE_FINISH_MODE_LAZY, -+ true, arg->req.fence_type); -+ if (likely(ret == 0)) { -+ struct ttm_fence_info info = ttm_fence_get_info(fence); -+ -+ arg->rep.signaled_types = info.signaled_types; -+ arg->rep.fence_error = info.error; -+ } -+ -+ base = &ufence->base; -+ ttm_base_object_unref(&base); -+ -+ return ret; -+} -+ -+int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data) -+{ -+ struct ttm_fence_unref_arg *arg = data; -+ int ret = 0; -+ -+ ret = ttm_ref_object_base_unref(tfile, arg->handle, ttm_fence_type); -+ return ret; -+} -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_user.h b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h ---- a/drivers/gpu/drm/psb/ttm/ttm_fence_user.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,147 @@ -+/************************************************************************** -+ * -+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ **************************************************************************/ -+/* -+ * Authors -+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#ifndef TTM_FENCE_USER_H -+#define TTM_FENCE_USER_H -+ -+#if !defined(__KERNEL__) && !defined(_KERNEL) -+#include <stdint.h> -+#endif -+ -+#define TTM_FENCE_MAJOR 0 -+#define TTM_FENCE_MINOR 1 -+#define TTM_FENCE_PL 0 -+#define TTM_FENCE_DATE "080819" -+ -+/** -+ * struct ttm_fence_signaled_req -+ * -+ * @handle: Handle to the fence object. Input. -+ * -+ * @fence_type: Fence types we want to flush. Input. -+ * -+ * @flush: Boolean. Flush the indicated fence_types. Input. -+ * -+ * Argument to the TTM_FENCE_SIGNALED ioctl. -+ */ -+ -+struct ttm_fence_signaled_req { -+ uint32_t handle; -+ uint32_t fence_type; -+ int32_t flush; -+ uint32_t pad64; -+}; -+ -+/** -+ * struct ttm_fence_rep -+ * -+ * @signaled_types: Fence type that has signaled. -+ * -+ * @fence_error: Command execution error. -+ * Hardware errors that are consequences of the execution -+ * of the command stream preceding the fence are reported -+ * here. -+ * -+ * Output argument to the TTM_FENCE_SIGNALED and -+ * TTM_FENCE_FINISH ioctls. -+ */ -+ -+struct ttm_fence_rep { -+ uint32_t signaled_types; -+ uint32_t fence_error; -+}; -+ -+union ttm_fence_signaled_arg { -+ struct ttm_fence_signaled_req req; -+ struct ttm_fence_rep rep; -+}; -+ -+/* -+ * Waiting mode flags for the TTM_FENCE_FINISH ioctl. -+ * -+ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling -+ * wait. -+ * -+ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU, -+ * but return -EBUSY if the buffer is busy. -+ */ -+ -+#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0) -+#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1) -+ -+/** -+ * struct ttm_fence_finish_req -+ * -+ * @handle: Handle to the fence object. Input. -+ * -+ * @fence_type: Fence types we want to finish. -+ * -+ * @mode: Wait mode. -+ * -+ * Input to the TTM_FENCE_FINISH ioctl. -+ */ -+ -+struct ttm_fence_finish_req { -+ uint32_t handle; -+ uint32_t fence_type; -+ uint32_t mode; -+ uint32_t pad64; -+}; -+ -+union ttm_fence_finish_arg { -+ struct ttm_fence_finish_req req; -+ struct ttm_fence_rep rep; -+}; -+ -+/** -+ * struct ttm_fence_unref_arg -+ * -+ * @handle: Handle to the fence object. -+ * -+ * Argument to the TTM_FENCE_UNREF ioctl. 
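-+ *
-+ * Illustrative user-space sketch (not part of this header; it assumes
-+ * the driver reports the fence extension offset "ext_base" and that
-+ * libdrm's drmCommandWrite() is the ioctl transport):
-+ *
-+ *   struct ttm_fence_unref_arg arg = { .handle = handle };
-+ *   drmCommandWrite(fd, ext_base + TTM_FENCE_UNREF, &arg, sizeof(arg));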
-+ */
-+
-+struct ttm_fence_unref_arg {
-+ uint32_t handle;
-+ uint32_t pad64;
-+};
-+
-+/*
-+ * Ioctl offsets from extension start.
-+ */
-+
-+#define TTM_FENCE_SIGNALED 0x01
-+#define TTM_FENCE_FINISH 0x02
-+#define TTM_FENCE_UNREF 0x03
-+
-+#endif
-diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_lock.c b/drivers/gpu/drm/psb/ttm/ttm_lock.c
---- a/drivers/gpu/drm/psb/ttm/ttm_lock.c	1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/ttm/ttm_lock.c	2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,162 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#include "ttm/ttm_lock.h" -+#include <asm/atomic.h> -+#include <linux/errno.h> -+#include <linux/wait.h> -+#include <linux/sched.h> -+ -+void ttm_lock_init(struct ttm_lock *lock) -+{ -+ init_waitqueue_head(&lock->queue); -+ atomic_set(&lock->write_lock_pending, 0); -+ atomic_set(&lock->readers, 0); -+ lock->kill_takers = false; -+ lock->signal = SIGKILL; -+} -+ -+void ttm_read_unlock(struct ttm_lock *lock) -+{ -+ if (atomic_dec_and_test(&lock->readers)) -+ wake_up_all(&lock->queue); -+} -+ -+int ttm_read_lock(struct ttm_lock *lock, bool interruptible) -+{ -+ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) { -+ int ret; -+ -+ if (!interruptible) { -+ wait_event(lock->queue, -+ atomic_read(&lock->write_lock_pending) == 0); -+ continue; -+ } -+ ret = wait_event_interruptible -+ (lock->queue, atomic_read(&lock->write_lock_pending) == 0); -+ if (ret) -+ return -ERESTART; -+ } -+ -+ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) { -+ int ret; -+ if (!interruptible) { -+ wait_event(lock->queue, -+ atomic_read(&lock->readers) != -1); -+ continue; -+ } -+ ret = wait_event_interruptible -+ (lock->queue, atomic_read(&lock->readers) != -1); -+ if (ret) -+ return -ERESTART; -+ } -+ -+ if (unlikely(lock->kill_takers)) { -+ send_sig(lock->signal, current, 0); -+ ttm_read_unlock(lock); -+ return -ERESTART; -+ } -+ -+ return 0; -+} -+ -+static int __ttm_write_unlock(struct ttm_lock *lock) -+{ -+ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1)) -+ return -EINVAL; -+ wake_up_all(&lock->queue); -+ return 0; -+} -+ -+static void ttm_write_lock_remove(struct ttm_base_object **p_base) -+{ -+ struct ttm_base_object *base = *p_base; -+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base); -+ int ret; -+ -+ *p_base = NULL; -+ ret = __ttm_write_unlock(lock); -+ BUG_ON(ret != 0); -+} -+ -+int ttm_write_lock(struct ttm_lock *lock, -+ bool interruptible, -+ struct ttm_object_file *tfile) -+{ -+ int ret = 0; -+ -+ atomic_inc(&lock->write_lock_pending); -+ -+ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { -+ if (!interruptible) { -+ wait_event(lock->queue, -+ atomic_read(&lock->readers) == 0); -+ continue; -+ } -+ ret = wait_event_interruptible -+ (lock->queue, atomic_read(&lock->readers) == 0); -+ -+ if (ret) { -+ if (atomic_dec_and_test(&lock->write_lock_pending)) -+ wake_up_all(&lock->queue); -+ return -ERESTART; -+ } -+ } -+ -+ if (atomic_dec_and_test(&lock->write_lock_pending)) -+ wake_up_all(&lock->queue); -+ -+ if (unlikely(lock->kill_takers)) { -+ send_sig(lock->signal, current, 0); -+ __ttm_write_unlock(lock); -+ return -ERESTART; -+ } -+ -+ /* -+ * Add a base-object, the destructor of which will -+ * make sure the lock is released if the client dies -+ * while holding it. 
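-+ * The matching ttm_write_unlock() drops that base-object
-+ * reference, which releases the lock via ttm_write_lock_remove().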
-+ */ -+ -+ ret = ttm_base_object_init(tfile, &lock->base, false, -+ ttm_lock_type, &ttm_write_lock_remove, NULL); -+ if (ret) -+ (void)__ttm_write_unlock(lock); -+ -+ return ret; -+} -+ -+int ttm_write_unlock(struct ttm_lock *lock, struct ttm_object_file *tfile) -+{ -+ return ttm_ref_object_base_unref(tfile, -+ lock->base.hash.key, TTM_REF_USAGE); -+} -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_lock.h b/drivers/gpu/drm/psb/ttm/ttm_lock.h ---- a/drivers/gpu/drm/psb/ttm/ttm_lock.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_lock.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,181 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+/** @file ttm_lock.h -+ * This file implements a simple replacement for the buffer manager use -+ * of the DRM heavyweight hardware lock. -+ * The lock is a read-write lock. Taking it in read mode is fast, and -+ * intended for in-kernel use only. -+ * Taking it in write mode is slow. -+ * -+ * The write mode is used only when there is a need to block all -+ * user-space processes from validating buffers. -+ * It's allowed to leave kernel space with the write lock held. -+ * If a user-space process dies while having the write-lock, -+ * it will be released during the file descriptor release. -+ * -+ * The read lock is typically placed at the start of an IOCTL- or -+ * user-space callable function that may end up allocating a memory area. -+ * This includes setstatus, super-ioctls and faults; the latter may move -+ * unmappable regions to mappable. It's a bug to leave kernel space with the -+ * read lock held. -+ * -+ * Both read- and write lock taking is interruptible for low signal-delivery -+ * latency. The locking functions will return -ERESTART if interrupted by a -+ * signal. -+ * -+ * Locking order: The lock should be taken BEFORE any TTM mutexes -+ * or spinlocks. -+ * -+ * Typical usages: -+ * a) VT-switching, when we want to clean VRAM and perhaps AGP. The lock -+ * stops it from being repopulated. 
-+ * b) out-of-VRAM or out-of-aperture space, in which case the process
-+ * receiving the out-of-space notification may take the lock in write mode
-+ * and evict all buffers prior to start validating its own buffers.
-+ */
-+
-+#ifndef _TTM_LOCK_H_
-+#define _TTM_LOCK_H_
-+
-+#include "ttm_object.h"
-+#include <linux/wait.h>
-+#include <asm/atomic.h>
-+
-+/**
-+ * struct ttm_lock
-+ *
-+ * @base: ttm base object used solely to release the lock if the client
-+ * holding the lock dies.
-+ * @queue: Queue for processes waiting for lock change-of-status.
-+ * @write_lock_pending: Flag indicating that a write-lock is pending. Avoids
-+ * write lock starvation.
-+ * @readers: The lock status: A negative number indicates that a write lock is
-+ * held. Positive values indicate number of concurrent readers.
-+ * @kill_takers: Whether to send @signal to processes taking the lock
-+ * instead of granting it; see ttm_lock_set_kill().
-+ * @signal: Signal sent to lock takers when @kill_takers is set.
-+ */
-+
-+struct ttm_lock {
-+ struct ttm_base_object base;
-+ wait_queue_head_t queue;
-+ atomic_t write_lock_pending;
-+ atomic_t readers;
-+ bool kill_takers;
-+ int signal;
-+};
-+
-+/**
-+ * ttm_lock_init
-+ *
-+ * @lock: Pointer to a struct ttm_lock
-+ * Initializes the lock.
-+ */
-+extern void ttm_lock_init(struct ttm_lock *lock);
-+
-+/**
-+ * ttm_read_unlock
-+ *
-+ * @lock: Pointer to a struct ttm_lock
-+ *
-+ * Releases a read lock.
-+ */
-+
-+extern void ttm_read_unlock(struct ttm_lock *lock);
-+
-+/**
-+ * ttm_read_lock
-+ *
-+ * @lock: Pointer to a struct ttm_lock
-+ * @interruptible: Interruptible sleeping while waiting for a lock.
-+ *
-+ * Takes the lock in read mode.
-+ * Returns:
-+ * -ERESTART If interrupted by a signal and interruptible is true.
-+ */
-+
-+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
-+
-+/**
-+ * ttm_write_lock
-+ *
-+ * @lock: Pointer to a struct ttm_lock
-+ * @interruptible: Interruptible sleeping while waiting for a lock.
-+ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
-+ * application taking the lock.
-+ *
-+ * Takes the lock in write mode.
-+ * Returns:
-+ * -ERESTART If interrupted by a signal and interruptible is true.
-+ * -ENOMEM: Out of memory when locking.
-+ */
-+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible,
-+ struct ttm_object_file *tfile);
-+
-+/**
-+ * ttm_write_unlock
-+ *
-+ * @lock: Pointer to a struct ttm_lock
-+ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
-+ * application taking the lock.
-+ *
-+ * Releases a write lock.
-+ * Returns:
-+ * -EINVAL If the lock was not held.
-+ */
-+extern int ttm_write_unlock(struct ttm_lock *lock,
-+ struct ttm_object_file *tfile);
-+
-+/**
-+ * ttm_lock_set_kill
-+ *
-+ * @lock: Pointer to a struct ttm_lock
-+ * @val: Boolean whether to kill processes taking the lock.
-+ * @signal: Signal to send to the process taking the lock.
-+ *
-+ * The kill-when-taking-lock functionality is used to kill processes that keep
-+ * on using the TTM functionality when its resources have been taken down, for
-+ * example when the X server exits. A typical sequence would look like this:
-+ * - X server takes lock in write mode.
-+ * - ttm_lock_set_kill() is called with @val set to true.
-+ * - As part of X server exit, TTM resources are taken down.
-+ * - X server releases the lock on file release.
-+ * - Another dri client wants to render, takes the lock and is killed.
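-+ *
-+ * The same sequence as a code sketch (illustrative only; SIGTERM is
-+ * just an example signal, and error handling is omitted):
-+ *
-+ *   ttm_write_lock(&lock, true, tfile);
-+ *   ttm_lock_set_kill(&lock, true, SIGTERM);
-+ *   ... take down TTM resources ...
-+ *   ttm_write_unlock(&lock, tfile);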
-+ * -+ */ -+ -+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, int signal) -+{ -+ lock->kill_takers = val; -+ if (val) -+ lock->signal = signal; -+} -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_memory.c b/drivers/gpu/drm/psb/ttm/ttm_memory.c ---- a/drivers/gpu/drm/psb/ttm/ttm_memory.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_memory.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,232 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+ -+#include "ttm/ttm_memory.h" -+#include <linux/spinlock.h> -+#include <linux/sched.h> -+#include <linux/wait.h> -+#include <linux/mm.h> -+ -+#define TTM_MEMORY_ALLOC_RETRIES 4 -+ -+/** -+ * At this point we only support a single shrink callback. -+ * Extend this if needed, perhaps using a linked list of callbacks. -+ * Note that this function is reentrant: -+ * many threads may try to swap out at any given time. -+ */ -+ -+static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue, -+ uint64_t extra) -+{ -+ int ret; -+ struct ttm_mem_shrink *shrink; -+ uint64_t target; -+ uint64_t total_target; -+ -+ spin_lock(&glob->lock); -+ if (glob->shrink == NULL) -+ goto out; -+ -+ if (from_workqueue) { -+ target = glob->swap_limit; -+ total_target = glob->total_memory_swap_limit; -+ } else if (capable(CAP_SYS_ADMIN)) { -+ total_target = glob->emer_total_memory; -+ target = glob->emer_memory; -+ } else { -+ total_target = glob->max_total_memory; -+ target = glob->max_memory; -+ } -+ -+ total_target = (extra >= total_target) ? 0: total_target - extra; -+ target = (extra >= target) ? 
0: target - extra; -+ -+ while (glob->used_memory > target || -+ glob->used_total_memory > total_target) { -+ shrink = glob->shrink; -+ spin_unlock(&glob->lock); -+ ret = shrink->do_shrink(shrink); -+ spin_lock(&glob->lock); -+ if (unlikely(ret != 0)) -+ goto out; -+ } -+ out: -+ spin_unlock(&glob->lock); -+} -+ -+static void ttm_shrink_work(struct work_struct *work) -+{ -+ struct ttm_mem_global *glob = -+ container_of(work, struct ttm_mem_global, work); -+ -+ ttm_shrink(glob, true, 0ULL); -+} -+ -+int ttm_mem_global_init(struct ttm_mem_global *glob) -+{ -+ struct sysinfo si; -+ uint64_t mem; -+ -+ spin_lock_init(&glob->lock); -+ glob->swap_queue = create_singlethread_workqueue("ttm_swap"); -+ INIT_WORK(&glob->work, ttm_shrink_work); -+ init_waitqueue_head(&glob->queue); -+ -+ si_meminfo(&si); -+ -+ mem = si.totalram - si.totalhigh; -+ mem *= si.mem_unit; -+ -+ glob->max_memory = mem >> 1; -+ glob->emer_memory = glob->max_memory + (mem >> 2); -+ glob->swap_limit = glob->max_memory - (mem >> 5); -+ glob->used_memory = 0; -+ glob->used_total_memory = 0; -+ glob->shrink = NULL; -+ -+ mem = si.totalram; -+ mem *= si.mem_unit; -+ -+ glob->max_total_memory = mem >> 1; -+ glob->emer_total_memory = glob->max_total_memory + (mem >> 2); -+ glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 5); -+ -+ printk(KERN_INFO "TTM available graphics memory: %llu MiB\n", -+ glob->max_total_memory >> 20); -+ printk(KERN_INFO "TTM available object memory: %llu MiB\n", -+ glob->max_memory >> 20); -+ printk(KERN_INFO "TTM available swap breakpoint: %llu MiB\n", -+ glob->swap_limit >> 20); -+ -+ return 0; -+} -+ -+void ttm_mem_global_release(struct ttm_mem_global *glob) -+{ -+ printk(KERN_INFO "Used total memory is %llu bytes.\n", -+ (unsigned long long)glob->used_total_memory); -+ flush_workqueue(glob->swap_queue); -+ destroy_workqueue(glob->swap_queue); -+ glob->swap_queue = NULL; -+} -+ -+static inline void ttm_check_swapping(struct ttm_mem_global *glob) -+{ -+ bool needs_swapping; -+ -+ spin_lock(&glob->lock); -+ needs_swapping = (glob->used_memory > glob->swap_limit || -+ glob->used_total_memory > -+ glob->total_memory_swap_limit); -+ spin_unlock(&glob->lock); -+ -+ if (unlikely(needs_swapping)) -+ (void)queue_work(glob->swap_queue, &glob->work); -+ -+} -+ -+void ttm_mem_global_free(struct ttm_mem_global *glob, -+ uint64_t amount, bool himem) -+{ -+ spin_lock(&glob->lock); -+ glob->used_total_memory -= amount; -+ if (!himem) -+ glob->used_memory -= amount; -+ wake_up_all(&glob->queue); -+ spin_unlock(&glob->lock); -+} -+ -+static int ttm_mem_global_reserve(struct ttm_mem_global *glob, -+ uint64_t amount, bool himem, bool reserve) -+{ -+ uint64_t limit; -+ uint64_t lomem_limit; -+ int ret = -ENOMEM; -+ -+ spin_lock(&glob->lock); -+ -+ if (capable(CAP_SYS_ADMIN)) { -+ limit = glob->emer_total_memory; -+ lomem_limit = glob->emer_memory; -+ } else { -+ limit = glob->max_total_memory; -+ lomem_limit = glob->max_memory; -+ } -+ -+ if (unlikely(glob->used_total_memory + amount > limit)) -+ goto out_unlock; -+ if (unlikely(!himem && glob->used_memory + amount > lomem_limit)) -+ goto out_unlock; -+ -+ if (reserve) { -+ glob->used_total_memory += amount; -+ if (!himem) -+ glob->used_memory += amount; -+ } -+ ret = 0; -+ out_unlock: -+ spin_unlock(&glob->lock); -+ ttm_check_swapping(glob); -+ -+ return ret; -+} -+ -+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, -+ bool no_wait, bool interruptible, bool himem) -+{ -+ int count = TTM_MEMORY_ALLOC_RETRIES; -+ -+ while 
(unlikely(ttm_mem_global_reserve(glob, memory, himem, true) != 0)) {
-+ if (no_wait)
-+ return -ENOMEM;
-+ if (unlikely(count-- == 0))
-+ return -ENOMEM;
-+ ttm_shrink(glob, false, memory + (memory >> 2) + 16);
-+ }
-+
-+ return 0;
-+}
-+
-+size_t ttm_round_pot(size_t size)
-+{
-+ if ((size & (size - 1)) == 0)
-+ return size;
-+ else if (size > PAGE_SIZE)
-+ return PAGE_ALIGN(size);
-+ else {
-+ size_t tmp_size = 4;
-+
-+ while (tmp_size < size)
-+ tmp_size <<= 1;
-+
-+ return tmp_size;
-+ }
-+ return 0;
-+}
-diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_memory.h b/drivers/gpu/drm/psb/ttm/ttm_memory.h
---- a/drivers/gpu/drm/psb/ttm/ttm_memory.h 1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/ttm/ttm_memory.h 2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,154 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+
-+#ifndef TTM_MEMORY_H
-+#define TTM_MEMORY_H
-+
-+#include <linux/workqueue.h>
-+#include <linux/spinlock.h>
-+#include <linux/wait.h>
-+
-+/**
-+ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
-+ *
-+ * @do_shrink: The callback function.
-+ *
-+ * Arguments to the do_shrink function are intended to be passed using
-+ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
-+ * and can be accessed using container_of().
-+ */
-+
-+struct ttm_mem_shrink {
-+ int (*do_shrink) (struct ttm_mem_shrink *);
-+};
-+
-+/**
-+ * struct ttm_mem_global - Global memory accounting structure.
-+ *
-+ * @shrink: A single callback to shrink TTM memory usage. Extend this
-+ * to a linked list to be able to handle multiple callbacks when needed.
-+ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
-+ * need a separate workqueue since it will spend a lot of time waiting
-+ * for the GPU, and this would otherwise block other workqueue tasks.
-+ * At this point we use only a single-threaded workqueue.
-+ * @work: The workqueue callback for the shrink queue.
-+ * @queue: Wait queue for processes suspended waiting for memory.
-+ * @lock: Lock to protect the @shrink pointer and the memory accounting
-+ * members, that is, essentially the whole structure with some exceptions.
-+ * @emer_memory: Lowmem memory limit available for root.
-+ * @max_memory: Lowmem memory limit available for non-root.
-+ * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
-+ * @used_memory: Currently used lowmem memory.
-+ * @used_total_memory: Currently used total (lowmem + highmem) memory.
-+ * @total_memory_swap_limit: Total memory limit where the shrink workqueue
-+ * kicks in.
-+ * @max_total_memory: Total memory available to non-root processes.
-+ * @emer_total_memory: Total memory available to root processes.
-+ *
-+ * Note that this structure is not per device. It should be global for all
-+ * graphics devices.
-+ */
-+
-+struct ttm_mem_global {
-+ struct ttm_mem_shrink *shrink;
-+ struct workqueue_struct *swap_queue;
-+ struct work_struct work;
-+ wait_queue_head_t queue;
-+ spinlock_t lock;
-+ uint64_t emer_memory;
-+ uint64_t max_memory;
-+ uint64_t swap_limit;
-+ uint64_t used_memory;
-+ uint64_t used_total_memory;
-+ uint64_t total_memory_swap_limit;
-+ uint64_t max_total_memory;
-+ uint64_t emer_total_memory;
-+};
-+
-+/**
-+ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
-+ *
-+ * @shrink: The object to initialize.
-+ * @func: The callback function.
-+ */
-+
-+static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
-+ int (*func) (struct ttm_mem_shrink *))
-+{
-+ shrink->do_shrink = func;
-+}
-+
-+/**
-+ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
-+ *
-+ * @glob: The struct ttm_mem_global object to register with.
-+ * @shrink: An initialized struct ttm_mem_shrink object to register.
-+ *
-+ * Returns:
-+ * -EBUSY: There's already a callback registered. (May change).
-+ */
-+
-+static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
-+ struct ttm_mem_shrink *shrink)
-+{
-+ spin_lock(&glob->lock);
-+ if (glob->shrink != NULL) {
-+ spin_unlock(&glob->lock);
-+ return -EBUSY;
-+ }
-+ glob->shrink = shrink;
-+ spin_unlock(&glob->lock);
-+ return 0;
-+}
-+
-+/**
-+ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
-+ *
-+ * @glob: The struct ttm_mem_global object to unregister from.
-+ * @shrink: A previously registered struct ttm_mem_shrink object.
-+ *
-+ */
-+
-+static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
-+ struct ttm_mem_shrink *shrink)
-+{
-+ spin_lock(&glob->lock);
-+ BUG_ON(glob->shrink != shrink);
-+ glob->shrink = NULL;
-+ spin_unlock(&glob->lock);
-+}
-+
-+extern int ttm_mem_global_init(struct ttm_mem_global *glob);
-+extern void ttm_mem_global_release(struct ttm_mem_global *glob);
-+extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-+ bool no_wait, bool interruptible, bool himem);
-+extern void ttm_mem_global_free(struct ttm_mem_global *glob,
-+ uint64_t amount, bool himem);
-+extern size_t ttm_round_pot(size_t size);
-+#endif
-diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_object.c b/drivers/gpu/drm/psb/ttm/ttm_object.c
---- a/drivers/gpu/drm/psb/ttm/ttm_object.c 1969-12-31 16:00:00.000000000 -0800
-+++ b/drivers/gpu/drm/psb/ttm/ttm_object.c 2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,444 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+/** @file ttm_ref_object.c -+ * -+ * Base- and reference object implementation for the various -+ * ttm objects. Implements reference counting, minimal security checks -+ * and release on file close. -+ */ -+ -+/** -+ * struct ttm_object_file -+ * -+ * @tdev: Pointer to the ttm_object_device. -+ * -+ * @lock: Lock that protects the ref_list list and the -+ * ref_hash hash tables. -+ * -+ * @ref_list: List of ttm_ref_objects to be destroyed at -+ * file release. -+ * -+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type, -+ * for fast lookup of ref objects given a base object. -+ */ -+ -+#include "ttm/ttm_object.h" -+#include <linux/list.h> -+#include <linux/spinlock.h> -+#include <linux/slab.h> -+#include <asm/atomic.h> -+ -+struct ttm_object_file { -+ struct ttm_object_device *tdev; -+ rwlock_t lock; -+ struct list_head ref_list; -+ struct drm_open_hash ref_hash[TTM_REF_NUM]; -+ struct kref refcount; -+}; -+ -+/** -+ * struct ttm_object_device -+ * -+ * @object_lock: lock that protects the object_hash hash table. -+ * -+ * @object_hash: hash table for fast lookup of object global names. -+ * -+ * @object_count: Per device object count. -+ * -+ * This is the per-device data structure needed for ttm object management. -+ */ -+ -+struct ttm_object_device { -+ rwlock_t object_lock; -+ struct drm_open_hash object_hash; -+ atomic_t object_count; -+ struct ttm_mem_global *mem_glob; -+}; -+ -+/** -+ * struct ttm_ref_object -+ * -+ * @hash: Hash entry for the per-file object reference hash. -+ * -+ * @head: List entry for the per-file list of ref-objects. -+ * -+ * @kref: Ref count. -+ * -+ * @obj: Base object this ref object is referencing. -+ * -+ * @ref_type: Type of ref object. -+ * -+ * This is similar to an idr object, but it also has a hash table entry -+ * that allows lookup with a pointer to the referenced object as a key. In -+ * that way, one can easily detect whether a base object is referenced by -+ * a particular ttm_object_file. It also carries a ref count to avoid creating -+ * multiple ref objects if a ttm_object_file references the same base object more -+ * than once. 
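The lookup-then-reference pattern this struct enables looks roughly as follows; a minimal sketch assuming a hypothetical ioctl, mirroring what ttm_pl_reference_ioctl() does further down in this patch (my_ref_ioctl is an illustrative name):

    int my_ref_ioctl(struct ttm_object_file *tfile, uint32_t handle)
    {
    	struct ttm_base_object *base;
    	int ret;

    	/* Lookup takes a transient reference on the base object. */
    	base = ttm_base_object_lookup(tfile, handle);
    	if (unlikely(base == NULL))
    		return -EINVAL;

    	/* Hold the object on behalf of this file; dropped automatically
    	 * at file release, or explicitly via ttm_ref_object_base_unref().
    	 * A duplicate add just bumps the existing ref object's count. */
    	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);

    	ttm_base_object_unref(&base);	/* drop the lookup reference */
    	return ret;
    }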
-+ */ -+ -+struct ttm_ref_object { -+ struct drm_hash_item hash; -+ struct list_head head; -+ struct kref kref; -+ struct ttm_base_object *obj; -+ enum ttm_ref_type ref_type; -+ struct ttm_object_file *tfile; -+}; -+ -+static inline struct ttm_object_file * -+ttm_object_file_ref(struct ttm_object_file *tfile) -+{ -+ kref_get(&tfile->refcount); -+ return tfile; -+} -+ -+static void ttm_object_file_destroy(struct kref *kref) -+{ -+ struct ttm_object_file *tfile = -+ container_of(kref, struct ttm_object_file, refcount); -+ -+// printk(KERN_INFO "Freeing 0x%08lx\n", (unsigned long) tfile); -+ kfree(tfile); -+} -+ -+ -+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile) -+{ -+ struct ttm_object_file *tfile = *p_tfile; -+ -+ *p_tfile = NULL; -+ kref_put(&tfile->refcount, ttm_object_file_destroy); -+} -+ -+ -+int ttm_base_object_init(struct ttm_object_file *tfile, -+ struct ttm_base_object *base, -+ bool shareable, -+ enum ttm_object_type object_type, -+ void (*refcount_release) (struct ttm_base_object **), -+ void (*ref_obj_release) (struct ttm_base_object *, -+ enum ttm_ref_type ref_type)) -+{ -+ struct ttm_object_device *tdev = tfile->tdev; -+ int ret; -+ -+ base->shareable = shareable; -+ base->tfile = ttm_object_file_ref(tfile); -+ base->refcount_release = refcount_release; -+ base->ref_obj_release = ref_obj_release; -+ base->object_type = object_type; -+ write_lock(&tdev->object_lock); -+ kref_init(&base->refcount); -+ ret = drm_ht_just_insert_please(&tdev->object_hash, -+ &base->hash, -+ (unsigned long)base, 31, 0, 0); -+ write_unlock(&tdev->object_lock); -+ if (unlikely(ret != 0)) -+ goto out_err0; -+ -+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); -+ if (unlikely(ret != 0)) -+ goto out_err1; -+ -+ ttm_base_object_unref(&base); -+ -+ return 0; -+ out_err1: -+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); -+ out_err0: -+ return ret; -+} -+ -+static void ttm_release_base(struct kref *kref) -+{ -+ struct ttm_base_object *base = -+ container_of(kref, struct ttm_base_object, refcount); -+ struct ttm_object_device *tdev = base->tfile->tdev; -+ -+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); -+ write_unlock(&tdev->object_lock); -+ if (base->refcount_release) { -+ ttm_object_file_unref(&base->tfile); -+ base->refcount_release(&base); -+ } -+ write_lock(&tdev->object_lock); -+} -+ -+void ttm_base_object_unref(struct ttm_base_object **p_base) -+{ -+ struct ttm_base_object *base = *p_base; -+ struct ttm_object_device *tdev = base->tfile->tdev; -+ -+ // printk(KERN_INFO "TTM base object unref.\n"); -+ *p_base = NULL; -+ -+ /* -+ * Need to take the lock here to avoid racing with -+ * users trying to look up the object. 
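-+ *
-+ * ttm_base_object_lookup() does kref_get() while holding the read side
-+ * of object_lock, so the final kref_put() must run under the write side;
-+ * otherwise a concurrent lookup could take a new reference between the
-+ * refcount reaching zero and the hash removal in ttm_release_base().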
-+ */ -+ -+ write_lock(&tdev->object_lock); -+ (void)kref_put(&base->refcount, &ttm_release_base); -+ write_unlock(&tdev->object_lock); -+} -+ -+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, -+ uint32_t key) -+{ -+ struct ttm_object_device *tdev = tfile->tdev; -+ struct ttm_base_object *base; -+ struct drm_hash_item *hash; -+ int ret; -+ -+ read_lock(&tdev->object_lock); -+ ret = drm_ht_find_item(&tdev->object_hash, key, &hash); -+ -+ if (likely(ret == 0)) { -+ base = drm_hash_entry(hash, struct ttm_base_object, hash); -+ kref_get(&base->refcount); -+ } -+ read_unlock(&tdev->object_lock); -+ -+ if (unlikely(ret != 0)) -+ return NULL; -+ -+ if (tfile != base->tfile && !base->shareable) { -+ printk(KERN_ERR "Attempted access of non-shareable object.\n"); -+ ttm_base_object_unref(&base); -+ return NULL; -+ } -+ -+ return base; -+} -+ -+int ttm_ref_object_add(struct ttm_object_file *tfile, -+ struct ttm_base_object *base, -+ enum ttm_ref_type ref_type, bool *existed) -+{ -+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; -+ struct ttm_ref_object *ref; -+ struct drm_hash_item *hash; -+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; -+ int ret = -EINVAL; -+ -+ if (existed != NULL) -+ *existed = true; -+ -+ while (ret == -EINVAL) { -+ read_lock(&tfile->lock); -+ ret = drm_ht_find_item(ht, base->hash.key, &hash); -+ -+ if (ret == 0) { -+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash); -+ kref_get(&ref->kref); -+ read_unlock(&tfile->lock); -+ break; -+ } -+ -+ read_unlock(&tfile->lock); -+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false, false); -+ if (unlikely(ret != 0)) -+ return ret; -+ ref = kmalloc(sizeof(*ref), GFP_KERNEL); -+ if (unlikely(ref == NULL)) { -+ ttm_mem_global_free(mem_glob, sizeof(*ref), false); -+ return -ENOMEM; -+ } -+ -+ ref->hash.key = base->hash.key; -+ ref->obj = base; -+ ref->tfile = tfile; -+ ref->ref_type = ref_type; -+ kref_init(&ref->kref); -+ -+ write_lock(&tfile->lock); -+ ret = drm_ht_insert_item(ht, &ref->hash); -+ -+ if (likely(ret == 0)) { -+ list_add_tail(&ref->head, &tfile->ref_list); -+ kref_get(&base->refcount); -+ write_unlock(&tfile->lock); -+ if (existed != NULL) -+ *existed = false; -+ break; -+ } -+ -+ write_unlock(&tfile->lock); -+ BUG_ON(ret != -EINVAL); -+ -+ ttm_mem_global_free(mem_glob, sizeof(*ref), false); -+ kfree(ref); -+ } -+ -+ return ret; -+} -+ -+static void ttm_ref_object_release(struct kref *kref) -+{ -+ struct ttm_ref_object *ref = -+ container_of(kref, struct ttm_ref_object, kref); -+ struct ttm_base_object *base = ref->obj; -+ struct ttm_object_file *tfile = ref->tfile; -+ struct drm_open_hash *ht; -+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; -+ -+ ht = &tfile->ref_hash[ref->ref_type]; -+ (void)drm_ht_remove_item(ht, &ref->hash); -+ list_del(&ref->head); -+ write_unlock(&tfile->lock); -+ -+ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) -+ base->ref_obj_release(base, ref->ref_type); -+ -+ ttm_base_object_unref(&ref->obj); -+ ttm_mem_global_free(mem_glob, sizeof(*ref), false); -+ kfree(ref); -+ write_lock(&tfile->lock); -+} -+ -+int ttm_ref_object_base_unref(struct ttm_object_file *tfile, -+ unsigned long key, enum ttm_ref_type ref_type) -+{ -+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; -+ struct ttm_ref_object *ref; -+ struct drm_hash_item *hash; -+ int ret; -+ -+ write_lock(&tfile->lock); -+ ret = drm_ht_find_item(ht, key, &hash); -+ if (unlikely(ret != 0)) { -+ write_unlock(&tfile->lock); -+ return -EINVAL; -+ } -+ ref 
= drm_hash_entry(hash, struct ttm_ref_object, hash); -+ kref_put(&ref->kref, ttm_ref_object_release); -+ write_unlock(&tfile->lock); -+ return 0; -+} -+ -+void ttm_object_file_release(struct ttm_object_file **p_tfile) -+{ -+ struct ttm_ref_object *ref; -+ struct list_head *list; -+ unsigned int i; -+ struct ttm_object_file *tfile = *p_tfile; -+ -+ *p_tfile = NULL; -+ write_lock(&tfile->lock); -+ -+ /* -+ * Since we release the lock within the loop, we have to -+ * restart it from the beginning each time. -+ */ -+ -+ while (!list_empty(&tfile->ref_list)) { -+ list = tfile->ref_list.next; -+ ref = list_entry(list, struct ttm_ref_object, head); -+ ttm_ref_object_release(&ref->kref); -+ } -+ -+ for (i = 0; i < TTM_REF_NUM; ++i) { -+ drm_ht_remove(&tfile->ref_hash[i]); -+ } -+ -+ write_unlock(&tfile->lock); -+ ttm_object_file_unref(&tfile); -+} -+ -+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, -+ unsigned int hash_order) -+{ -+ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); -+ unsigned int i; -+ unsigned int j = 0; -+ int ret; -+ -+ if (unlikely(tfile == NULL)) -+ return NULL; -+ -+ rwlock_init(&tfile->lock); -+ tfile->tdev = tdev; -+ kref_init(&tfile->refcount); -+ INIT_LIST_HEAD(&tfile->ref_list); -+ -+ for (i = 0; i < TTM_REF_NUM; ++i) { -+ ret = drm_ht_create(&tfile->ref_hash[i], hash_order); -+ if (ret) { -+ j = i; -+ goto out_err; -+ } -+ } -+ -+ return tfile; -+ out_err: -+ for (i = 0; i < j; ++i) { -+ drm_ht_remove(&tfile->ref_hash[i]); -+ } -+ kfree(tfile); -+ -+ return NULL; -+} -+ -+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global -+ *mem_glob, -+ unsigned int hash_order) -+{ -+ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); -+ int ret; -+ -+ if (unlikely(tdev == NULL)) -+ return NULL; -+ -+ tdev->mem_glob = mem_glob; -+ rwlock_init(&tdev->object_lock); -+ atomic_set(&tdev->object_count, 0); -+ ret = drm_ht_create(&tdev->object_hash, hash_order); -+ -+ if (likely(ret == 0)) -+ return tdev; -+ -+ kfree(tdev); -+ return NULL; -+} -+ -+void ttm_object_device_release(struct ttm_object_device **p_tdev) -+{ -+ struct ttm_object_device *tdev = *p_tdev; -+ -+ *p_tdev = NULL; -+ -+ write_lock(&tdev->object_lock); -+ drm_ht_remove(&tdev->object_hash); -+ write_unlock(&tdev->object_lock); -+ -+ kfree(tdev); -+} -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_object.h b/drivers/gpu/drm/psb/ttm/ttm_object.h ---- a/drivers/gpu/drm/psb/ttm/ttm_object.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_object.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,269 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. 
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
-+ */
-+/** @file ttm_ref_object.h
-+ *
-+ * Base- and reference object implementation for the various
-+ * ttm objects. Implements reference counting, minimal security checks
-+ * and release on file close.
-+ */
-+
-+#ifndef _TTM_OBJECT_H_
-+#define _TTM_OBJECT_H_
-+
-+#include <linux/list.h>
-+#include <drm/drm_hashtab.h>
-+#include <linux/kref.h>
-+#include <ttm/ttm_memory.h>
-+
-+/**
-+ * enum ttm_ref_type
-+ *
-+ * Describes what type of reference a ref object holds.
-+ *
-+ * TTM_REF_USAGE is a simple refcount on a base object.
-+ *
-+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
-+ * buffer object.
-+ *
-+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
-+ * buffer object.
-+ *
-+ */
-+
-+enum ttm_ref_type {
-+ TTM_REF_USAGE,
-+ TTM_REF_SYNCCPU_READ,
-+ TTM_REF_SYNCCPU_WRITE,
-+ TTM_REF_NUM
-+};
-+
-+/**
-+ * enum ttm_object_type
-+ *
-+ * One entry per ttm object type.
-+ * Device-specific types should use the
-+ * ttm_driver_typex types.
-+ */
-+
-+enum ttm_object_type {
-+ ttm_fence_type,
-+ ttm_buffer_type,
-+ ttm_lock_type,
-+ ttm_driver_type0 = 256,
-+ ttm_driver_type1
-+};
-+
-+struct ttm_object_file;
-+struct ttm_object_device;
-+
-+/**
-+ * struct ttm_base_object
-+ *
-+ * @hash: hash entry for the per-device object hash.
-+ * @type: derived type this object is base class for.
-+ * @shareable: Other ttm_object_files can access this object.
-+ *
-+ * @tfile: Pointer to ttm_object_file of the creator.
-+ * NULL if the object was not created by a user request.
-+ * (kernel object).
-+ *
-+ * @refcount: Number of references to this object, not
-+ * including the hash entry. A reference to a base object can
-+ * only be held by a ref object.
-+ *
-+ * @refcount_release: A function to be called when there are
-+ * no more references to this object. This function should
-+ * destroy the object (or make sure destruction eventually happens),
-+ * and when it is called, the object has
-+ * already been taken out of the per-device hash. The parameter
-+ * "base" should be set to NULL by the function.
-+ *
-+ * @ref_obj_release: A function to be called when a reference object
-+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
-+ * This function may, for example, release a lock held by a user-space
-+ * process.
-+ *
-+ * This struct is intended to be used as a base struct for objects that
-+ * are visible to user-space. It provides a global name, race-safe
-+ * access and refcounting, minimal access control and hooks for unref actions.
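A minimal sketch, not from the patch itself, of that inheritance pattern, using a hypothetical driver object; ttm_base_object_init() is declared just below:

    struct my_driver_obj {
    	struct ttm_base_object base;
    	/* driver-specific payload ... */
    };

    static void my_driver_obj_release(struct ttm_base_object **p_base)
    {
    	struct my_driver_obj *obj =
    		container_of(*p_base, struct my_driver_obj, base);

    	*p_base = NULL;		/* the release hook must clear the pointer */
    	kfree(obj);
    }

    static int my_driver_obj_register(struct ttm_object_file *tfile,
    				  struct my_driver_obj *obj)
    {
    	/* Shareable across files, exported as a driver-defined type. */
    	return ttm_base_object_init(tfile, &obj->base, true,
    				    ttm_driver_type0,
    				    &my_driver_obj_release, NULL);
    }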
-+ */
-+
-+struct ttm_base_object {
-+ struct drm_hash_item hash;
-+ enum ttm_object_type object_type;
-+ bool shareable;
-+ struct ttm_object_file *tfile;
-+ struct kref refcount;
-+ void (*refcount_release) (struct ttm_base_object **base);
-+ void (*ref_obj_release) (struct ttm_base_object *base,
-+ enum ttm_ref_type ref_type);
-+};
-+
-+/**
-+ * ttm_base_object_init
-+ *
-+ * @tfile: Pointer to a struct ttm_object_file.
-+ * @base: The struct ttm_base_object to initialize.
-+ * @shareable: This object is shareable with other applications.
-+ * (different @tfile pointers.)
-+ * @type: The object type.
-+ * @refcount_release: See the struct ttm_base_object description.
-+ * @ref_obj_release: See the struct ttm_base_object description.
-+ *
-+ * Initializes a struct ttm_base_object.
-+ */
-+
-+extern int ttm_base_object_init(struct ttm_object_file *tfile,
-+ struct ttm_base_object *base,
-+ bool shareable,
-+ enum ttm_object_type type,
-+ void (*refcount_release) (struct ttm_base_object
-+ **),
-+ void (*ref_obj_release) (struct ttm_base_object
-+ *,
-+ enum ttm_ref_type
-+ ref_type));
-+
-+/**
-+ * ttm_base_object_lookup
-+ *
-+ * @tfile: Pointer to a struct ttm_object_file.
-+ * @key: Hash key
-+ *
-+ * Looks up a struct ttm_base_object with the key @key.
-+ * Also verifies that the object is visible to the application, by
-+ * comparing the @tfile argument and checking the object shareable flag.
-+ */
-+
-+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
-+ *tfile, uint32_t key);
-+
-+/**
-+ * ttm_base_object_unref
-+ *
-+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
-+ *
-+ * Decrements the base object refcount and clears the pointer pointed to by
-+ * p_base.
-+ */
-+
-+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
-+
-+/**
-+ * ttm_ref_object_add
-+ *
-+ * @tfile: A struct ttm_object_file representing the application owning the
-+ * ref_object.
-+ * @base: The base object to reference.
-+ * @ref_type: The type of reference.
-+ * @existed: Upon completion, indicates that an identical reference object
-+ * already existed, and the refcount was upped on that object instead.
-+ *
-+ * Adding a ref object to a base object is basically like referencing the
-+ * base object, but a user-space application holds the reference. When the
-+ * file corresponding to @tfile is closed, all its reference objects are
-+ * deleted. A reference object can have different types depending on what
-+ * it's intended for. It can be simple refcounting to prevent object
-+ * destruction; when user-space takes a lock, it can add a ref object to that
-+ * lock to make sure the lock is released if the application dies. A ref
-+ * object will hold a single reference on a base object.
-+ */
-+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
-+ struct ttm_base_object *base,
-+ enum ttm_ref_type ref_type, bool *existed);
-+/**
-+ * ttm_ref_object_base_unref
-+ *
-+ * @tfile: Pointer to the struct ttm_object_file holding the reference.
-+ * @key: Key representing the base object.
-+ * @ref_type: Ref type of the ref object to be dereferenced.
-+ *
-+ * Unreference a ref object with type @ref_type
-+ * on the base object identified by @key. If there are no duplicate
-+ * references, the ref object will be destroyed and the base object
-+ * will be unreferenced.
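The ttm_object_file_init()/ttm_object_file_release() pair documented below is normally wired into the DRM driver's file hooks. A minimal sketch under that assumption; the my_* names and the hash order of 10 are illustrative, not from the patch:

    static int my_driver_open(struct drm_device *dev, struct drm_file *file_priv)
    {
    	struct my_dev_private *dev_priv = dev->dev_private;
    	struct ttm_object_file *tfile;

    	tfile = ttm_object_file_init(dev_priv->tdev, 10);
    	if (unlikely(tfile == NULL))
    		return -ENOMEM;
    	file_priv->driver_priv = tfile;
    	return 0;
    }

    static void my_driver_postclose(struct drm_device *dev,
    				struct drm_file *file_priv)
    {
    	struct ttm_object_file *tfile = file_priv->driver_priv;

    	/* Drops every ref object held by this file, then the file itself. */
    	ttm_object_file_release(&tfile);
    }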
-+ */ -+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile, -+ unsigned long key, -+ enum ttm_ref_type ref_type); -+ -+/** -+ * ttm_object_file_init - initialize a struct ttm_object file -+ * -+ * @tdev: A struct ttm_object device this file is initialized on. -+ * @hash_order: Order of the hash table used to hold the reference objects. -+ * -+ * This is typically called by the file_ops::open function. -+ */ -+ -+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device -+ *tdev, -+ unsigned int hash_order); -+ -+/** -+ * ttm_object_file_release - release data held by a ttm_object_file -+ * -+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release. -+ * *p_tfile will be set to NULL by this function. -+ * -+ * Releases all data associated by a ttm_object_file. -+ * Typically called from file_ops::release. The caller must -+ * ensure that there are no concurrent users of tfile. -+ */ -+ -+extern void ttm_object_file_release(struct ttm_object_file **p_tfile); -+ -+/** -+ * ttm_object device init - initialize a struct ttm_object_device -+ * -+ * @hash_order: Order of hash table used to hash the base objects. -+ * -+ * This function is typically called on device initialization to prepare -+ * data structures needed for ttm base and ref objects. -+ */ -+ -+extern struct ttm_object_device *ttm_object_device_init -+ (struct ttm_mem_global *mem_glob, unsigned int hash_order); -+ -+/** -+ * ttm_object_device_release - release data held by a ttm_object_device -+ * -+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release. -+ * *p_tdev will be set to NULL by this function. -+ * -+ * Releases all data associated by a ttm_object_device. -+ * Typically called from driver::unload before the destruction of the -+ * device private data structure. -+ */ -+ -+extern void ttm_object_device_release(struct ttm_object_device **p_tdev); -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c ---- a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,178 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#include "ttm/ttm_pat_compat.h" -+#include <linux/version.h> -+ -+#include <linux/spinlock.h> -+#include <asm/pgtable.h> -+ -+#if (defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)) -+#include <asm/tlbflush.h> -+#include <asm/msr.h> -+#include <asm/system.h> -+#include <linux/notifier.h> -+#include <linux/cpu.h> -+ -+#ifndef MSR_IA32_CR_PAT -+#define MSR_IA32_CR_PAT 0x0277 -+#endif -+ -+#ifndef _PAGE_PAT -+#define _PAGE_PAT 0x080 -+#endif -+ -+static int ttm_has_pat = 0; -+ -+/* -+ * Used at resume-time when CPU-s are fired up. -+ */ -+ -+static void ttm_pat_ipi_handler(void *notused) -+{ -+ u32 v1, v2; -+ -+ rdmsr(MSR_IA32_CR_PAT, v1, v2); -+ v2 &= 0xFFFFFFF8; -+ v2 |= 0x00000001; -+ wbinvd(); -+ wrmsr(MSR_IA32_CR_PAT, v1, v2); -+ wbinvd(); -+ __flush_tlb_all(); -+} -+ -+static void ttm_pat_enable(void) -+{ -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)) -+ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1, 1) != 0) { -+#else -+ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1) != 0) { -+#endif -+ printk(KERN_ERR "Timed out setting up CPU PAT.\n"); -+ } -+} -+ -+void ttm_pat_resume(void) -+{ -+ if (unlikely(!ttm_has_pat)) -+ return; -+ -+ ttm_pat_enable(); -+} -+ -+static int psb_cpu_callback(struct notifier_block *nfb, -+ unsigned long action, void *hcpu) -+{ -+ if (action == CPU_ONLINE) { -+ ttm_pat_resume(); -+ } -+ -+ return 0; -+} -+ -+static struct notifier_block psb_nb = { -+ .notifier_call = psb_cpu_callback, -+ .priority = 1 -+}; -+ -+/* -+ * Set i386 PAT entry PAT4 to Write-combining memory type on all processors. -+ */ -+ -+void ttm_pat_init(void) -+{ -+ if (likely(ttm_has_pat)) -+ return; -+ -+ if (!boot_cpu_has(X86_FEATURE_PAT)) { -+ return; -+ } -+ -+ ttm_pat_enable(); -+ -+ if (num_present_cpus() > 1) -+ register_cpu_notifier(&psb_nb); -+ -+ ttm_has_pat = 1; -+} -+ -+void ttm_pat_takedown(void) -+{ -+ if (unlikely(!ttm_has_pat)) -+ return; -+ -+ if (num_present_cpus() > 1) -+ unregister_cpu_notifier(&psb_nb); -+ -+ ttm_has_pat = 0; -+} -+ -+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot) -+{ -+ if (likely(ttm_has_pat)) { -+ pgprot_val(prot) |= _PAGE_PAT; -+ return prot; -+ } else { -+ return pgprot_noncached(prot); -+ } -+} -+ -+#else -+ -+void ttm_pat_init(void) -+{ -+} -+ -+void ttm_pat_takedown(void) -+{ -+} -+ -+void ttm_pat_resume(void) -+{ -+} -+ -+#ifdef CONFIG_X86 -+#include <asm/pat.h> -+ -+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot) -+{ -+ uint32_t cache_bits = ((1) ? 
_PAGE_CACHE_WC : _PAGE_CACHE_UC_MINUS); -+ -+ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | cache_bits); -+} -+#else -+pgprot_t pgprot_ttm_x86_wc(pgprot_t prot) -+{ -+ BUG(); -+} -+#endif -+#endif -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h ---- a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,41 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#ifndef _TTM_PAT_COMPAT_ -+#define _TTM_PAT_COMPAT_ -+#include <asm/page.h> -+ -+extern void ttm_pat_init(void); -+extern void ttm_pat_takedown(void); -+extern void ttm_pat_resume(void); -+extern pgprot_t pgprot_ttm_x86_wc(pgprot_t prot); -+#endif -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_placement_common.h b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h ---- a/drivers/gpu/drm/psb/ttm/ttm_placement_common.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,96 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. 
-+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#ifndef _TTM_PL_COMMON_H_ -+#define _TTM_PL_COMMON_H_ -+/* -+ * Memory regions for data placement. -+ */ -+ -+#define TTM_PL_SYSTEM 0 -+#define TTM_PL_TT 1 -+#define TTM_PL_VRAM 2 -+#define TTM_PL_PRIV0 3 -+#define TTM_PL_PRIV1 4 -+#define TTM_PL_PRIV2 5 -+#define TTM_PL_PRIV3 6 -+#define TTM_PL_PRIV4 7 -+#define TTM_PL_PRIV5 8 -+#define TTM_PL_CI 9 -+#define TTM_PL_SWAPPED 15 -+ -+#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM) -+#define TTM_PL_FLAG_TT (1 << TTM_PL_TT) -+#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM) -+#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0) -+#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1) -+#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2) -+#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3) -+#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4) -+#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5) -+#define TTM_PL_FLAG_CI (1 << TTM_PL_CI) -+#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED) -+#define TTM_PL_MASK_MEM 0x0000FFFF -+ -+/* -+ * Other flags that affects data placement. -+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings -+ * if available. -+ * TTM_PL_FLAG_SHARED means that another application may -+ * reference the buffer. -+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never -+ * be evicted to make room for other buffers. -+ */ -+ -+#define TTM_PL_FLAG_CACHED (1 << 16) -+#define TTM_PL_FLAG_UNCACHED (1 << 17) -+#define TTM_PL_FLAG_WC (1 << 18) -+#define TTM_PL_FLAG_SHARED (1 << 20) -+#define TTM_PL_FLAG_NO_EVICT (1 << 21) -+ -+#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \ -+ TTM_PL_FLAG_UNCACHED | \ -+ TTM_PL_FLAG_WC) -+ -+#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING) -+ -+/* -+ * Access flags to be used for CPU- and GPU- mappings. -+ * The idea is that the TTM synchronization mechanism will -+ * allow concurrent READ access and exclusive write access. -+ * Currently GPU- and CPU accesses are exclusive. 
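As a concrete illustration, not part of the patch, a driver-side helper composing a placement word from these flags; my_default_placement is a hypothetical name:

    /* Propose a cached TT placement with a system-memory fallback. */
    static uint32_t my_default_placement(void)
    {
    	uint32_t flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_SYSTEM |
    			 TTM_PL_FLAG_CACHED;

    	/* Region bits occupy the low 16 bits (TTM_PL_MASK_MEM); the
    	 * caching bits sit above them (TTM_PL_MASK_CACHING). */
    	return flags & TTM_PL_MASK_MEMTYPE;
    }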
-+ */ -+ -+#define TTM_ACCESS_READ (1 << 0) -+#define TTM_ACCESS_WRITE (1 << 1) -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_placement_user.c b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c ---- a/drivers/gpu/drm/psb/ttm/ttm_placement_user.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,468 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#include "ttm/ttm_placement_user.h" -+#include "ttm/ttm_bo_driver.h" -+#include "ttm/ttm_object.h" -+#include "ttm/ttm_userobj_api.h" -+#include "ttm/ttm_lock.h" -+ -+struct ttm_bo_user_object { -+ struct ttm_base_object base; -+ struct ttm_buffer_object bo; -+}; -+ -+static size_t pl_bo_size = 0; -+ -+static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages) -+{ -+ size_t page_array_size = -+ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK; -+ -+ if (unlikely(pl_bo_size == 0)) { -+ pl_bo_size = bdev->ttm_bo_extra_size + -+ ttm_round_pot(sizeof(struct ttm_bo_user_object)); -+ } -+ -+ return bdev->ttm_bo_size + 2 * page_array_size; -+} -+ -+static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file -+ *tfile, uint32_t handle) -+{ -+ struct ttm_base_object *base; -+ -+ base = ttm_base_object_lookup(tfile, handle); -+ if (unlikely(base == NULL)) { -+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", -+ (unsigned long)handle); -+ return NULL; -+ } -+ -+ if (unlikely(base->object_type != ttm_buffer_type)) { -+ ttm_base_object_unref(&base); -+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", -+ (unsigned long)handle); -+ return NULL; -+ } -+ -+ return container_of(base, struct ttm_bo_user_object, base); -+} -+ -+struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file -+ *tfile, uint32_t handle) -+{ -+ struct ttm_bo_user_object *user_bo; -+ struct ttm_base_object *base; -+ -+ user_bo = ttm_bo_user_lookup(tfile, handle); -+ if (unlikely(user_bo == NULL)) -+ return NULL; -+ -+ (void)ttm_bo_reference(&user_bo->bo); -+ base = 
&user_bo->base; -+ ttm_base_object_unref(&base); -+ return &user_bo->bo; -+} -+ -+static void ttm_bo_user_destroy(struct ttm_buffer_object *bo) -+{ -+ struct ttm_bo_user_object *user_bo = -+ container_of(bo, struct ttm_bo_user_object, bo); -+ -+ ttm_mem_global_free(bo->bdev->mem_glob, bo->acc_size, false); -+ kfree(user_bo); -+} -+ -+static void ttm_bo_user_release(struct ttm_base_object **p_base) -+{ -+ struct ttm_bo_user_object *user_bo; -+ struct ttm_base_object *base = *p_base; -+ struct ttm_buffer_object *bo; -+ -+ *p_base = NULL; -+ -+ if (unlikely(base == NULL)) -+ return; -+ -+ user_bo = container_of(base, struct ttm_bo_user_object, base); -+ bo = &user_bo->bo; -+ ttm_bo_unref(&bo); -+} -+ -+static void ttm_bo_user_ref_release(struct ttm_base_object *base, -+ enum ttm_ref_type ref_type) -+{ -+ struct ttm_bo_user_object *user_bo = -+ container_of(base, struct ttm_bo_user_object, base); -+ struct ttm_buffer_object *bo = &user_bo->bo; -+ -+ switch (ref_type) { -+ case TTM_REF_SYNCCPU_WRITE: -+ ttm_bo_synccpu_write_release(bo); -+ break; -+ default: -+ BUG(); -+ } -+} -+ -+static void ttm_pl_fill_rep(struct ttm_buffer_object *bo, -+ struct ttm_pl_rep *rep) -+{ -+ struct ttm_bo_user_object *user_bo = -+ container_of(bo, struct ttm_bo_user_object, bo); -+ -+ rep->gpu_offset = bo->offset; -+ rep->bo_size = bo->num_pages << PAGE_SHIFT; -+ rep->map_handle = bo->addr_space_offset; -+ rep->placement = bo->mem.flags; -+ rep->handle = user_bo->base.hash.key; -+ rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg; -+} -+ -+int ttm_pl_create_ioctl(struct ttm_object_file *tfile, -+ struct ttm_bo_device *bdev, -+ struct ttm_lock *lock, void *data) -+{ -+ union ttm_pl_create_arg *arg = data; -+ struct ttm_pl_create_req *req = &arg->req; -+ struct ttm_pl_rep *rep = &arg->rep; -+ struct ttm_buffer_object *bo; -+ struct ttm_buffer_object *tmp; -+ struct ttm_bo_user_object *user_bo; -+ uint32_t flags; -+ int ret = 0; -+ struct ttm_mem_global *mem_glob = bdev->mem_glob; -+ size_t acc_size = -+ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT); -+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ flags = req->placement; -+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); -+ if (unlikely(user_bo == NULL)) { -+ ttm_mem_global_free(mem_glob, acc_size, false); -+ return -ENOMEM; -+ } -+ -+ bo = &user_bo->bo; -+ ret = ttm_read_lock(lock, true); -+ if (unlikely(ret != 0)) { -+ ttm_mem_global_free(mem_glob, acc_size, false); -+ kfree(user_bo); -+ return ret; -+ } -+ -+ ret = ttm_buffer_object_init(bdev, bo, req->size, -+ ttm_bo_type_device, flags, -+ req->page_alignment, 0, true, -+ NULL, acc_size, &ttm_bo_user_destroy); -+ ttm_read_unlock(lock); -+ -+ /* -+ * Note that the ttm_buffer_object_init function -+ * would've called the destroy function on failure!! 
-+ */ -+ -+ if (unlikely(ret != 0)) -+ goto out; -+ -+ tmp = ttm_bo_reference(bo); -+ ret = ttm_base_object_init(tfile, &user_bo->base, -+ flags & TTM_PL_FLAG_SHARED, -+ ttm_buffer_type, -+ &ttm_bo_user_release, -+ &ttm_bo_user_ref_release); -+ if (unlikely(ret != 0)) -+ goto out_err; -+ -+ mutex_lock(&bo->mutex); -+ ttm_pl_fill_rep(bo, rep); -+ mutex_unlock(&bo->mutex); -+ ttm_bo_unref(&bo); -+ out: -+ return 0; -+ out_err: -+ ttm_bo_unref(&tmp); -+ ttm_bo_unref(&bo); -+ return ret; -+} -+ -+int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile, -+ struct ttm_bo_device *bdev, -+ struct ttm_lock *lock, void *data) -+{ -+ union ttm_pl_create_ub_arg *arg = data; -+ struct ttm_pl_create_ub_req *req = &arg->req; -+ struct ttm_pl_rep *rep = &arg->rep; -+ struct ttm_buffer_object *bo; -+ struct ttm_buffer_object *tmp; -+ struct ttm_bo_user_object *user_bo; -+ uint32_t flags; -+ int ret = 0; -+ struct ttm_mem_global *mem_glob = bdev->mem_glob; -+ size_t acc_size = -+ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT); -+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ flags = req->placement; -+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); -+ if (unlikely(user_bo == NULL)) { -+ ttm_mem_global_free(mem_glob, acc_size, false); -+ return -ENOMEM; -+ } -+ ret = ttm_read_lock(lock, true); -+ if (unlikely(ret != 0)) { -+ ttm_mem_global_free(mem_glob, acc_size, false); -+ kfree(user_bo); -+ return ret; -+ } -+ bo = &user_bo->bo; -+ ret = ttm_buffer_object_init(bdev, bo, req->size, -+ ttm_bo_type_user, flags, -+ req->page_alignment, req->user_address, -+ true, NULL, acc_size, &ttm_bo_user_destroy); -+ -+ /* -+ * Note that the ttm_buffer_object_init function -+ * would've called the destroy function on failure!! 
-+ */ -+ ttm_read_unlock(lock); -+ if (unlikely(ret != 0)) -+ goto out; -+ -+ tmp = ttm_bo_reference(bo); -+ ret = ttm_base_object_init(tfile, &user_bo->base, -+ flags & TTM_PL_FLAG_SHARED, -+ ttm_buffer_type, -+ &ttm_bo_user_release, -+ &ttm_bo_user_ref_release); -+ if (unlikely(ret != 0)) -+ goto out_err; -+ -+ mutex_lock(&bo->mutex); -+ ttm_pl_fill_rep(bo, rep); -+ mutex_unlock(&bo->mutex); -+ ttm_bo_unref(&bo); -+ out: -+ return 0; -+ out_err: -+ ttm_bo_unref(&tmp); -+ ttm_bo_unref(&bo); -+ return ret; -+} -+ -+int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data) -+{ -+ union ttm_pl_reference_arg *arg = data; -+ struct ttm_pl_rep *rep = &arg->rep; -+ struct ttm_bo_user_object *user_bo; -+ struct ttm_buffer_object *bo; -+ struct ttm_base_object *base; -+ int ret; -+ -+ user_bo = ttm_bo_user_lookup(tfile, arg->req.handle); -+ if (unlikely(user_bo == NULL)) { -+ printk(KERN_ERR "Could not reference buffer object.\n"); -+ return -EINVAL; -+ } -+ -+ bo = &user_bo->bo; -+ ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL); -+ if (unlikely(ret != 0)) { -+ printk(KERN_ERR -+ "Could not add a reference to buffer object.\n"); -+ goto out; -+ } -+ -+ mutex_lock(&bo->mutex); -+ ttm_pl_fill_rep(bo, rep); -+ mutex_unlock(&bo->mutex); -+ -+ out: -+ base = &user_bo->base; -+ ttm_base_object_unref(&base); -+ return ret; -+} -+ -+int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data) -+{ -+ struct ttm_pl_reference_req *arg = data; -+ -+ return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE); -+} -+ -+int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data) -+{ -+ struct ttm_pl_synccpu_arg *arg = data; -+ struct ttm_bo_user_object *user_bo; -+ struct ttm_buffer_object *bo; -+ struct ttm_base_object *base; -+ bool existed; -+ int ret; -+ -+ switch (arg->op) { -+ case TTM_PL_SYNCCPU_OP_GRAB: -+ user_bo = ttm_bo_user_lookup(tfile, arg->handle); -+ if (unlikely(user_bo == NULL)) { -+ printk(KERN_ERR -+ "Could not find buffer object for synccpu.\n"); -+ return -EINVAL; -+ } -+ bo = &user_bo->bo; -+ base = &user_bo->base; -+ ret = ttm_bo_synccpu_write_grab(bo, -+ arg->access_mode & -+ TTM_PL_SYNCCPU_MODE_NO_BLOCK); -+ if (unlikely(ret != 0)) { -+ ttm_base_object_unref(&base); -+ goto out; -+ } -+ ret = ttm_ref_object_add(tfile, &user_bo->base, -+ TTM_REF_SYNCCPU_WRITE, &existed); -+ if (existed || ret != 0) -+ ttm_bo_synccpu_write_release(bo); -+ ttm_base_object_unref(&base); -+ break; -+ case TTM_PL_SYNCCPU_OP_RELEASE: -+ ret = ttm_ref_object_base_unref(tfile, arg->handle, -+ TTM_REF_SYNCCPU_WRITE); -+ break; -+ default: -+ ret = -EINVAL; -+ break; -+ } -+ out: -+ return ret; -+} -+ -+int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile, -+ struct ttm_lock *lock, void *data) -+{ -+ union ttm_pl_setstatus_arg *arg = data; -+ struct ttm_pl_setstatus_req *req = &arg->req; -+ struct ttm_pl_rep *rep = &arg->rep; -+ struct ttm_buffer_object *bo; -+ struct ttm_bo_device *bdev; -+ int ret; -+ -+ bo = ttm_buffer_object_lookup(tfile, req->handle); -+ if (unlikely(bo == NULL)) { -+ printk(KERN_ERR -+ "Could not find buffer object for setstatus.\n"); -+ return -EINVAL; -+ } -+ -+ bdev = bo->bdev; -+ -+ ret = ttm_read_lock(lock, true); -+ if (unlikely(ret != 0)) -+ goto out_err0; -+ -+ ret = ttm_bo_reserve(bo, true, false, false, 0); -+ if (unlikely(ret != 0)) -+ goto out_err1; -+ -+ ret = ttm_bo_wait_cpu(bo, false); -+ if (unlikely(ret != 0)) -+ goto out_err2; -+ -+ mutex_lock(&bo->mutex); -+ ret = ttm_bo_check_placement(bo, req->set_placement, -+ 
req->clr_placement); -+ if (unlikely(ret != 0)) -+ goto out_err2; -+ -+ bo->proposed_flags = (bo->proposed_flags | req->set_placement) -+ & ~req->clr_placement; -+ ret = ttm_buffer_object_validate(bo, true, false); -+ if (unlikely(ret != 0)) -+ goto out_err2; -+ -+ ttm_pl_fill_rep(bo, rep); -+ out_err2: -+ mutex_unlock(&bo->mutex); -+ ttm_bo_unreserve(bo); -+ out_err1: -+ ttm_read_unlock(lock); -+ out_err0: -+ ttm_bo_unref(&bo); -+ return ret; -+} -+ -+int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data) -+{ -+ struct ttm_pl_waitidle_arg *arg = data; -+ struct ttm_buffer_object *bo; -+ int ret; -+ -+ bo = ttm_buffer_object_lookup(tfile, arg->handle); -+ if (unlikely(bo == NULL)) { -+ printk(KERN_ERR "Could not find buffer object for waitidle.\n"); -+ return -EINVAL; -+ } -+ -+ ret = -+ ttm_bo_block_reservation(bo, true, -+ arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK); -+ if (unlikely(ret != 0)) -+ goto out; -+ mutex_lock(&bo->mutex); -+ ret = ttm_bo_wait(bo, -+ arg->mode & TTM_PL_WAITIDLE_MODE_LAZY, -+ true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK); -+ mutex_unlock(&bo->mutex); -+ ttm_bo_unblock_reservation(bo); -+ out: -+ ttm_bo_unref(&bo); -+ return ret; -+} -+ -+int ttm_pl_verify_access(struct ttm_buffer_object *bo, -+ struct ttm_object_file *tfile) -+{ -+ struct ttm_bo_user_object *ubo; -+ -+ /* -+ * Check bo subclass. -+ */ -+ -+ if (unlikely(bo->destroy != &ttm_bo_user_destroy)) -+ return -EPERM; -+ -+ ubo = container_of(bo, struct ttm_bo_user_object, bo); -+ if (likely(ubo->base.shareable || ubo->base.tfile == tfile)) -+ return 0; -+ -+ return -EPERM; -+} -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_placement_user.h b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h ---- a/drivers/gpu/drm/psb/ttm/ttm_placement_user.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,259 @@ -+/************************************************************************** -+ * -+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. 
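
As a usage note: user space reaches the SYNCCPU handler above through the driver's command range. A minimal sketch, assuming libdrm's drmCommandWrite() and a hypothetical DRM_PSB_TTM_PL_SYNCCPU command index (the real index would live in the driver's UAPI header); error handling trimmed:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include "ttm/ttm_placement_user.h"

    #define DRM_PSB_TTM_PL_SYNCCPU 0x23 /* hypothetical command index */

    static int bo_cpu_grab(int fd, uint32_t handle)
    {
            struct ttm_pl_synccpu_arg arg;

            memset(&arg, 0, sizeof(arg));
            arg.handle = handle;
            arg.access_mode = TTM_PL_SYNCCPU_MODE_READ | TTM_PL_SYNCCPU_MODE_WRITE;
            arg.op = TTM_PL_SYNCCPU_OP_GRAB;
            return drmCommandWrite(fd, DRM_PSB_TTM_PL_SYNCCPU, &arg, sizeof(arg));
    }

    static int bo_cpu_release(int fd, uint32_t handle)
    {
            struct ttm_pl_synccpu_arg arg;

            memset(&arg, 0, sizeof(arg));
            arg.handle = handle;
            arg.op = TTM_PL_SYNCCPU_OP_RELEASE;
            return drmCommandWrite(fd, DRM_PSB_TTM_PL_SYNCCPU, &arg, sizeof(arg));
    }
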
-+ * -+ **************************************************************************/ -+/* -+ * Authors -+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#ifndef _TTM_PLACEMENT_USER_H_ -+#define _TTM_PLACEMENT_USER_H_ -+ -+#if !defined(__KERNEL__) && !defined(_KERNEL) -+#include <stdint.h> -+#else -+#include <linux/kernel.h> -+#endif -+ -+#include "ttm/ttm_placement_common.h" -+ -+#define TTM_PLACEMENT_MAJOR 0 -+#define TTM_PLACEMENT_MINOR 1 -+#define TTM_PLACEMENT_PL 0 -+#define TTM_PLACEMENT_DATE "080819" -+ -+/** -+ * struct ttm_pl_create_req -+ * -+ * @size: The buffer object size. -+ * @placement: Flags that indicate initial acceptable -+ * placement. -+ * @page_alignment: Required alignment in pages. -+ * -+ * Input to the TTM_BO_CREATE ioctl. -+ */ -+ -+struct ttm_pl_create_req { -+ uint64_t size; -+ uint32_t placement; -+ uint32_t page_alignment; -+}; -+ -+/** -+ * struct ttm_pl_create_ub_req -+ * -+ * @size: The buffer object size. -+ * @user_address: User-space address of the memory area that -+ * should be used to back the buffer object cast to 64-bit. -+ * @placement: Flags that indicate initial acceptable -+ * placement. -+ * @page_alignment: Required alignment in pages. -+ * -+ * Input to the TTM_BO_CREATE_UB ioctl. -+ */ -+ -+struct ttm_pl_create_ub_req { -+ uint64_t size; -+ uint64_t user_address; -+ uint32_t placement; -+ uint32_t page_alignment; -+}; -+ -+/** -+ * struct ttm_pl_rep -+ * -+ * @gpu_offset: The current offset into the memory region used. -+ * This can be used directly by the GPU if there are no -+ * additional GPU mapping procedures used by the driver. -+ * -+ * @bo_size: Actual buffer object size. -+ * -+ * @map_handle: Offset into the device address space. -+ * Used for map, seek, read, write. This will never change -+ * during the lifetime of an object. -+ * -+ * @placement: Flag indicating the placement status of -+ * the buffer object using the TTM_PL flags above. -+ * -+ * @sync_object_arg: Used for user-space synchronization and -+ * depends on the synchronization model used. If fences are -+ * used, this is the buffer_object::fence_type_mask -+ * -+ * Output from the TTM_PL_CREATE and TTM_PL_REFERENCE, and -+ * TTM_PL_SETSTATUS ioctls. -+ */ -+ -+struct ttm_pl_rep { -+ uint64_t gpu_offset; -+ uint64_t bo_size; -+ uint64_t map_handle; -+ uint32_t placement; -+ uint32_t handle; -+ uint32_t sync_object_arg; -+ uint32_t pad64; -+}; -+ -+/** -+ * struct ttm_pl_setstatus_req -+ * -+ * @set_placement: Placement flags to set. -+ * -+ * @clr_placement: Placement flags to clear. -+ * -+ * @handle: The object handle -+ * -+ * Input to the TTM_PL_SETSTATUS ioctl. -+ */ -+ -+struct ttm_pl_setstatus_req { -+ uint32_t set_placement; -+ uint32_t clr_placement; -+ uint32_t handle; -+ uint32_t pad64; -+}; -+ -+/** -+ * struct ttm_pl_reference_req -+ * -+ * @handle: The object to put a reference on. -+ * -+ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREFERENCE ioctls. -+ */ -+ -+struct ttm_pl_reference_req { -+ uint32_t handle; -+ uint32_t pad64; -+}; -+ -+/* -+ * ACCESS mode flags for SYNCCPU. -+ * -+ * TTM_SYNCCPU_MODE_READ will guarantee that the GPU is not -+ * writing to the buffer. -+ * -+ * TTM_SYNCCPU_MODE_WRITE will guarantee that the GPU is not -+ * accessing the buffer. -+ * -+ * TTM_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait -+ * for GPU accesses to finish but return -EBUSY. -+ * -+ * TTM_SYNCCPU_MODE_TRYCACHED Try to place the buffer in cacheable -+ * memory while synchronized for CPU. 
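
The create/rep pair documented above implies the usual user-space flow: issue the create, then mmap() the returned map_handle against the DRM fd. A sketch under the same assumption of a hypothetical DRM_PSB_TTM_PL_CREATE command index:

    #include <stdint.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <xf86drm.h>
    #include "ttm/ttm_placement_user.h"

    #define DRM_PSB_TTM_PL_CREATE 0x20 /* hypothetical command index */

    static void *bo_create_and_map(int fd, uint64_t size, uint32_t *handle)
    {
            union ttm_pl_create_arg arg;
            void *ptr;

            memset(&arg, 0, sizeof(arg));
            arg.req.size = size;
            arg.req.placement = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED;
            arg.req.page_alignment = 0;

            if (drmCommandWriteRead(fd, DRM_PSB_TTM_PL_CREATE,
                                    &arg, sizeof(arg)) != 0)
                    return NULL;

            *handle = arg.rep.handle;
            /* map_handle is a stable offset into the device address space */
            ptr = mmap(NULL, arg.rep.bo_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, (off_t)arg.rep.map_handle);
            return ptr == MAP_FAILED ? NULL : ptr;
    }
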
-+ */ -+ -+#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ -+#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE -+#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2) -+#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3) -+ -+/** -+ * struct ttm_pl_synccpu_arg -+ * -+ * @handle: The object to synchronize. -+ * -+ * @access_mode: access mode indicated by the -+ * TTM_SYNCCPU_MODE flags. -+ * -+ * @op: indicates whether to grab or release the -+ * buffer for cpu usage. -+ * -+ * Input to the TTM_PL_SYNCCPU ioctl. -+ */ -+ -+struct ttm_pl_synccpu_arg { -+ uint32_t handle; -+ uint32_t access_mode; -+ enum { -+ TTM_PL_SYNCCPU_OP_GRAB, -+ TTM_PL_SYNCCPU_OP_RELEASE -+ } op; -+ uint32_t pad64; -+}; -+ -+/* -+ * Waiting mode flags for the TTM_BO_WAITIDLE ioctl. -+ * -+ * TTM_WAITIDLE_MODE_LAZY: Allow for sleeps during polling -+ * wait. -+ * -+ * TTM_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for GPU, -+ * but return -EBUSY if the buffer is busy. -+ */ -+ -+#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0) -+#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1) -+ -+/** -+ * struct ttm_waitidle_arg -+ * -+ * @handle: The object to synchronize. -+ * -+ * @mode: wait mode indicated by the -+ * TTM_SYNCCPU_MODE flags. -+ * -+ * Argument to the TTM_BO_WAITIDLE ioctl. -+ */ -+ -+struct ttm_pl_waitidle_arg { -+ uint32_t handle; -+ uint32_t mode; -+}; -+ -+union ttm_pl_create_arg { -+ struct ttm_pl_create_req req; -+ struct ttm_pl_rep rep; -+}; -+ -+union ttm_pl_reference_arg { -+ struct ttm_pl_reference_req req; -+ struct ttm_pl_rep rep; -+}; -+ -+union ttm_pl_setstatus_arg { -+ struct ttm_pl_setstatus_req req; -+ struct ttm_pl_rep rep; -+}; -+ -+union ttm_pl_create_ub_arg { -+ struct ttm_pl_create_ub_req req; -+ struct ttm_pl_rep rep; -+}; -+ -+/* -+ * Ioctl offsets. -+ */ -+ -+#define TTM_PL_CREATE 0x00 -+#define TTM_PL_REFERENCE 0x01 -+#define TTM_PL_UNREF 0x02 -+#define TTM_PL_SYNCCPU 0x03 -+#define TTM_PL_WAITIDLE 0x04 -+#define TTM_PL_SETSTATUS 0x05 -+#define TTM_PL_CREATE_UB 0x06 -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_regman.h b/drivers/gpu/drm/psb/ttm/ttm_regman.h ---- a/drivers/gpu/drm/psb/ttm/ttm_regman.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_regman.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,74 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
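
The setstatus request above is the migration/pinning knob: flags named in set_placement are OR-ed in, flags in clr_placement are cleared, and the handler revalidates the buffer. A sketch of moving a buffer into VRAM, again with a hypothetical command index:

    static int bo_move_to_vram(int fd, uint32_t handle)
    {
            union ttm_pl_setstatus_arg arg;

            memset(&arg, 0, sizeof(arg));
            arg.req.handle = handle;
            arg.req.set_placement = TTM_PL_FLAG_VRAM;
            arg.req.clr_placement = TTM_PL_FLAG_TT | TTM_PL_FLAG_SYSTEM;
            /* DRM_PSB_TTM_PL_SETSTATUS is a hypothetical command index */
            return drmCommandWriteRead(fd, DRM_PSB_TTM_PL_SETSTATUS,
                                       &arg, sizeof(arg));
    }
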
IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#ifndef _TTM_REGMAN_H_ -+#define _TTM_REGMAN_H_ -+ -+#include <linux/list.h> -+ -+struct ttm_fence_object; -+ -+struct ttm_reg { -+ struct list_head head; -+ struct ttm_fence_object *fence; -+ uint32_t fence_type; -+ uint32_t new_fence_type; -+}; -+ -+struct ttm_reg_manager { -+ struct list_head free; -+ struct list_head lru; -+ struct list_head unfenced; -+ -+ int (*reg_reusable)(const struct ttm_reg *reg, const void *data); -+ void (*reg_destroy)(struct ttm_reg *reg); -+}; -+ -+extern int ttm_regs_alloc(struct ttm_reg_manager *manager, -+ const void *data, -+ uint32_t fence_class, -+ uint32_t fence_type, -+ int interruptible, -+ int no_wait, -+ struct ttm_reg **reg); -+ -+extern void ttm_regs_fence(struct ttm_reg_manager *regs, -+ struct ttm_fence_object *fence); -+ -+extern void ttm_regs_free(struct ttm_reg_manager *manager); -+extern void ttm_regs_add(struct ttm_reg_manager *manager, struct ttm_reg *reg); -+extern void ttm_regs_init(struct ttm_reg_manager *manager, -+ int (*reg_reusable)(const struct ttm_reg *, -+ const void *), -+ void (*reg_destroy)(struct ttm_reg *)); -+ -+#endif -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_tt.c b/drivers/gpu/drm/psb/ttm/ttm_tt.c ---- a/drivers/gpu/drm/psb/ttm/ttm_tt.c 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_tt.c 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,655 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
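
The register manager declared above is driven entirely through the two callbacks passed to ttm_regs_init(). A kernel-side sketch of wiring it up, assuming a hypothetical driver wrapper type and inferring the callback semantics from the declarations (reg_reusable decides whether an existing register satisfies the request described by @data; reg_destroy frees a register):

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include "ttm/ttm_regman.h"

    struct my_reg {                      /* hypothetical driver wrapper */
            struct ttm_reg base;
            uint32_t hw_index;
    };

    static int my_reg_reusable(const struct ttm_reg *reg, const void *data)
    {
            /* assumption: any free register can satisfy any request */
            return 1;
    }

    static void my_reg_destroy(struct ttm_reg *reg)
    {
            kfree(container_of(reg, struct my_reg, base));
    }

    static void my_regman_setup(struct ttm_reg_manager *man)
    {
            ttm_regs_init(man, my_reg_reusable, my_reg_destroy);
    }
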
-+ * -+ **************************************************************************/ -+/* -+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#include <linux/version.h> -+#include <linux/vmalloc.h> -+#include <linux/sched.h> -+#include <linux/highmem.h> -+#include <linux/pagemap.h> -+#include <linux/file.h> -+#include <linux/swap.h> -+#include "ttm/ttm_bo_driver.h" -+#include "ttm/ttm_placement_common.h" -+ -+static int ttm_tt_swapin(struct ttm_tt *ttm); -+ -+#if defined( CONFIG_X86 ) -+static void ttm_tt_clflush_page(struct page *page) -+{ -+ uint8_t *page_virtual; -+ unsigned int i; -+ -+ if (unlikely(page == NULL)) -+ return; -+ -+ page_virtual = kmap_atomic(page, KM_USER0); -+ -+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) -+ clflush(page_virtual + i); -+ -+ kunmap_atomic(page_virtual, KM_USER0); -+} -+ -+static void ttm_tt_cache_flush_clflush(struct page *pages[], -+ unsigned long num_pages) -+{ -+ unsigned long i; -+ -+ mb(); -+ for (i = 0; i < num_pages; ++i) -+ ttm_tt_clflush_page(*pages++); -+ mb(); -+} -+#else -+static void ttm_tt_ipi_handler(void *null) -+{ -+ ; -+} -+#endif -+ -+void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages) -+{ -+ -+#if defined( CONFIG_X86 ) -+ if (cpu_has_clflush) { -+ ttm_tt_cache_flush_clflush(pages, num_pages); -+ return; -+ } -+#else -+ if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1, 1) != 0) -+ printk(KERN_ERR "Timed out waiting for drm cache flush.\n"); -+#endif -+} -+ -+/** -+ * Allocates storage for pointers to the pages that back the ttm. -+ * -+ * Uses kmalloc if possible. Otherwise falls back to vmalloc. -+ */ -+static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) -+{ -+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages); -+ ttm->pages = NULL; -+ -+ if (size <= PAGE_SIZE) -+ ttm->pages = kzalloc(size, GFP_KERNEL); -+ -+ if (!ttm->pages) { -+ ttm->pages = vmalloc_user(size); -+ if (ttm->pages) -+ ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC; -+ } -+} -+ -+static void ttm_tt_free_page_directory(struct ttm_tt *ttm) -+{ -+ if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) { -+ vfree(ttm->pages); -+ ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC; -+ } else { -+ kfree(ttm->pages); -+ } -+ ttm->pages = NULL; -+} -+ -+static struct page *ttm_tt_alloc_page(void) -+{ -+ return alloc_page(GFP_HIGHUSER | __GFP_ZERO); -+} -+ -+static void ttm_tt_free_user_pages(struct ttm_tt *ttm) -+{ -+ int write; -+ int dirty; -+ struct page *page; -+ int i; -+ struct ttm_backend *be = ttm->be; -+ -+ BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER)); -+ write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0); -+ dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0); -+ -+ if (be) -+ be->func->clear(be); -+ -+ for (i = 0; i < ttm->num_pages; ++i) { -+ page = ttm->pages[i]; -+ if (page == NULL) -+ continue; -+ -+ if (page == ttm->dummy_read_page) { -+ BUG_ON(write); -+ continue; -+ } -+ -+ if (write && dirty && !PageReserved(page)) -+ set_page_dirty_lock(page); -+ -+ ttm->pages[i] = NULL; -+ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false); -+ put_page(page); -+ } -+ ttm->state = tt_unpopulated; -+ ttm->first_himem_page = ttm->num_pages; -+ ttm->last_lomem_page = -1; -+} -+ -+static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) -+{ -+ struct page *p; -+ struct ttm_bo_device *bdev = ttm->bdev; -+ struct ttm_mem_global *mem_glob = bdev->mem_glob; -+ int ret; -+ -+ while (NULL == (p = ttm->pages[index])) { -+ p = ttm_tt_alloc_page(); -+ -+ if (!p) -+ return NULL; -+ -+ if 
(PageHighMem(p)) { -+ ret = -+ ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, true); -+ if (unlikely(ret != 0)) -+ goto out_err; -+ ttm->pages[--ttm->first_himem_page] = p; -+ } else { -+ ret = -+ ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, false); -+ if (unlikely(ret != 0)) -+ goto out_err; -+ ttm->pages[++ttm->last_lomem_page] = p; -+ } -+ } -+ return p; -+ out_err: -+ put_page(p); -+ return NULL; -+} -+ -+struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index) -+{ -+ int ret; -+ -+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { -+ ret = ttm_tt_swapin(ttm); -+ if (unlikely(ret != 0)) -+ return NULL; -+ } -+ return __ttm_tt_get_page(ttm, index); -+} -+ -+int ttm_tt_populate(struct ttm_tt *ttm) -+{ -+ struct page *page; -+ unsigned long i; -+ struct ttm_backend *be; -+ int ret; -+ -+ if (ttm->state != tt_unpopulated) -+ return 0; -+ -+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { -+ ret = ttm_tt_swapin(ttm); -+ if (unlikely(ret != 0)) -+ return ret; -+ } -+ -+ be = ttm->be; -+ -+ for (i = 0; i < ttm->num_pages; ++i) { -+ page = __ttm_tt_get_page(ttm, i); -+ if (!page) -+ return -ENOMEM; -+ } -+ -+ be->func->populate(be, ttm->num_pages, ttm->pages, -+ ttm->dummy_read_page); -+ ttm->state = tt_unbound; -+ return 0; -+} -+ -+#ifdef CONFIG_X86 -+static inline int ttm_tt_set_page_caching(struct page *p, -+ enum ttm_caching_state c_state) -+{ -+ if (PageHighMem(p)) -+ return 0; -+ -+ switch (c_state) { -+ case tt_cached: -+ return set_pages_wb(p, 1); -+ case tt_wc: -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) -+ return set_memory_wc((unsigned long) page_address(p), 1); -+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) */ -+ default: -+ return set_pages_uc(p, 1); -+ } -+} -+#else /* CONFIG_X86 */ -+static inline int ttm_tt_set_page_caching(struct page *p, -+ enum ttm_caching_state c_state) -+{ -+ return 0; -+} -+#endif /* CONFIG_X86 */ -+ -+/* -+ * Change caching policy for the linear kernel map -+ * for range of pages in a ttm. 
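
Note that __ttm_tt_get_page() above fills pages[] from both ends: lowmem pages grow up from index 0 (tracked by last_lomem_page, which starts at -1), while highmem pages grow down from num_pages (tracked by first_himem_page). A sketch of the resulting partition invariant, assuming the signed/unsigned field types this file implies:

    /* lowmem pages occupy [0, last_lomem_page],
     * highmem pages occupy [first_himem_page, num_pages - 1];
     * the two ranges must never meet */
    static int ttm_tt_pages_partitioned(const struct ttm_tt *ttm)
    {
            return ttm->last_lomem_page < (long)ttm->first_himem_page;
    }
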
-+ */ -+ -+static int ttm_tt_set_caching(struct ttm_tt *ttm, -+ enum ttm_caching_state c_state) -+{ -+ int i, j; -+ struct page *cur_page; -+ int ret; -+ -+ if (ttm->caching_state == c_state) -+ return 0; -+ -+ if (c_state != tt_cached) { -+ ret = ttm_tt_populate(ttm); -+ if (unlikely(ret != 0)) -+ return ret; -+ } -+ -+ if (ttm->caching_state == tt_cached) -+ ttm_tt_cache_flush(ttm->pages, ttm->num_pages); -+ -+ for (i = 0; i < ttm->num_pages; ++i) { -+ cur_page = ttm->pages[i]; -+ if (likely(cur_page != NULL)) { -+ ret = ttm_tt_set_page_caching(cur_page, c_state); -+ if (unlikely(ret != 0)) -+ goto out_err; -+ } -+ } -+ -+ ttm->caching_state = c_state; -+ -+ return 0; -+ -+ out_err: -+ for (j = 0; j < i; ++j) { -+ cur_page = ttm->pages[j]; -+ if (likely(cur_page != NULL)) { -+ (void)ttm_tt_set_page_caching(cur_page, -+ ttm->caching_state); -+ } -+ } -+ -+ return ret; -+} -+ -+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) -+{ -+ enum ttm_caching_state state; -+ -+ if (placement & TTM_PL_FLAG_WC) -+ state = tt_wc; -+ else if (placement & TTM_PL_FLAG_UNCACHED) -+ state = tt_uncached; -+ else -+ state = tt_cached; -+ -+ return ttm_tt_set_caching(ttm, state); -+} -+ -+static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) -+{ -+ int i; -+ struct page *cur_page; -+ struct ttm_backend *be = ttm->be; -+ -+ if (be) -+ be->func->clear(be); -+ (void)ttm_tt_set_caching(ttm, tt_cached); -+ for (i = 0; i < ttm->num_pages; ++i) { -+ cur_page = ttm->pages[i]; -+ ttm->pages[i] = NULL; -+ if (cur_page) { -+ if (page_count(cur_page) != 1) -+ printk(KERN_ERR -+ "Erroneous page count. Leaking pages.\n"); -+ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, -+ PageHighMem(cur_page)); -+ __free_page(cur_page); -+ } -+ } -+ ttm->state = tt_unpopulated; -+ ttm->first_himem_page = ttm->num_pages; -+ ttm->last_lomem_page = -1; -+} -+ -+void ttm_tt_destroy(struct ttm_tt *ttm) -+{ -+ struct ttm_backend *be; -+ -+ if (unlikely(ttm == NULL)) -+ return; -+ -+ be = ttm->be; -+ if (likely(be != NULL)) { -+ be->func->destroy(be); -+ ttm->be = NULL; -+ } -+ -+ if (likely(ttm->pages != NULL)) { -+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) -+ ttm_tt_free_user_pages(ttm); -+ else -+ ttm_tt_free_alloced_pages(ttm); -+ -+ ttm_tt_free_page_directory(ttm); -+ } -+ -+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) && -+ ttm->swap_storage) -+ fput(ttm->swap_storage); -+ -+ kfree(ttm); -+} -+ -+int ttm_tt_set_user(struct ttm_tt *ttm, -+ struct task_struct *tsk, -+ unsigned long start, unsigned long num_pages) -+{ -+ struct mm_struct *mm = tsk->mm; -+ int ret; -+ int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0; -+ struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob; -+ -+ BUG_ON(num_pages != ttm->num_pages); -+ BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0); -+ -+ /** -+ * Account user pages as lowmem pages for now. 
-+ */ -+ -+ ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE, false, false, false); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ down_read(&mm->mmap_sem); -+ ret = get_user_pages(tsk, mm, start, num_pages, -+ write, 0, ttm->pages, NULL); -+ up_read(&mm->mmap_sem); -+ -+ if (ret != num_pages && write) { -+ ttm_tt_free_user_pages(ttm); -+ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false); -+ return -ENOMEM; -+ } -+ -+ ttm->tsk = tsk; -+ ttm->start = start; -+ ttm->state = tt_unbound; -+ -+ return 0; -+} -+ -+struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, -+ uint32_t page_flags, struct page *dummy_read_page) -+{ -+ struct ttm_bo_driver *bo_driver = bdev->driver; -+ struct ttm_tt *ttm; -+ -+ if (!bo_driver) -+ return NULL; -+ -+ ttm = kzalloc(sizeof(*ttm), GFP_KERNEL); -+ if (!ttm) -+ return NULL; -+ -+ ttm->bdev = bdev; -+ -+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; -+ ttm->first_himem_page = ttm->num_pages; -+ ttm->last_lomem_page = -1; -+ ttm->caching_state = tt_cached; -+ ttm->page_flags = page_flags; -+ -+ ttm->dummy_read_page = dummy_read_page; -+ -+ ttm_tt_alloc_page_directory(ttm); -+ if (!ttm->pages) { -+ ttm_tt_destroy(ttm); -+ printk(KERN_ERR "Failed allocating page table\n"); -+ return NULL; -+ } -+ ttm->be = bo_driver->create_ttm_backend_entry(bdev); -+ if (!ttm->be) { -+ ttm_tt_destroy(ttm); -+ printk(KERN_ERR "Failed creating ttm backend entry\n"); -+ return NULL; -+ } -+ ttm->state = tt_unpopulated; -+ return ttm; -+} -+ -+/** -+ * ttm_tt_unbind: -+ * -+ * @ttm: the object to unbind from the graphics device -+ * -+ * Unbind an object from the aperture. This removes the mappings -+ * from the graphics device and flushes caches if necessary. -+ */ -+void ttm_tt_unbind(struct ttm_tt *ttm) -+{ -+ int ret; -+ struct ttm_backend *be = ttm->be; -+ -+ if (ttm->state == tt_bound) { -+ ret = be->func->unbind(be); -+ BUG_ON(ret); -+ } -+ ttm->state = tt_unbound; -+} -+ -+/** -+ * ttm_tt_bind: -+ * -+ * @ttm: the ttm object to bind to the graphics device -+ * -+ * @bo_mem: the aperture memory region which will hold the object -+ * -+ * Bind a ttm object to the aperture. 
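
Taken together, ttm_tt_create() and ttm_tt_set_user() give a driver user-backed TTMs. A kernel-side sketch of that pairing, using the page flags referenced in this file and a driver-supplied dummy read page (the helper name is hypothetical):

    static struct ttm_tt *my_user_tt_create(struct ttm_bo_device *bdev,
                                            unsigned long addr,
                                            unsigned long num_pages,
                                            struct page *dummy_read_page)
    {
            struct ttm_tt *ttm;

            ttm = ttm_tt_create(bdev, num_pages << PAGE_SHIFT,
                                TTM_PAGE_FLAG_USER | TTM_PAGE_FLAG_WRITE,
                                dummy_read_page);
            if (!ttm)
                    return NULL;

            /* pins the user pages via get_user_pages(), as shown above */
            if (ttm_tt_set_user(ttm, current, addr, num_pages)) {
                    ttm_tt_destroy(ttm);
                    return NULL;
            }
            return ttm;
    }
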
This ensures that the necessary -+ * pages are allocated, flushes CPU caches as needed and marks the -+ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been -+ * modified by the GPU -+ */ -+ -+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) -+{ -+ int ret = 0; -+ struct ttm_backend *be; -+ -+ if (!ttm) -+ return -EINVAL; -+ -+ if (ttm->state == tt_bound) -+ return 0; -+ -+ be = ttm->be; -+ -+ ret = ttm_tt_populate(ttm); -+ if (ret) -+ return ret; -+ -+ ret = be->func->bind(be, bo_mem); -+ if (ret) { -+ printk(KERN_ERR "Couldn't bind backend.\n"); -+ return ret; -+ } -+ -+ ttm->state = tt_bound; -+ -+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) -+ ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY; -+ return 0; -+} -+ -+static int ttm_tt_swapin(struct ttm_tt *ttm) -+{ -+ struct address_space *swap_space; -+ struct file *swap_storage; -+ struct page *from_page; -+ struct page *to_page; -+ void *from_virtual; -+ void *to_virtual; -+ int i; -+ int ret; -+ -+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) { -+ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start, -+ ttm->num_pages); -+ if (unlikely(ret != 0)) -+ return ret; -+ -+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED; -+ return 0; -+ } -+ -+ swap_storage = ttm->swap_storage; -+ BUG_ON(swap_storage == NULL); -+ -+ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping; -+ -+ for (i = 0; i < ttm->num_pages; ++i) { -+ from_page = read_mapping_page(swap_space, i, NULL); -+ if (IS_ERR(from_page)) -+ goto out_err; -+ to_page = __ttm_tt_get_page(ttm, i); -+ if (unlikely(to_page == NULL)) -+ goto out_err; -+ -+ preempt_disable(); -+ from_virtual = kmap_atomic(from_page, KM_USER0); -+ to_virtual = kmap_atomic(to_page, KM_USER1); -+ memcpy(to_virtual, from_virtual, PAGE_SIZE); -+ kunmap_atomic(to_virtual, KM_USER1); -+ kunmap_atomic(from_virtual, KM_USER0); -+ preempt_enable(); -+ page_cache_release(from_page); -+ } -+ -+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP)) -+ fput(swap_storage); -+ ttm->swap_storage = NULL; -+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED; -+ -+ return 0; -+ out_err: -+ ttm_tt_free_alloced_pages(ttm); -+ return -ENOMEM; -+} -+ -+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) -+{ -+ struct address_space *swap_space; -+ struct file *swap_storage; -+ struct page *from_page; -+ struct page *to_page; -+ void *from_virtual; -+ void *to_virtual; -+ int i; -+ -+ BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); -+ BUG_ON(ttm->caching_state != tt_cached); -+ -+ /* -+ * For user buffers, just unpin the pages, as there should be -+ * vma references. 
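
The bind()/unbind() paths above delegate to the driver's ttm_backend. A sketch of the backend shape these call sites imply (signatures inferred from be->func usage in this file; the real psb backend would program the GART/MMU where the comments stand):

    static int my_be_populate(struct ttm_backend *be, unsigned long num_pages,
                              struct page **pages, struct page *dummy_read_page)
    {
            /* remember the page array so bind() can map it */
            return 0;
    }

    static void my_be_clear(struct ttm_backend *be)
    {
            /* forget the page array again */
    }

    static int my_be_bind(struct ttm_backend *be, struct ttm_mem_reg *bo_mem)
    {
            /* program the aperture range described by bo_mem */
            return 0;
    }

    static int my_be_unbind(struct ttm_backend *be)
    {
            /* clear the aperture entries */
            return 0;
    }

    static void my_be_destroy(struct ttm_backend *be)
    {
            kfree(be);
    }

    static struct ttm_backend_func my_be_func = {
            .populate = my_be_populate,
            .clear = my_be_clear,
            .bind = my_be_bind,
            .unbind = my_be_unbind,
            .destroy = my_be_destroy,
    };
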
-+ */ -+ -+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) { -+ ttm_tt_free_user_pages(ttm); -+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; -+ ttm->swap_storage = NULL; -+ return 0; -+ } -+ -+ if (!persistant_swap_storage) { -+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)) -+ swap_storage = shmem_file_setup("ttm swap", -+ ttm->num_pages << PAGE_SHIFT, -+ 0); -+ if (unlikely(IS_ERR(swap_storage))) { -+ printk(KERN_ERR "Failed allocating swap storage.\n"); -+ return -ENOMEM; -+ } -+#else -+ return -ENOMEM; -+#endif -+ } else -+ swap_storage = persistant_swap_storage; -+ -+ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping; -+ -+ for (i = 0; i < ttm->num_pages; ++i) { -+ from_page = ttm->pages[i]; -+ if (unlikely(from_page == NULL)) -+ continue; -+ to_page = read_mapping_page(swap_space, i, NULL); -+ if (unlikely(to_page == NULL)) -+ goto out_err; -+ -+ preempt_disable(); -+ from_virtual = kmap_atomic(from_page, KM_USER0); -+ to_virtual = kmap_atomic(to_page, KM_USER1); -+ memcpy(to_virtual, from_virtual, PAGE_SIZE); -+ kunmap_atomic(to_virtual, KM_USER1); -+ kunmap_atomic(from_virtual, KM_USER0); -+ preempt_enable(); -+ set_page_dirty(to_page); -+ mark_page_accessed(to_page); -+// unlock_page(to_page); -+ page_cache_release(to_page); -+ } -+ -+ ttm_tt_free_alloced_pages(ttm); -+ ttm->swap_storage = swap_storage; -+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; -+ if (persistant_swap_storage) -+ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP; -+ -+ return 0; -+ out_err: -+ if (!persistant_swap_storage) -+ fput(swap_storage); -+ -+ return -ENOMEM; -+} -diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h ---- a/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,79 @@ -+/************************************************************************** -+ * -+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#ifndef _TTM_USEROBJ_API_H_
-+#define _TTM_USEROBJ_API_H_
-+
-+#include "ttm/ttm_placement_user.h"
-+#include "ttm/ttm_fence_user.h"
-+#include "ttm/ttm_object.h"
-+#include "ttm/ttm_fence_api.h"
-+#include "ttm/ttm_bo_api.h"
-+
-+struct ttm_lock;
-+
-+/*
-+ * User ioctls.
-+ */
-+
-+extern int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
-+ struct ttm_bo_device *bdev,
-+ struct ttm_lock *lock, void *data);
-+extern int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
-+ struct ttm_bo_device *bdev,
-+ struct ttm_lock *lock, void *data);
-+extern int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data);
-+extern int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data);
-+extern int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data);
-+extern int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
-+ struct ttm_lock *lock, void *data);
-+extern int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data);
-+extern int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data);
-+extern int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data);
-+extern int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data);
-+
-+extern int
-+ttm_fence_user_create(struct ttm_fence_device *fdev,
-+ struct ttm_object_file *tfile,
-+ uint32_t fence_class,
-+ uint32_t fence_types,
-+ uint32_t create_flags,
-+ struct ttm_fence_object **fence, uint32_t * user_handle);
-+
-+extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
-+ *tfile,
-+ uint32_t handle);
-+
-+extern int
-+ttm_pl_verify_access(struct ttm_buffer_object *bo,
-+ struct ttm_object_file *tfile);
-+#endif
-diff -uNr a/include/drm/drm_compat.h b/include/drm/drm_compat.h
---- a/include/drm/drm_compat.h 1969-12-31 16:00:00.000000000 -0800
-+++ b/include/drm/drm_compat.h 2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,238 @@
-+/**
-+ * \file drm_compat.h
-+ * Backward compatibility definitions for Direct Rendering Manager
-+ *
-+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
-+ * \author Gareth Hughes <gareth@valinux.com>
-+ */
-+
-+/*
-+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
-+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
-+ * All rights reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL -+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR -+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#ifndef _DRM_COMPAT_H_ -+#define _DRM_COMPAT_H_ -+ -+#ifndef minor -+#define minor(x) MINOR((x)) -+#endif -+ -+#ifndef MODULE_LICENSE -+#define MODULE_LICENSE(x) -+#endif -+ -+#ifndef preempt_disable -+#define preempt_disable() -+#define preempt_enable() -+#endif -+ -+#ifndef pte_offset_map -+#define pte_offset_map pte_offset -+#define pte_unmap(pte) -+#endif -+ -+#ifndef module_param -+#define module_param(name, type, perm) -+#endif -+ -+/* older kernels had different irq args */ -+ -+#ifndef list_for_each_safe -+#define list_for_each_safe(pos, n, head) \ -+ for (pos = (head)->next, n = pos->next; pos != (head); \ -+ pos = n, n = pos->next) -+#endif -+ -+#ifndef list_for_each_entry -+#define list_for_each_entry(pos, head, member) \ -+ for (pos = list_entry((head)->next, typeof(*pos), member), \ -+ prefetch(pos->member.next); \ -+ &pos->member != (head); \ -+ pos = list_entry(pos->member.next, typeof(*pos), member), \ -+ prefetch(pos->member.next)) -+#endif -+ -+#ifndef list_for_each_entry_safe -+#define list_for_each_entry_safe(pos, n, head, member) \ -+ for (pos = list_entry((head)->next, typeof(*pos), member), \ -+ n = list_entry(pos->member.next, typeof(*pos), member); \ -+ &pos->member != (head); \ -+ pos = n, n = list_entry(n->member.next, typeof(*n), member)) -+#endif -+ -+#ifndef __user -+#define __user -+#endif -+ -+#if !defined(__put_page) -+#define __put_page(p) atomic_dec(&(p)->count) -+#endif -+ -+#if !defined(__GFP_COMP) -+#define __GFP_COMP 0 -+#endif -+ -+#if !defined(IRQF_SHARED) -+#define IRQF_SHARED SA_SHIRQ -+#endif -+ -+ -+ -+#ifndef DEFINE_SPINLOCK -+#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED -+#endif -+ -+/* old architectures */ -+#ifdef __AMD64__ -+#define __x86_64__ -+#endif -+ -+/* sysfs __ATTR macro */ -+#ifndef __ATTR -+#define __ATTR(_name,_mode,_show,_store) { \ -+ .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ -+ .show = _show, \ -+ .store = _store, \ -+} -+#endif -+ -+ -+#ifndef list_for_each_entry_safe_reverse -+#define list_for_each_entry_safe_reverse(pos, n, head, member) \ -+ for (pos = list_entry((head)->prev, typeof(*pos), member), \ -+ n = list_entry(pos->member.prev, typeof(*pos), member); \ -+ &pos->member != (head); \ -+ pos = n, n = list_entry(n->member.prev, typeof(*n), member)) -+#endif -+ -+#include <linux/mm.h> -+#include <asm/page.h> -+ -+ -+#define DRM_FULL_MM_COMPAT -+ -+ -+/* -+ * Flush relevant caches and clear a VMA structure so that page references -+ * will cause a page fault. Don't flush tlbs. -+ */ -+ -+extern void drm_clear_vma(struct vm_area_struct *vma, -+ unsigned long addr, unsigned long end); -+ -+/* -+ * Return the PTE protection map entries for the VMA flags given by -+ * flags. This is a functional interface to the kernel's protection map. -+ */ -+ -+extern pgprot_t vm_get_page_prot(unsigned long vm_flags); -+ -+#ifndef GFP_DMA32 -+#define GFP_DMA32 GFP_KERNEL -+#endif -+#ifndef __GFP_DMA32 -+#define __GFP_DMA32 GFP_KERNEL -+#endif -+ -+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) -+ -+/* -+ * These are too slow in earlier kernels. 
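
The _safe list iterator fallbacks defined above exist so that entries can be unlinked while the list is being walked; the extra cursor caches the next pointer before the body runs. The classic drain pattern:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            struct list_head link;
    };

    static void drain(struct list_head *head)
    {
            struct item *pos, *n;

            /* the _safe variant survives list_del() on the current entry */
            list_for_each_entry_safe(pos, n, head, link) {
                    list_del(&pos->link);
                    kfree(pos);
            }
    }
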
-+ */ -+ -+extern int drm_unmap_page_from_agp(struct page *page); -+extern int drm_map_page_into_agp(struct page *page); -+ -+#define map_page_into_agp drm_map_page_into_agp -+#define unmap_page_from_agp drm_unmap_page_from_agp -+#endif -+ -+ -+ -+ -+ -+/* fixme when functions are upstreamed - upstreamed for 2.6.23 */ -+#ifdef DRM_IDR_COMPAT_FN -+int idr_for_each(struct idr *idp, -+ int (*fn)(int id, void *p, void *data), void *data); -+void idr_remove_all(struct idr *idp); -+#endif -+ -+ -+ -+ -+ -+ -+#ifndef PM_EVENT_PRETHAW -+#define PM_EVENT_PRETHAW 3 -+#endif -+ -+ -+#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM) && \ -+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))) -+#define DRM_KMAP_ATOMIC_PROT_PFN -+extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, -+ pgprot_t protection); -+#endif -+ -+#if !defined(flush_agp_mappings) -+#define flush_agp_mappings() do {} while(0) -+#endif -+ -+#ifndef DMA_BIT_MASK -+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1) -+#endif -+ -+#ifndef VM_CAN_NONLINEAR -+#define DRM_VM_NOPAGE 1 -+#endif -+ -+#ifdef DRM_VM_NOPAGE -+ -+extern struct page *drm_vm_nopage(struct vm_area_struct *vma, -+ unsigned long address, int *type); -+ -+extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma, -+ unsigned long address, int *type); -+ -+extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma, -+ unsigned long address, int *type); -+ -+extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, -+ unsigned long address, int *type); -+#endif -+ -+#define drm_on_each_cpu(handler, data, wait) \ -+ on_each_cpu(handler, data, wait) -+ -+ -+#ifndef OS_HAS_GEM -+#define OS_HAS_GEM 1 -+#endif -+ -+#ifndef current_euid -+#define current_euid() (current->euid) -+#endif -+ -+#endif -diff -uNr a/include/drm/drm_internal.h b/include/drm/drm_internal.h ---- a/include/drm/drm_internal.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/include/drm/drm_internal.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,40 @@ -+/* -+ * Copyright 2007 Red Hat, Inc -+ * All rights reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the "Software"), -+ * to deal in the Software without restriction, including without limitation -+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, -+ * and/or sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following conditions: -+ * -+ * The above copyright notice and this permission notice (including the next -+ * paragraph) shall be included in all copies or substantial portions of the -+ * Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR -+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+/* This header file holds function prototypes and data types that are -+ * internal to the drm (not exported to user space) but shared across -+ * drivers and platforms */ -+ -+#ifndef __DRM_INTERNAL_H__ -+#define __DRM_INTERNAL_H__ -+ -+/** -+ * Drawable information. 
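
The DMA_BIT_MASK() fallback above matches the upstream macro (DMA_BIT_MASK(32) == 0x00000000ffffffffULL). Typical use at probe time, with the PCI API of that kernel generation:

    #include <linux/pci.h>

    static int my_probe_dma(struct pci_dev *pdev)
    {
            /* restrict the device to 32-bit DMA addresses */
            if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
                    return -ENODEV;
            return 0;
    }
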
-+ */ -+struct drm_drawable_info { -+ unsigned int num_rects; -+ struct drm_clip_rect *rects; -+}; -+ -+#endif -diff -uNr a/include/drm/ttm/ttm_fence_user.h b/include/drm/ttm/ttm_fence_user.h ---- a/include/drm/ttm/ttm_fence_user.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/include/drm/ttm/ttm_fence_user.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,147 @@ -+/************************************************************************** -+ * -+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. -+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ **************************************************************************/ -+/* -+ * Authors -+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#ifndef TTM_FENCE_USER_H -+#define TTM_FENCE_USER_H -+ -+#if !defined(__KERNEL__) && !defined(_KERNEL) -+#include <stdint.h> -+#endif -+ -+#define TTM_FENCE_MAJOR 0 -+#define TTM_FENCE_MINOR 1 -+#define TTM_FENCE_PL 0 -+#define TTM_FENCE_DATE "080819" -+ -+/** -+ * struct ttm_fence_signaled_req -+ * -+ * @handle: Handle to the fence object. Input. -+ * -+ * @fence_type: Fence types we want to flush. Input. -+ * -+ * @flush: Boolean. Flush the indicated fence_types. Input. -+ * -+ * Argument to the TTM_FENCE_SIGNALED ioctl. -+ */ -+ -+struct ttm_fence_signaled_req { -+ uint32_t handle; -+ uint32_t fence_type; -+ int32_t flush; -+ uint32_t pad64; -+}; -+ -+/** -+ * struct ttm_fence_rep -+ * -+ * @signaled_types: Fence type that has signaled. -+ * -+ * @fence_error: Command execution error. -+ * Hardware errors that are consequences of the execution -+ * of the command stream preceding the fence are reported -+ * here. -+ * -+ * Output argument to the TTM_FENCE_SIGNALED and -+ * TTM_FENCE_FINISH ioctls. -+ */ -+ -+struct ttm_fence_rep { -+ uint32_t signaled_types; -+ uint32_t fence_error; -+}; -+ -+union ttm_fence_signaled_arg { -+ struct ttm_fence_signaled_req req; -+ struct ttm_fence_rep rep; -+}; -+ -+/* -+ * Waiting mode flags for the TTM_FENCE_FINISH ioctl. -+ * -+ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling -+ * wait. -+ * -+ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU, -+ * but return -EBUSY if the buffer is busy. 
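
A user-space poll against the SIGNALED ioctl described above would look roughly like this; flush = 1 requests a flush of the named fence types per the struct documentation. The DRM_PSB_TTM_FENCE_SIGNALED command index is hypothetical:

    static int fence_poll(int fd, uint32_t handle, uint32_t type_mask)
    {
            union ttm_fence_signaled_arg arg;

            memset(&arg, 0, sizeof(arg));
            arg.req.handle = handle;
            arg.req.fence_type = type_mask;
            arg.req.flush = 1;
            if (drmCommandWriteRead(fd, DRM_PSB_TTM_FENCE_SIGNALED,
                                    &arg, sizeof(arg)) != 0)
                    return -1;
            return (arg.rep.signaled_types & type_mask) == type_mask;
    }
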
-+ */
-+
-+#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
-+#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
-+
-+/**
-+ * struct ttm_fence_finish_req
-+ *
-+ * @handle: Handle to the fence object. Input.
-+ *
-+ * @fence_type: Fence types we want to finish.
-+ *
-+ * @mode: Wait mode.
-+ *
-+ * Input to the TTM_FENCE_FINISH ioctl.
-+ */
-+
-+struct ttm_fence_finish_req {
-+ uint32_t handle;
-+ uint32_t fence_type;
-+ uint32_t mode;
-+ uint32_t pad64;
-+};
-+
-+union ttm_fence_finish_arg {
-+ struct ttm_fence_finish_req req;
-+ struct ttm_fence_rep rep;
-+};
-+
-+/**
-+ * struct ttm_fence_unref_arg
-+ *
-+ * @handle: Handle to the fence object.
-+ *
-+ * Argument to the TTM_FENCE_UNREF ioctl.
-+ */
-+
-+struct ttm_fence_unref_arg {
-+ uint32_t handle;
-+ uint32_t pad64;
-+};
-+
-+/*
-+ * Ioctl offsets from extension start.
-+ */
-+
-+#define TTM_FENCE_SIGNALED 0x01
-+#define TTM_FENCE_FINISH 0x02
-+#define TTM_FENCE_UNREF 0x03
-+
-+#endif
-diff -uNr a/include/drm/ttm/ttm_placement_common.h b/include/drm/ttm/ttm_placement_common.h
---- a/include/drm/ttm/ttm_placement_common.h 1969-12-31 16:00:00.000000000 -0800
-+++ b/include/drm/ttm/ttm_placement_common.h 2009-04-07 13:28:38.000000000 -0700
-@@ -0,0 +1,96 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#ifndef _TTM_PL_COMMON_H_
-+#define _TTM_PL_COMMON_H_
-+/*
-+ * Memory regions for data placement.
-+ */ -+ -+#define TTM_PL_SYSTEM 0 -+#define TTM_PL_TT 1 -+#define TTM_PL_VRAM 2 -+#define TTM_PL_PRIV0 3 -+#define TTM_PL_PRIV1 4 -+#define TTM_PL_PRIV2 5 -+#define TTM_PL_PRIV3 6 -+#define TTM_PL_PRIV4 7 -+#define TTM_PL_PRIV5 8 -+#define TTM_PL_CI 9 -+#define TTM_PL_SWAPPED 15 -+ -+#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM) -+#define TTM_PL_FLAG_TT (1 << TTM_PL_TT) -+#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM) -+#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0) -+#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1) -+#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2) -+#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3) -+#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4) -+#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5) -+#define TTM_PL_FLAG_CI (1 << TTM_PL_CI) -+#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED) -+#define TTM_PL_MASK_MEM 0x0000FFFF -+ -+/* -+ * Other flags that affects data placement. -+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings -+ * if available. -+ * TTM_PL_FLAG_SHARED means that another application may -+ * reference the buffer. -+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never -+ * be evicted to make room for other buffers. -+ */ -+ -+#define TTM_PL_FLAG_CACHED (1 << 16) -+#define TTM_PL_FLAG_UNCACHED (1 << 17) -+#define TTM_PL_FLAG_WC (1 << 18) -+#define TTM_PL_FLAG_SHARED (1 << 20) -+#define TTM_PL_FLAG_NO_EVICT (1 << 21) -+ -+#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \ -+ TTM_PL_FLAG_UNCACHED | \ -+ TTM_PL_FLAG_WC) -+ -+#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING) -+ -+/* -+ * Access flags to be used for CPU- and GPU- mappings. -+ * The idea is that the TTM synchronization mechanism will -+ * allow concurrent READ access and exclusive write access. -+ * Currently GPU- and CPU accesses are exclusive. -+ */ -+ -+#define TTM_ACCESS_READ (1 << 0) -+#define TTM_ACCESS_WRITE (1 << 1) -+ -+#endif -diff -uNr a/include/drm/ttm/ttm_placement_user.h b/include/drm/ttm/ttm_placement_user.h ---- a/include/drm/ttm/ttm_placement_user.h 1969-12-31 16:00:00.000000000 -0800 -+++ b/include/drm/ttm/ttm_placement_user.h 2009-04-07 13:28:38.000000000 -0700 -@@ -0,0 +1,259 @@ -+/************************************************************************** -+ * -+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA -+ * All Rights Reserved. -+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA -+ * All Rights Reserved. -+ * -+ * Permission is hereby granted, free of charge, to any person obtaining a -+ * copy of this software and associated documentation files (the -+ * "Software"), to deal in the Software without restriction, including -+ * without limitation the rights to use, copy, modify, merge, publish, -+ * distribute, sub license, and/or sell copies of the Software, and to -+ * permit persons to whom the Software is furnished to do so, subject to -+ * the following conditions: -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL -+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, -+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -+ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
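
Because the memory-type bits live in the low 16 bits (TTM_PL_MASK_MEM) and the caching/behaviour bits above 16, a placement word decomposes directly with the masks defined above:

    #include <stdint.h>
    #include "ttm/ttm_placement_common.h"

    /* e.g. a pinned, write-combined VRAM placement */
    static const uint32_t pl_example =
            TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_NO_EVICT;

    static uint32_t pl_mem_type(uint32_t placement)
    {
            return placement & TTM_PL_MASK_MEM;     /* candidate regions */
    }

    static uint32_t pl_caching(uint32_t placement)
    {
            return placement & TTM_PL_MASK_CACHING; /* CACHED/UNCACHED/WC */
    }
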
-+ * -+ * The above copyright notice and this permission notice (including the -+ * next paragraph) shall be included in all copies or substantial portions -+ * of the Software. -+ * -+ **************************************************************************/ -+/* -+ * Authors -+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com> -+ */ -+ -+#ifndef _TTM_PLACEMENT_USER_H_ -+#define _TTM_PLACEMENT_USER_H_ -+ -+#if !defined(__KERNEL__) && !defined(_KERNEL) -+#include <stdint.h> -+#else -+#include <linux/kernel.h> -+#endif -+ -+#include "ttm/ttm_placement_common.h" -+ -+#define TTM_PLACEMENT_MAJOR 0 -+#define TTM_PLACEMENT_MINOR 1 -+#define TTM_PLACEMENT_PL 0 -+#define TTM_PLACEMENT_DATE "080819" -+ -+/** -+ * struct ttm_pl_create_req -+ * -+ * @size: The buffer object size. -+ * @placement: Flags that indicate initial acceptable -+ * placement. -+ * @page_alignment: Required alignment in pages. -+ * -+ * Input to the TTM_BO_CREATE ioctl. -+ */ -+ -+struct ttm_pl_create_req { -+ uint64_t size; -+ uint32_t placement; -+ uint32_t page_alignment; -+}; -+ -+/** -+ * struct ttm_pl_create_ub_req -+ * -+ * @size: The buffer object size. -+ * @user_address: User-space address of the memory area that -+ * should be used to back the buffer object cast to 64-bit. -+ * @placement: Flags that indicate initial acceptable -+ * placement. -+ * @page_alignment: Required alignment in pages. -+ * -+ * Input to the TTM_BO_CREATE_UB ioctl. -+ */ -+ -+struct ttm_pl_create_ub_req { -+ uint64_t size; -+ uint64_t user_address; -+ uint32_t placement; -+ uint32_t page_alignment; -+}; -+ -+/** -+ * struct ttm_pl_rep -+ * -+ * @gpu_offset: The current offset into the memory region used. -+ * This can be used directly by the GPU if there are no -+ * additional GPU mapping procedures used by the driver. -+ * -+ * @bo_size: Actual buffer object size. -+ * -+ * @map_handle: Offset into the device address space. -+ * Used for map, seek, read, write. This will never change -+ * during the lifetime of an object. -+ * -+ * @placement: Flag indicating the placement status of -+ * the buffer object using the TTM_PL flags above. -+ * -+ * @sync_object_arg: Used for user-space synchronization and -+ * depends on the synchronization model used. If fences are -+ * used, this is the buffer_object::fence_type_mask -+ * -+ * Output from the TTM_PL_CREATE and TTM_PL_REFERENCE, and -+ * TTM_PL_SETSTATUS ioctls. -+ */ -+ -+struct ttm_pl_rep { -+ uint64_t gpu_offset; -+ uint64_t bo_size; -+ uint64_t map_handle; -+ uint32_t placement; -+ uint32_t handle; -+ uint32_t sync_object_arg; -+ uint32_t pad64; -+}; -+ -+/** -+ * struct ttm_pl_setstatus_req -+ * -+ * @set_placement: Placement flags to set. -+ * -+ * @clr_placement: Placement flags to clear. -+ * -+ * @handle: The object handle -+ * -+ * Input to the TTM_PL_SETSTATUS ioctl. -+ */ -+ -+struct ttm_pl_setstatus_req { -+ uint32_t set_placement; -+ uint32_t clr_placement; -+ uint32_t handle; -+ uint32_t pad64; -+}; -+ -+/** -+ * struct ttm_pl_reference_req -+ * -+ * @handle: The object to put a reference on. -+ * -+ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREFERENCE ioctls. -+ */ -+ -+struct ttm_pl_reference_req { -+ uint32_t handle; -+ uint32_t pad64; -+}; -+ -+/* -+ * ACCESS mode flags for SYNCCPU. -+ * -+ * TTM_SYNCCPU_MODE_READ will guarantee that the GPU is not -+ * writing to the buffer. -+ * -+ * TTM_SYNCCPU_MODE_WRITE will guarantee that the GPU is not -+ * accessing the buffer. 
-+ * -+ * TTM_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait -+ * for GPU accesses to finish but return -EBUSY. -+ * -+ * TTM_SYNCCPU_MODE_TRYCACHED Try to place the buffer in cacheable -+ * memory while synchronized for CPU. -+ */ -+ -+#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ -+#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE -+#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2) -+#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3) -+ -+/** -+ * struct ttm_pl_synccpu_arg -+ * -+ * @handle: The object to synchronize. -+ * -+ * @access_mode: access mode indicated by the -+ * TTM_SYNCCPU_MODE flags. -+ * -+ * @op: indicates whether to grab or release the -+ * buffer for cpu usage. -+ * -+ * Input to the TTM_PL_SYNCCPU ioctl. -+ */ -+ -+struct ttm_pl_synccpu_arg { -+ uint32_t handle; -+ uint32_t access_mode; -+ enum { -+ TTM_PL_SYNCCPU_OP_GRAB, -+ TTM_PL_SYNCCPU_OP_RELEASE -+ } op; -+ uint32_t pad64; -+}; -+ -+/* -+ * Waiting mode flags for the TTM_BO_WAITIDLE ioctl. -+ * -+ * TTM_WAITIDLE_MODE_LAZY: Allow for sleeps during polling -+ * wait. -+ * -+ * TTM_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for GPU, -+ * but return -EBUSY if the buffer is busy. -+ */ -+ -+#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0) -+#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1) -+ -+/** -+ * struct ttm_waitidle_arg -+ * -+ * @handle: The object to synchronize. -+ * -+ * @mode: wait mode indicated by the -+ * TTM_SYNCCPU_MODE flags. -+ * -+ * Argument to the TTM_BO_WAITIDLE ioctl. -+ */ -+ -+struct ttm_pl_waitidle_arg { -+ uint32_t handle; -+ uint32_t mode; -+}; -+ -+union ttm_pl_create_arg { -+ struct ttm_pl_create_req req; -+ struct ttm_pl_rep rep; -+}; -+ -+union ttm_pl_reference_arg { -+ struct ttm_pl_reference_req req; -+ struct ttm_pl_rep rep; -+}; -+ -+union ttm_pl_setstatus_arg { -+ struct ttm_pl_setstatus_req req; -+ struct ttm_pl_rep rep; -+}; -+ -+union ttm_pl_create_ub_arg { -+ struct ttm_pl_create_ub_req req; -+ struct ttm_pl_rep rep; -+}; -+ -+/* -+ * Ioctl offsets. -+ */ -+ -+#define TTM_PL_CREATE 0x00 -+#define TTM_PL_REFERENCE 0x01 -+#define TTM_PL_UNREF 0x02 -+#define TTM_PL_SYNCCPU 0x03 -+#define TTM_PL_WAITIDLE 0x04 -+#define TTM_PL_SETSTATUS 0x05 -+#define TTM_PL_CREATE_UB 0x06 -+ -+#endif |
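
These offsets pair with the handlers declared in ttm_userobj_api.h earlier in this patch; a driver's ioctl entry point would dispatch on them roughly as below (a sketch only, with a hypothetical wrapper name; the psb driver adds its own argument plumbing around this):

    static int my_pl_ioctl(struct ttm_object_file *tfile,
                           struct ttm_bo_device *bdev,
                           struct ttm_lock *lock,
                           unsigned int offset, void *data)
    {
            switch (offset) {
            case TTM_PL_CREATE:
                    return ttm_pl_create_ioctl(tfile, bdev, lock, data);
            case TTM_PL_CREATE_UB:
                    return ttm_pl_ub_create_ioctl(tfile, bdev, lock, data);
            case TTM_PL_REFERENCE:
                    return ttm_pl_reference_ioctl(tfile, data);
            case TTM_PL_UNREF:
                    return ttm_pl_unref_ioctl(tfile, data);
            case TTM_PL_SYNCCPU:
                    return ttm_pl_synccpu_ioctl(tfile, data);
            case TTM_PL_WAITIDLE:
                    return ttm_pl_waitidle_ioctl(tfile, data);
            case TTM_PL_SETSTATUS:
                    return ttm_pl_setstatus_ioctl(tfile, lock, data);
            default:
                    return -EINVAL;
            }
    }
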