From dba153e51695d71eec91123468c9ca9ddc97b9dc Mon Sep 17 00:00:00 2001
From: Koen Kooi
Date: Mon, 8 Dec 2008 16:39:00 +0100
Subject: linux-omap 2.6.27: add pvr patch from http://repository.maemo.org/pool/maemo5.0/free/k/kernel/kernel_2.6.27-20084805r03.diff.gz

* this also needs a patch to dispc.{c,h}, which needs a bit more work
* once the dispc patch is in, drivers/gpu/Makefile can add drm-tungsten and pvr to obj-y
---
 .../linux/linux-omap-2.6.27/beagleboard/defconfig  |     70 +-
 packages/linux/linux-omap-2.6.27/pvr/pvr-add.patch | 155094 ++++++++++++++++++
 packages/linux/linux-omap_2.6.27.bb                |     13 +-
 3 files changed, 155133 insertions(+), 44 deletions(-)
 create mode 100644 packages/linux/linux-omap-2.6.27/pvr/pvr-add.patch

diff --git a/packages/linux/linux-omap-2.6.27/beagleboard/defconfig b/packages/linux/linux-omap-2.6.27/beagleboard/defconfig
index ed2acaafff..31b322007a 100644
--- a/packages/linux/linux-omap-2.6.27/beagleboard/defconfig
+++ b/packages/linux/linux-omap-2.6.27/beagleboard/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
 # Linux kernel version: 2.6.27-omap1
-# Sun Dec 7 16:18:46 2008
+# Mon Dec 8 16:15:28 2008
 #
 CONFIG_ARM=y
 CONFIG_SYS_SUPPORTS_APM_EMULATION=y
@@ -50,13 +50,13 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_CGROUPS=y
 # CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_NS is not set
 CONFIG_CGROUP_DEVICE=y
-CONFIG_GROUP_SCHED=n
+CONFIG_GROUP_SCHED=y
 CONFIG_FAIR_GROUP_SCHED=y
 # CONFIG_RT_GROUP_SCHED is not set
-# CONFIG_USER_SCHED is not set
+CONFIG_USER_SCHED=y
 # CONFIG_CGROUP_SCHED is not set
-CONFIG_VZ_FAIRSCHED=y
 # CONFIG_CGROUP_CPUACCT is not set
 # CONFIG_RESOURCE_COUNTERS is not set
 CONFIG_SYSFS_DEPRECATED=y
@@ -91,8 +91,9 @@ CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLAB=y
-# CONFIG_SLUB is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
 # CONFIG_SLOB is not set
 CONFIG_PROFILING=y
 # CONFIG_MARKERS is not set
@@ -109,7 +110,6 @@ CONFIG_HAVE_KRETPROBES=y
 CONFIG_HAVE_CLK=y
 CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_HAVE_GENERIC_DMA_COHERENT=y
-CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
@@ -363,7 +363,6 @@ CONFIG_NET=y
 #
 # Networking options
 #
-CONFIG_NET_NS=y
 CONFIG_PACKET=y
 CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
@@ -1261,6 +1260,30 @@ CONFIG_DVB_ISL6421=m
 #
 # Graphics support
 #
+CONFIG_PVR=m
+CONFIG_PVR_TRANSFER_QUEUE=y
+CONFIG_PVR_SUPPORT_SRVINIT=y
+CONFIG_PVR_SUPPORT_SECURE_HANDLES=y
+CONFIG_PVR_SERVICES4=y
+CONFIG_PVR_SGXCORE_530=y
+CONFIG_PVR_SGX_CORE_REV_103=y
+CONFIG_PVR_PVR2D_ALT_2DHW=y
+CONFIG_PVR_SYSTEM_OMAP3430=y
+# CONFIG_PVR_SYSTEM_NO_HARDWARE is not set
+# CONFIG_PVR_BUFFERCLASS_EXAMPLE is not set
+CONFIG_PVR_USE_PTHREADS=y
+CONFIG_PVR_SUPPORT_SGX_EVENT_OBJECT=y
+CONFIG_PVR_SYS_USING_INTERRUPTS=y
+CONFIG_PVR_SUPPORT_HW_RECOVERY=y
+CONFIG_PVR_SUPPORT_ACTIVE_POWER_MANAGEMENT=y
+CONFIG_PVR_BUILD_RELEASE=y
+# CONFIG_PVR_BUILD_DEBUG is not set
+# CONFIG_PVR_BUILD_TIMING is not set
+CONFIG_PVR_SUPPORT_SGX1=y
+# CONFIG_DRM_VER_ORIG is not set
+CONFIG_DRM_VER_TUNGSTEN=y
+CONFIG_DRM_TUNGSTEN=y
+CONFIG_DRM_TUNGSTEN_PVR2D=m
 # CONFIG_VGASTATE is not set
 # CONFIG_VIDEO_OUTPUT_CONTROL is not set
 CONFIG_FB=y
@@ -1698,13 +1721,8 @@ CONFIG_INOTIFY_USER=y
 CONFIG_QUOTA=y
 # CONFIG_QUOTA_NETLINK_INTERFACE is not set
 CONFIG_PRINT_QUOTA_WARNING=y
-CONFIG_QUOTA_COMPAT=y
 # CONFIG_QFMT_V1 is not set
 CONFIG_QFMT_V2=y
-CONFIG_SIM_FS=m
-CONFIG_VZ_QUOTA=m
-# CONFIG_VZ_QUOTA_UNLOAD is not set
-CONFIG_VZ_QUOTA_UGID=y
 CONFIG_QUOTACTL=y
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
@@ -1870,7 +1888,6 @@ CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 # CONFIG_DEBUG_FS is not set
 # CONFIG_HEADERS_CHECK is not set
-CONFIG_SYSRQ_DEBUG=y
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_DEBUG_SHIRQ is not set
 CONFIG_DETECT_SOFTLOCKUP=y
@@ -1880,7 +1897,6 @@ CONFIG_SCHED_DEBUG=y
 CONFIG_SCHEDSTATS=y
 CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_OBJECTS is not set
-# CONFIG_DEBUG_SLAB is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
 # CONFIG_RT_MUTEX_TESTER is not set
 # CONFIG_DEBUG_SPINLOCK is not set
@@ -1922,6 +1938,7 @@ CONFIG_HAVE_ARCH_KGDB=y
 #
 # Security options
 #
 # CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
 # CONFIG_SECURITY_FILE_CAPABILITIES is not set
 CONFIG_XOR_BLOCKS=m
 CONFIG_ASYNC_CORE=m
@@ -2032,26 +2049,3 @@ CONFIG_PLIST=y
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
-
-#
-# OpenVZ
-#
-CONFIG_VE=y
-CONFIG_VE_CALLS=m
-CONFIG_VZ_GENCALLS=y
-CONFIG_VE_NETDEV=m
-CONFIG_VE_ETHDEV=m
-CONFIG_VZ_DEV=m
-CONFIG_VZ_WDOG=m
-CONFIG_VZ_CHECKPOINT=n
-
-#
-# User resources
-#
-CONFIG_BEANCOUNTERS=y
-CONFIG_BC_RSS_ACCOUNTING=y
-CONFIG_BC_IO_ACCOUNTING=y
-CONFIG_BC_IO_SCHED=y
-CONFIG_BC_SWAP_ACCOUNTING=y
-CONFIG_BC_PROC=y
-# CONFIG_BC_DEBUG is not set
diff --git a/packages/linux/linux-omap-2.6.27/pvr/pvr-add.patch b/packages/linux/linux-omap-2.6.27/pvr/pvr-add.patch
new file mode 100644
index 0000000000..9ff89fee3d
--- /dev/null
+++ b/packages/linux/linux-omap-2.6.27/pvr/pvr-add.patch
@@ -0,0 +1,155094 @@
+diff -Nurd git/drivers/gpu/drm-tungsten/ati_pcigart.c git-nokia/drivers/gpu/drm-tungsten/ati_pcigart.c
+--- kernel-2.6.27.orig/drivers/video/Kconfig
++++ kernel-2.6.27/drivers/video/Kconfig
+@@ -7,7 +7,7 @@
+ 
+ source "drivers/char/agp/Kconfig"
+ 
+-source "drivers/gpu/drm/Kconfig"
++source "drivers/gpu/Kconfig"
+ 
+ config VGASTATE
+ 	tristate
+--- git/drivers/gpu/drm-tungsten/ati_pcigart.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/ati_pcigart.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,199 @@
++/**
++ * \file ati_pcigart.c
++ * ATI PCI GART support
++ *
++ * \author Gareth Hughes
++ */
++
++/*
++ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
++ *
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */ ++ ++#include "drmP.h" ++ ++# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */ ++# define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1)) ++ ++#define ATI_PCIE_WRITE 0x4 ++#define ATI_PCIE_READ 0x8 ++ ++static __inline__ void gart_insert_page_into_table(struct drm_ati_pcigart_info *gart_info, dma_addr_t addr, u32 *pci_gart) ++{ ++ u32 page_base; ++ ++ page_base = (u32)addr & ATI_PCIGART_PAGE_MASK; ++ switch(gart_info->gart_reg_if) { ++ case DRM_ATI_GART_IGP: ++ page_base |= (upper_32_bits(addr) & 0xff) << 4; ++ page_base |= 0xc; ++ break; ++ case DRM_ATI_GART_PCIE: ++ page_base >>= 8; ++ page_base |= (upper_32_bits(addr) & 0xff) << 24; ++ page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE; ++ break; ++ default: ++ case DRM_ATI_GART_PCI: ++ break; ++ } ++ *pci_gart = cpu_to_le32(page_base); ++} ++ ++static int drm_ati_alloc_pcigart_table(struct drm_device *dev, ++ struct drm_ati_pcigart_info *gart_info) ++{ ++ gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size, ++ PAGE_SIZE, ++ gart_info->table_mask); ++ if (gart_info->table_handle == NULL) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static void drm_ati_free_pcigart_table(struct drm_device *dev, ++ struct drm_ati_pcigart_info *gart_info) ++{ ++ drm_pci_free(dev, gart_info->table_handle); ++ gart_info->table_handle = NULL; ++} ++ ++int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info) ++{ ++ struct drm_sg_mem *entry = dev->sg; ++ unsigned long pages; ++ int i; ++ int max_pages; ++ ++ /* we need to support large memory configurations */ ++ if (!entry) { ++ DRM_ERROR("no scatter/gather memory!\n"); ++ return 0; ++ } ++ ++ if (gart_info->bus_addr) { ++ ++ max_pages = (gart_info->table_size / sizeof(u32)); ++ pages = (entry->pages <= max_pages) ++ ? entry->pages : max_pages; ++ ++ for (i = 0; i < pages; i++) { ++ if (!entry->busaddr[i]) ++ break; ++ pci_unmap_page(dev->pdev, entry->busaddr[i], ++ PAGE_SIZE, PCI_DMA_TODEVICE); ++ } ++ ++ if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) ++ gart_info->bus_addr = 0; ++ } ++ ++ ++ if (gart_info->gart_table_location == DRM_ATI_GART_MAIN ++ && gart_info->table_handle) { ++ ++ drm_ati_free_pcigart_table(dev, gart_info); ++ } ++ ++ return 1; ++} ++EXPORT_SYMBOL(drm_ati_pcigart_cleanup); ++ ++int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info) ++{ ++ struct drm_sg_mem *entry = dev->sg; ++ void *address = NULL; ++ unsigned long pages; ++ u32 *pci_gart; ++ dma_addr_t bus_address = 0; ++ int i, j, ret = 0; ++ int max_pages; ++ dma_addr_t entry_addr; ++ ++ if (!entry) { ++ DRM_ERROR("no scatter/gather memory!\n"); ++ goto done; ++ } ++ ++ if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { ++ DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); ++ ++ ret = drm_ati_alloc_pcigart_table(dev, gart_info); ++ if (ret) { ++ DRM_ERROR("cannot allocate PCI GART page!\n"); ++ goto done; ++ } ++ ++ address = gart_info->table_handle->vaddr; ++ bus_address = gart_info->table_handle->busaddr; ++ } else { ++ address = gart_info->addr; ++ bus_address = gart_info->bus_addr; ++ DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n", ++ bus_address, (unsigned long)address); ++ } ++ ++ pci_gart = (u32 *) address; ++ ++ max_pages = (gart_info->table_size / sizeof(u32)); ++ pages = (entry->pages <= max_pages) ++ ? 
entry->pages : max_pages; ++ ++ memset(pci_gart, 0, max_pages * sizeof(u32)); ++ ++ for (i = 0; i < pages; i++) { ++ /* we need to support large memory configurations */ ++ entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i], ++ 0, PAGE_SIZE, PCI_DMA_TODEVICE); ++ if (entry->busaddr[i] == 0) { ++ DRM_ERROR("unable to map PCIGART pages!\n"); ++ drm_ati_pcigart_cleanup(dev, gart_info); ++ address = NULL; ++ bus_address = 0; ++ goto done; ++ } ++ ++ entry_addr = entry->busaddr[i]; ++ for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) { ++ gart_insert_page_into_table(gart_info, entry_addr, pci_gart); ++ pci_gart++; ++ entry_addr += ATI_PCIGART_PAGE_SIZE; ++ } ++ } ++ ++ ret = 1; ++ ++#if defined(__i386__) || defined(__x86_64__) ++ wbinvd(); ++#else ++ mb(); ++#endif ++ ++ done: ++ gart_info->addr = address; ++ gart_info->bus_addr = bus_address; ++ return ret; ++} ++EXPORT_SYMBOL(drm_ati_pcigart_init); +diff -Nurd git/drivers/gpu/drm-tungsten/drm_agpsupport.c git-nokia/drivers/gpu/drm-tungsten/drm_agpsupport.c +--- git/drivers/gpu/drm-tungsten/drm_agpsupport.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_agpsupport.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,715 @@ ++/** ++ * \file drm_agpsupport.c ++ * DRM support for AGP/GART backend ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include ++ ++#if __OS_HAS_AGP ++ ++/** ++ * Get AGP information. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a (output) drm_agp_info structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device has been initialized and acquired and fills in the ++ * drm_agp_info structure with the information in drm_agp_head::agp_info. 
++ */ ++int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info) ++{ ++ DRM_AGP_KERN *kern; ++ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++ ++ kern = &dev->agp->agp_info; ++ info->agp_version_major = kern->version.major; ++ info->agp_version_minor = kern->version.minor; ++ info->mode = kern->mode; ++ info->aperture_base = kern->aper_base; ++ info->aperture_size = kern->aper_size * 1024 * 1024; ++ info->memory_allowed = kern->max_memory << PAGE_SHIFT; ++ info->memory_used = kern->current_memory << PAGE_SHIFT; ++ info->id_vendor = kern->device->vendor; ++ info->id_device = kern->device->device; ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_agp_info); ++ ++int drm_agp_info_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_agp_info *info = data; ++ int err; ++ ++ err = drm_agp_info(dev, info); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++/** ++ * Acquire the AGP device. ++ * ++ * \param dev DRM device that is to acquire AGP. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device hasn't been acquired before and calls ++ * \c agp_backend_acquire. ++ */ ++int drm_agp_acquire(struct drm_device * dev) ++{ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ int retcode; ++#endif ++ ++ if (!dev->agp) ++ return -ENODEV; ++ if (dev->agp->acquired) ++ return -EBUSY; ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ if ((retcode = agp_backend_acquire())) ++ return retcode; ++#else ++ if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev))) ++ return -ENODEV; ++#endif ++ ++ dev->agp->acquired = 1; ++ return 0; ++} ++EXPORT_SYMBOL(drm_agp_acquire); ++ ++/** ++ * Acquire the AGP device (ioctl). ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device hasn't been acquired before and calls ++ * \c agp_backend_acquire. ++ */ ++int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ return drm_agp_acquire((struct drm_device *) file_priv->minor->dev); ++} ++ ++/** ++ * Release the AGP device. ++ * ++ * \param dev DRM device that is to release AGP. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device has been acquired and calls \c agp_backend_release. ++ */ ++int drm_agp_release(struct drm_device *dev) ++{ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ agp_backend_release(); ++#else ++ agp_backend_release(dev->agp->bridge); ++#endif ++ dev->agp->acquired = 0; ++ return 0; ++ ++} ++EXPORT_SYMBOL(drm_agp_release); ++ ++int drm_agp_release_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ return drm_agp_release(dev); ++} ++ ++/** ++ * Enable the AGP bus. ++ * ++ * \param dev DRM device that has previously acquired AGP. ++ * \param mode Requested AGP mode. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device has been acquired but not enabled, and calls ++ * \c agp_enable. 
++ */ ++int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) ++{ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++ ++ dev->agp->mode = mode.mode; ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ agp_enable(mode.mode); ++#else ++ agp_enable(dev->agp->bridge, mode.mode); ++#endif ++ dev->agp->enabled = 1; ++ return 0; ++} ++EXPORT_SYMBOL(drm_agp_enable); ++ ++int drm_agp_enable_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_agp_mode *mode = data; ++ ++ return drm_agp_enable(dev, *mode); ++} ++ ++/** ++ * Allocate AGP memory. ++ * ++ * \param inode device inode. ++ * \param file_priv file private pointer. ++ * \param cmd command. ++ * \param arg pointer to a drm_agp_buffer structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device is present and has been acquired, allocates the ++ * memory via alloc_agp() and creates a drm_agp_mem entry for it. ++ */ ++int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) ++{ ++ struct drm_agp_mem *entry; ++ DRM_AGP_MEM *memory; ++ unsigned long pages; ++ u32 type; ++ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++ if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS))) ++ return -ENOMEM; ++ ++ memset(entry, 0, sizeof(*entry)); ++ ++ pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; ++ type = (u32) request->type; ++ if (!(memory = drm_alloc_agp(dev, pages, type))) { ++ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); ++ return -ENOMEM; ++ } ++ ++ entry->handle = (unsigned long)memory->key + 1; ++ entry->memory = memory; ++ entry->bound = 0; ++ entry->pages = pages; ++ list_add(&entry->head, &dev->agp->memory); ++ ++ request->handle = entry->handle; ++ request->physical = memory->physical; ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_agp_alloc); ++ ++ ++int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_agp_buffer *request = data; ++ ++ return drm_agp_alloc(dev, request); ++} ++ ++/** ++ * Search for the AGP memory entry associated with a handle. ++ * ++ * \param dev DRM device structure. ++ * \param handle AGP memory handle. ++ * \return pointer to the drm_agp_mem structure associated with \p handle. ++ * ++ * Walks through drm_agp_head::memory until finding a matching handle. ++ */ ++static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev, ++ unsigned long handle) ++{ ++ struct drm_agp_mem *entry; ++ ++ list_for_each_entry(entry, &dev->agp->memory, head) { ++ if (entry->handle == handle) ++ return entry; ++ } ++ return NULL; ++} ++ ++/** ++ * Unbind AGP memory from the GATT (ioctl). ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_agp_binding structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device is present and acquired, looks-up the AGP memory ++ * entry and passes it to the unbind_agp() function. 
++ */ ++int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) ++{ ++ struct drm_agp_mem *entry; ++ int ret; ++ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++ if (!(entry = drm_agp_lookup_entry(dev, request->handle))) ++ return -EINVAL; ++ if (!entry->bound) ++ return -EINVAL; ++ ret = drm_unbind_agp(entry->memory); ++ if (ret == 0) ++ entry->bound = 0; ++ return ret; ++} ++EXPORT_SYMBOL(drm_agp_unbind); ++ ++ ++int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_agp_binding *request = data; ++ ++ return drm_agp_unbind(dev, request); ++} ++ ++ ++/** ++ * Bind AGP memory into the GATT (ioctl) ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_agp_binding structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device is present and has been acquired and that no memory ++ * is currently bound into the GATT. Looks-up the AGP memory entry and passes ++ * it to bind_agp() function. ++ */ ++int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) ++{ ++ struct drm_agp_mem *entry; ++ int retcode; ++ int page; ++ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++ if (!(entry = drm_agp_lookup_entry(dev, request->handle))) ++ return -EINVAL; ++ if (entry->bound) ++ return -EINVAL; ++ page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE; ++ if ((retcode = drm_bind_agp(entry->memory, page))) ++ return retcode; ++ entry->bound = dev->agp->base + (page << PAGE_SHIFT); ++ DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", ++ dev->agp->base, entry->bound); ++ return 0; ++} ++EXPORT_SYMBOL(drm_agp_bind); ++ ++ ++int drm_agp_bind_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_agp_binding *request = data; ++ ++ return drm_agp_bind(dev, request); ++} ++ ++ ++/** ++ * Free AGP memory (ioctl). ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_agp_buffer structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the AGP device is present and has been acquired and looks up the ++ * AGP memory entry. If the memory it's currently bound, unbind it via ++ * unbind_agp(). Frees it via free_agp() as well as the entry itself ++ * and unlinks from the doubly linked list it's inserted in. ++ */ ++int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) ++{ ++ struct drm_agp_mem *entry; ++ ++ if (!dev->agp || !dev->agp->acquired) ++ return -EINVAL; ++ if (!(entry = drm_agp_lookup_entry(dev, request->handle))) ++ return -EINVAL; ++ if (entry->bound) ++ drm_unbind_agp(entry->memory); ++ ++ list_del(&entry->head); ++ ++ drm_free_agp(entry->memory, entry->pages); ++ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); ++ return 0; ++} ++EXPORT_SYMBOL(drm_agp_free); ++ ++ ++ ++int drm_agp_free_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_agp_buffer *request = data; ++ ++ return drm_agp_free(dev, request); ++} ++ ++ ++/** ++ * Initialize the AGP resources. ++ * ++ * \return pointer to a drm_agp_head structure. ++ * ++ * Gets the drm_agp_t structure which is made available by the agpgart module ++ * via the inter_module_* functions. Creates and initializes a drm_agp_head ++ * structure. 
++ */ ++struct drm_agp_head *drm_agp_init(struct drm_device *dev) ++{ ++ struct drm_agp_head *head = NULL; ++ ++ if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS))) ++ return NULL; ++ memset((void *)head, 0, sizeof(*head)); ++ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ agp_copy_info(&head->agp_info); ++#else ++ head->bridge = agp_find_bridge(dev->pdev); ++ if (!head->bridge) { ++ if (!(head->bridge = agp_backend_acquire(dev->pdev))) { ++ drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); ++ return NULL; ++ } ++ agp_copy_info(head->bridge, &head->agp_info); ++ agp_backend_release(head->bridge); ++ } else { ++ agp_copy_info(head->bridge, &head->agp_info); ++ } ++#endif ++ if (head->agp_info.chipset == NOT_SUPPORTED) { ++ drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS); ++ return NULL; ++ } ++ INIT_LIST_HEAD(&head->memory); ++ head->cant_use_aperture = head->agp_info.cant_use_aperture; ++ head->page_mask = head->agp_info.page_mask; ++ head->base = head->agp_info.aper_base; ++ return head; ++} ++ ++/** Calls agp_allocate_memory() */ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type) ++{ ++ return agp_allocate_memory(pages, type); ++} ++#else ++DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, ++ size_t pages, u32 type) ++{ ++ return agp_allocate_memory(bridge, pages, type); ++} ++#endif ++ ++/** Calls agp_free_memory() */ ++int drm_agp_free_memory(DRM_AGP_MEM * handle) ++{ ++ if (!handle) ++ return 0; ++ agp_free_memory(handle); ++ return 1; ++} ++ ++/** Calls agp_bind_memory() */ ++int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start) ++{ ++ if (!handle) ++ return -EINVAL; ++ return agp_bind_memory(handle, start); ++} ++EXPORT_SYMBOL(drm_agp_bind_memory); ++ ++/** Calls agp_unbind_memory() */ ++int drm_agp_unbind_memory(DRM_AGP_MEM * handle) ++{ ++ if (!handle) ++ return -EINVAL; ++ return agp_unbind_memory(handle); ++} ++ ++/** ++ * Binds a collection of pages into AGP memory at the given offset, returning ++ * the AGP memory structure containing them. ++ * ++ * No reference is held on the pages during this time -- it is up to the ++ * caller to handle that. ++ */ ++DRM_AGP_MEM * ++drm_agp_bind_pages(struct drm_device *dev, ++ struct page **pages, ++ unsigned long num_pages, ++ uint32_t gtt_offset) ++{ ++ DRM_AGP_MEM *mem; ++ int ret, i; ++ ++ DRM_DEBUG("drm_agp_populate_ttm\n"); ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY); ++#else ++ mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages, ++ AGP_USER_MEMORY); ++#endif ++ if (mem == NULL) { ++ DRM_ERROR("Failed to allocate memory for %ld pages\n", ++ num_pages); ++ return NULL; ++ } ++ ++ for (i = 0; i < num_pages; i++) ++ mem->memory[i] = phys_to_gart(page_to_phys(pages[i])); ++ mem->page_count = num_pages; ++ ++ mem->is_flushed = true; ++ ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE); ++ if (ret != 0) { ++ DRM_ERROR("Failed to bind AGP memory: %d\n", ret); ++ agp_free_memory(mem); ++ return NULL; ++ } ++ ++ return mem; ++} ++EXPORT_SYMBOL(drm_agp_bind_pages); ++ ++/* ++ * AGP ttm backend interface. 
++ */ ++ ++#ifndef AGP_USER_TYPES ++#define AGP_USER_TYPES (1 << 16) ++#define AGP_USER_MEMORY (AGP_USER_TYPES) ++#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) ++#endif ++#define AGP_REQUIRED_MAJOR 0 ++#define AGP_REQUIRED_MINOR 102 ++ ++static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) ++{ ++ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1); ++} ++ ++ ++static int drm_agp_populate(struct drm_ttm_backend *backend, ++ unsigned long num_pages, struct page **pages, ++ struct page *dummy_read_page) ++{ ++ struct drm_agp_ttm_backend *agp_be = ++ container_of(backend, struct drm_agp_ttm_backend, backend); ++ struct page **cur_page, **last_page = pages + num_pages; ++ DRM_AGP_MEM *mem; ++ int dummy_page_count = 0; ++ ++ if (drm_alloc_memctl(num_pages * sizeof(void *))) ++ return -1; ++ ++ DRM_DEBUG("drm_agp_populate_ttm\n"); ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY); ++#else ++ mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY); ++#endif ++ if (!mem) { ++ drm_free_memctl(num_pages * sizeof(void *)); ++ return -1; ++ } ++ ++ DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count); ++ mem->page_count = 0; ++ for (cur_page = pages; cur_page < last_page; ++cur_page) { ++ struct page *page = *cur_page; ++ if (!page) { ++ page = dummy_read_page; ++ ++dummy_page_count; ++ } ++ mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page)); ++ } ++ if (dummy_page_count) ++ DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count); ++ agp_be->mem = mem; ++ return 0; ++} ++ ++static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, ++ struct drm_bo_mem_reg *bo_mem) ++{ ++ struct drm_agp_ttm_backend *agp_be = ++ container_of(backend, struct drm_agp_ttm_backend, backend); ++ DRM_AGP_MEM *mem = agp_be->mem; ++ int ret; ++ int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED); ++ ++ DRM_DEBUG("drm_agp_bind_ttm\n"); ++ mem->is_flushed = true; ++ mem->type = AGP_USER_MEMORY; ++ /* CACHED MAPPED implies not snooped memory */ ++ if (snooped) ++ mem->type = AGP_USER_CACHED_MEMORY; ++ ++ ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start); ++ if (ret) ++ DRM_ERROR("AGP Bind memory failed\n"); ++ ++ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ? 
++ DRM_BE_FLAG_BOUND_CACHED : 0, ++ DRM_BE_FLAG_BOUND_CACHED); ++ return ret; ++} ++ ++static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) ++{ ++ struct drm_agp_ttm_backend *agp_be = ++ container_of(backend, struct drm_agp_ttm_backend, backend); ++ ++ DRM_DEBUG("drm_agp_unbind_ttm\n"); ++ if (agp_be->mem->is_bound) ++ return drm_agp_unbind_memory(agp_be->mem); ++ else ++ return 0; ++} ++ ++static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) ++{ ++ struct drm_agp_ttm_backend *agp_be = ++ container_of(backend, struct drm_agp_ttm_backend, backend); ++ DRM_AGP_MEM *mem = agp_be->mem; ++ ++ DRM_DEBUG("drm_agp_clear_ttm\n"); ++ if (mem) { ++ unsigned long num_pages = mem->page_count; ++ backend->func->unbind(backend); ++ agp_free_memory(mem); ++ drm_free_memctl(num_pages * sizeof(void *)); ++ } ++ agp_be->mem = NULL; ++} ++ ++static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) ++{ ++ struct drm_agp_ttm_backend *agp_be; ++ ++ if (backend) { ++ DRM_DEBUG("drm_agp_destroy_ttm\n"); ++ agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); ++ if (agp_be) { ++ if (agp_be->mem) ++ backend->func->clear(backend); ++ drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM); ++ } ++ } ++} ++ ++static struct drm_ttm_backend_func agp_ttm_backend = { ++ .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust, ++ .populate = drm_agp_populate, ++ .clear = drm_agp_clear_ttm, ++ .bind = drm_agp_bind_ttm, ++ .unbind = drm_agp_unbind_ttm, ++ .destroy = drm_agp_destroy_ttm, ++}; ++ ++struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev) ++{ ++ ++ struct drm_agp_ttm_backend *agp_be; ++ struct agp_kern_info *info; ++ ++ if (!dev->agp) { ++ DRM_ERROR("AGP is not initialized.\n"); ++ return NULL; ++ } ++ info = &dev->agp->agp_info; ++ ++ if (info->version.major != AGP_REQUIRED_MAJOR || ++ info->version.minor < AGP_REQUIRED_MINOR) { ++ DRM_ERROR("Wrong agpgart version %d.%d\n" ++ "\tYou need at least version %d.%d.\n", ++ info->version.major, ++ info->version.minor, ++ AGP_REQUIRED_MAJOR, ++ AGP_REQUIRED_MINOR); ++ return NULL; ++ } ++ ++ ++ agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM); ++ if (!agp_be) ++ return NULL; ++ ++ agp_be->mem = NULL; ++ ++ agp_be->bridge = dev->agp->bridge; ++ agp_be->populated = false; ++ agp_be->backend.func = &agp_ttm_backend; ++ agp_be->backend.dev = dev; ++ ++ return &agp_be->backend; ++} ++EXPORT_SYMBOL(drm_agp_init_ttm); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25) ++void drm_agp_chipset_flush(struct drm_device *dev) ++{ ++ agp_flush_chipset(dev->agp->bridge); ++} ++EXPORT_SYMBOL(drm_agp_chipset_flush); ++#endif ++ ++#endif /* __OS_HAS_AGP */ +diff -Nurd git/drivers/gpu/drm-tungsten/drm_auth.c git-nokia/drivers/gpu/drm-tungsten/drm_auth.c +--- git/drivers/gpu/drm-tungsten/drm_auth.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_auth.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,189 @@ ++/** ++ * \file drm_auth.c ++ * IOCTLs for authentication ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++/** ++ * Find the file with the given magic number. ++ * ++ * \param dev DRM device. ++ * \param magic magic number. ++ * ++ * Searches in drm_device::magiclist within all files with the same hash key ++ * the one with matching magic number, while holding the drm_device::struct_mutex ++ * lock. ++ */ ++static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic) ++{ ++ struct drm_file *retval = NULL; ++ struct drm_magic_entry *pt; ++ struct drm_hash_item *hash; ++ ++ mutex_lock(&dev->struct_mutex); ++ if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { ++ pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); ++ retval = pt->priv; ++ } ++ mutex_unlock(&dev->struct_mutex); ++ return retval; ++} ++ ++/** ++ * Adds a magic number. ++ * ++ * \param dev DRM device. ++ * \param priv file private data. ++ * \param magic magic number. ++ * ++ * Creates a drm_magic_entry structure and appends to the linked list ++ * associated the magic number hash key in drm_device::magiclist, while holding ++ * the drm_device::struct_mutex lock. ++ */ ++static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, ++ drm_magic_t magic) ++{ ++ struct drm_magic_entry *entry; ++ ++ DRM_DEBUG("%d\n", magic); ++ ++ entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); ++ if (!entry) ++ return -ENOMEM; ++ memset(entry, 0, sizeof(*entry)); ++ entry->priv = priv; ++ entry->hash_item.key = (unsigned long)magic; ++ mutex_lock(&dev->struct_mutex); ++ drm_ht_insert_item(&dev->magiclist, &entry->hash_item); ++ list_add_tail(&entry->head, &dev->magicfree); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/** ++ * Remove a magic number. ++ * ++ * \param dev DRM device. ++ * \param magic magic number. ++ * ++ * Searches and unlinks the entry in drm_device::magiclist with the magic ++ * number hash key, while holding the drm_device::struct_mutex lock. 
++ */ ++static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) ++{ ++ struct drm_magic_entry *pt; ++ struct drm_hash_item *hash; ++ ++ DRM_DEBUG("%d\n", magic); ++ ++ mutex_lock(&dev->struct_mutex); ++ if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); ++ drm_ht_remove_item(&dev->magiclist, hash); ++ list_del(&pt->head); ++ mutex_unlock(&dev->struct_mutex); ++ ++ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); ++ ++ return 0; ++} ++ ++/** ++ * Get a unique magic number (ioctl). ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a resulting drm_auth structure. ++ * \return zero on success, or a negative number on failure. ++ * ++ * If there is a magic number in drm_file::magic then use it, otherwise ++ * searches an unique non-zero magic number and add it associating it with \p ++ * file_priv. ++ */ ++int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ static drm_magic_t sequence = 0; ++ static DEFINE_SPINLOCK(lock); ++ struct drm_auth *auth = data; ++ ++ /* Find unique magic */ ++ if (file_priv->magic) { ++ auth->magic = file_priv->magic; ++ } else { ++ do { ++ spin_lock(&lock); ++ if (!sequence) ++ ++sequence; /* reserve 0 */ ++ auth->magic = sequence++; ++ spin_unlock(&lock); ++ } while (drm_find_file(dev, auth->magic)); ++ file_priv->magic = auth->magic; ++ drm_add_magic(dev, file_priv, auth->magic); ++ } ++ ++ DRM_DEBUG("%u\n", auth->magic); ++ ++ return 0; ++} ++ ++/** ++ * Authenticate with a magic. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_auth structure. ++ * \return zero if authentication successed, or a negative number otherwise. ++ * ++ * Checks if \p file_priv is associated with the magic number passed in \arg. ++ */ ++int drm_authmagic(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_auth *auth = data; ++ struct drm_file *file; ++ ++ DRM_DEBUG("%u\n", auth->magic); ++ if ((file = drm_find_file(dev, auth->magic))) { ++ file->authenticated = 1; ++ drm_remove_magic(dev, auth->magic); ++ return 0; ++ } ++ return -EINVAL; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/drm_bo.c git-nokia/drivers/gpu/drm-tungsten/drm_bo.c +--- git/drivers/gpu/drm-tungsten/drm_bo.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_bo.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,2796 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++/* ++ * Locking may look a bit complicated but isn't really: ++ * ++ * The buffer usage atomic_t needs to be protected by dev->struct_mutex ++ * when there is a chance that it can be zero before or after the operation. ++ * ++ * dev->struct_mutex also protects all lists and list heads, ++ * Hash tables and hash heads. ++ * ++ * bo->mutex protects the buffer object itself excluding the usage field. ++ * bo->mutex does also protect the buffer list heads, so to manipulate those, ++ * we need both the bo->mutex and the dev->struct_mutex. ++ * ++ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal ++ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex, ++ * the list traversal will, in general, need to be restarted. ++ * ++ */ ++ ++static void drm_bo_destroy_locked(struct drm_buffer_object *bo); ++static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo); ++static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo); ++static void drm_bo_unmap_virtual(struct drm_buffer_object *bo); ++ ++static inline uint64_t drm_bo_type_flags(unsigned type) ++{ ++ return (1ULL << (24 + type)); ++} ++ ++/* ++ * bo locked. dev->struct_mutex locked. ++ */ ++ ++void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo) ++{ ++ struct drm_mem_type_manager *man; ++ ++ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); ++ DRM_ASSERT_LOCKED(&bo->mutex); ++ ++ man = &bo->dev->bm.man[bo->pinned_mem_type]; ++ list_add_tail(&bo->pinned_lru, &man->pinned); ++} ++ ++void drm_bo_add_to_lru(struct drm_buffer_object *bo) ++{ ++ struct drm_mem_type_manager *man; ++ ++ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); ++ ++ if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) ++ || bo->mem.mem_type != bo->pinned_mem_type) { ++ man = &bo->dev->bm.man[bo->mem.mem_type]; ++ list_add_tail(&bo->lru, &man->lru); ++ } else { ++ INIT_LIST_HEAD(&bo->lru); ++ } ++} ++ ++static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci) ++{ ++#ifdef DRM_ODD_MM_COMPAT ++ int ret; ++ ++ if (!bo->map_list.map) ++ return 0; ++ ++ ret = drm_bo_lock_kmm(bo); ++ if (ret) ++ return ret; ++ drm_bo_unmap_virtual(bo); ++ if (old_is_pci) ++ drm_bo_finish_unmap(bo); ++#else ++ if (!bo->map_list.map) ++ return 0; ++ ++ drm_bo_unmap_virtual(bo); ++#endif ++ return 0; ++} ++ ++static void drm_bo_vm_post_move(struct drm_buffer_object *bo) ++{ ++#ifdef DRM_ODD_MM_COMPAT ++ int ret; ++ ++ if (!bo->map_list.map) ++ return; ++ ++ ret = drm_bo_remap_bound(bo); ++ if (ret) { ++ DRM_ERROR("Failed to remap a bound buffer object.\n" ++ "\tThis might cause a sigbus later.\n"); ++ } ++ drm_bo_unlock_kmm(bo); ++#endif ++} ++ ++/* ++ * Call bo->mutex locked. 
++ */ ++ ++static int drm_bo_add_ttm(struct drm_buffer_object *bo) ++{ ++ struct drm_device *dev = bo->dev; ++ int ret = 0; ++ uint32_t page_flags = 0; ++ ++ DRM_ASSERT_LOCKED(&bo->mutex); ++ bo->ttm = NULL; ++ ++ if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE) ++ page_flags |= DRM_TTM_PAGE_WRITE; ++ ++ switch (bo->type) { ++ case drm_bo_type_device: ++ case drm_bo_type_kernel: ++ bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, ++ page_flags, dev->bm.dummy_read_page); ++ if (!bo->ttm) ++ ret = -ENOMEM; ++ break; ++ case drm_bo_type_user: ++ bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, ++ page_flags | DRM_TTM_PAGE_USER, ++ dev->bm.dummy_read_page); ++ if (!bo->ttm) ++ ret = -ENOMEM; ++ ++ ret = drm_ttm_set_user(bo->ttm, current, ++ bo->buffer_start, ++ bo->num_pages); ++ if (ret) ++ return ret; ++ ++ break; ++ default: ++ DRM_ERROR("Illegal buffer object type\n"); ++ ret = -EINVAL; ++ break; ++ } ++ ++ return ret; ++} ++ ++static int drm_bo_handle_move_mem(struct drm_buffer_object *bo, ++ struct drm_bo_mem_reg *mem, ++ int evict, int no_wait) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); ++ int new_is_pci = drm_mem_reg_is_pci(dev, mem); ++ struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type]; ++ struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type]; ++ int ret = 0; ++ ++ if (old_is_pci || new_is_pci || ++ ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED)) ++ ret = drm_bo_vm_pre_move(bo, old_is_pci); ++ if (ret) ++ return ret; ++ ++ /* ++ * Create and bind a ttm if required. ++ */ ++ ++ if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) { ++ ret = drm_bo_add_ttm(bo); ++ if (ret) ++ goto out_err; ++ ++ if (mem->mem_type != DRM_BO_MEM_LOCAL) { ++ ret = drm_ttm_bind(bo->ttm, mem); ++ if (ret) ++ goto out_err; ++ } ++ ++ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) { ++ ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ uint64_t save_flags = old_mem->flags; ++ uint64_t save_proposed_flags = old_mem->proposed_flags; ++ ++ *old_mem = *mem; ++ mem->mm_node = NULL; ++ old_mem->proposed_flags = save_proposed_flags; ++ DRM_FLAG_MASKED(save_flags, mem->flags, ++ DRM_BO_MASK_MEMTYPE); ++ goto moved; ++ } ++ ++ } ++ ++ if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && ++ !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) ++ ret = drm_bo_move_ttm(bo, evict, no_wait, mem); ++ else if (dev->driver->bo_driver->move) ++ ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem); ++ else ++ ret = drm_bo_move_memcpy(bo, evict, no_wait, mem); ++ ++ if (ret) ++ goto out_err; ++ ++moved: ++ if (old_is_pci || new_is_pci) ++ drm_bo_vm_post_move(bo); ++ ++ if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { ++ ret = ++ dev->driver->bo_driver->invalidate_caches(dev, ++ bo->mem.flags); ++ if (ret) ++ DRM_ERROR("Can not flush read caches\n"); ++ } ++ ++ DRM_FLAG_MASKED(bo->priv_flags, ++ (evict) ? _DRM_BO_FLAG_EVICTED : 0, ++ _DRM_BO_FLAG_EVICTED); ++ ++ if (bo->mem.mm_node) ++ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + ++ bm->man[bo->mem.mem_type].gpu_offset; ++ ++ ++ return 0; ++ ++out_err: ++ if (old_is_pci || new_is_pci) ++ drm_bo_vm_post_move(bo); ++ ++ new_man = &bm->man[bo->mem.mem_type]; ++ if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) { ++ drm_ttm_unbind(bo->ttm); ++ drm_ttm_destroy(bo->ttm); ++ bo->ttm = NULL; ++ } ++ ++ return ret; ++} ++ ++/* ++ * Call bo->mutex locked. ++ * Returns -EBUSY if the buffer is currently rendered to or from. 
0 otherwise. ++ */ ++ ++static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced) ++{ ++ struct drm_fence_object *fence = bo->fence; ++ ++ if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) ++ return -EBUSY; ++ ++ if (fence) { ++ if (drm_fence_object_signaled(fence, bo->fence_type)) { ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ return 0; ++ } ++ drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE); ++ if (drm_fence_object_signaled(fence, bo->fence_type)) { ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ return 0; ++ } ++ return -EBUSY; ++ } ++ return 0; ++} ++ ++static int drm_bo_check_unfenced(struct drm_buffer_object *bo) ++{ ++ int ret; ++ ++ mutex_lock(&bo->mutex); ++ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); ++ mutex_unlock(&bo->mutex); ++ return ret; ++} ++ ++ ++/* ++ * Call bo->mutex locked. ++ * Wait until the buffer is idle. ++ */ ++ ++int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible, ++ int no_wait, int check_unfenced) ++{ ++ int ret; ++ ++ DRM_ASSERT_LOCKED(&bo->mutex); ++ while(unlikely(drm_bo_busy(bo, check_unfenced))) { ++ if (no_wait) ++ return -EBUSY; ++ ++ if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) { ++ mutex_unlock(&bo->mutex); ++ wait_event(bo->event_queue, !drm_bo_check_unfenced(bo)); ++ mutex_lock(&bo->mutex); ++ bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED; ++ } ++ ++ if (bo->fence) { ++ struct drm_fence_object *fence; ++ uint32_t fence_type = bo->fence_type; ++ ++ drm_fence_reference_unlocked(&fence, bo->fence); ++ mutex_unlock(&bo->mutex); ++ ++ ret = drm_fence_object_wait(fence, lazy, !interruptible, ++ fence_type); ++ ++ drm_fence_usage_deref_unlocked(&fence); ++ mutex_lock(&bo->mutex); ++ bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED; ++ if (ret) ++ return ret; ++ } ++ ++ } ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_wait); ++ ++static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ if (bo->fence) { ++ if (bm->nice_mode) { ++ unsigned long _end = jiffies + 3 * DRM_HZ; ++ int ret; ++ do { ++ ret = drm_bo_wait(bo, 0, 0, 0, 0); ++ if (ret && allow_errors) ++ return ret; ++ ++ } while (ret && !time_after_eq(jiffies, _end)); ++ ++ if (bo->fence) { ++ bm->nice_mode = 0; ++ DRM_ERROR("Detected GPU lockup or " ++ "fence driver was taken down. " ++ "Evicting buffer.\n"); ++ } ++ } ++ if (bo->fence) ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ } ++ return 0; ++} ++ ++/* ++ * Call dev->struct_mutex locked. ++ * Attempts to remove all private references to a buffer by expiring its ++ * fence object and removing from lru lists and memory managers. 
++ */ ++ ++static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ ++ atomic_inc(&bo->usage); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_lock(&bo->mutex); ++ ++ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); ++ ++ if (bo->fence && drm_fence_object_signaled(bo->fence, ++ bo->fence_type)) ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ ++ if (bo->fence && remove_all) ++ (void)drm_bo_expire_fence(bo, 0); ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (!atomic_dec_and_test(&bo->usage)) ++ goto out; ++ ++ if (!bo->fence) { ++ list_del_init(&bo->lru); ++ if (bo->mem.mm_node) { ++ drm_mm_put_block(bo->mem.mm_node); ++ if (bo->pinned_node == bo->mem.mm_node) ++ bo->pinned_node = NULL; ++ bo->mem.mm_node = NULL; ++ } ++ list_del_init(&bo->pinned_lru); ++ if (bo->pinned_node) { ++ drm_mm_put_block(bo->pinned_node); ++ bo->pinned_node = NULL; ++ } ++ list_del_init(&bo->ddestroy); ++ mutex_unlock(&bo->mutex); ++ drm_bo_destroy_locked(bo); ++ return; ++ } ++ ++ if (list_empty(&bo->ddestroy)) { ++ drm_fence_object_flush(bo->fence, bo->fence_type); ++ list_add_tail(&bo->ddestroy, &bm->ddestroy); ++ schedule_delayed_work(&bm->wq, ++ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); ++ } ++ ++out: ++ mutex_unlock(&bo->mutex); ++ return; ++} ++ ++/* ++ * Verify that refcount is 0 and that there are no internal references ++ * to the buffer object. Then destroy it. ++ */ ++ ++static void drm_bo_destroy_locked(struct drm_buffer_object *bo) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ ++ if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && ++ list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && ++ list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) { ++ if (bo->fence != NULL) { ++ DRM_ERROR("Fence was non-zero.\n"); ++ drm_bo_cleanup_refs(bo, 0); ++ return; ++ } ++ ++#ifdef DRM_ODD_MM_COMPAT ++ BUG_ON(!list_empty(&bo->vma_list)); ++ BUG_ON(!list_empty(&bo->p_mm_list)); ++#endif ++ ++ if (bo->ttm) { ++ drm_ttm_unbind(bo->ttm); ++ drm_ttm_destroy(bo->ttm); ++ bo->ttm = NULL; ++ } ++ ++ atomic_dec(&bm->count); ++ ++ drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); ++ ++ return; ++ } ++ ++ /* ++ * Some stuff is still trying to reference the buffer object. ++ * Get rid of those references. ++ */ ++ ++ drm_bo_cleanup_refs(bo, 0); ++ ++ return; ++} ++ ++/* ++ * Call dev->struct_mutex locked. 
++ */ ++ ++static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ struct drm_buffer_object *entry, *nentry; ++ struct list_head *list, *next; ++ ++ list_for_each_safe(list, next, &bm->ddestroy) { ++ entry = list_entry(list, struct drm_buffer_object, ddestroy); ++ ++ nentry = NULL; ++ if (next != &bm->ddestroy) { ++ nentry = list_entry(next, struct drm_buffer_object, ++ ddestroy); ++ atomic_inc(&nentry->usage); ++ } ++ ++ drm_bo_cleanup_refs(entry, remove_all); ++ ++ if (nentry) ++ atomic_dec(&nentry->usage); ++ } ++} ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++static void drm_bo_delayed_workqueue(void *data) ++#else ++static void drm_bo_delayed_workqueue(struct work_struct *work) ++#endif ++{ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct drm_device *dev = (struct drm_device *) data; ++ struct drm_buffer_manager *bm = &dev->bm; ++#else ++ struct drm_buffer_manager *bm = ++ container_of(work, struct drm_buffer_manager, wq.work); ++ struct drm_device *dev = container_of(bm, struct drm_device, bm); ++#endif ++ ++ DRM_DEBUG("Delayed delete Worker\n"); ++ ++ mutex_lock(&dev->struct_mutex); ++ if (!bm->initialized) { ++ mutex_unlock(&dev->struct_mutex); ++ return; ++ } ++ drm_bo_delayed_delete(dev, 0); ++ if (bm->initialized && !list_empty(&bm->ddestroy)) { ++ schedule_delayed_work(&bm->wq, ++ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); ++ } ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++void drm_bo_usage_deref_locked(struct drm_buffer_object **bo) ++{ ++ struct drm_buffer_object *tmp_bo = *bo; ++ bo = NULL; ++ ++ DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex); ++ ++ if (atomic_dec_and_test(&tmp_bo->usage)) ++ drm_bo_destroy_locked(tmp_bo); ++} ++EXPORT_SYMBOL(drm_bo_usage_deref_locked); ++ ++static void drm_bo_base_deref_locked(struct drm_file *file_priv, ++ struct drm_user_object *uo) ++{ ++ struct drm_buffer_object *bo = ++ drm_user_object_entry(uo, struct drm_buffer_object, base); ++ ++ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); ++ ++ drm_bo_takedown_vm_locked(bo); ++ drm_bo_usage_deref_locked(&bo); ++} ++ ++void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo) ++{ ++ struct drm_buffer_object *tmp_bo = *bo; ++ struct drm_device *dev = tmp_bo->dev; ++ ++ *bo = NULL; ++ if (atomic_dec_and_test(&tmp_bo->usage)) { ++ mutex_lock(&dev->struct_mutex); ++ if (atomic_read(&tmp_bo->usage) == 0) ++ drm_bo_destroy_locked(tmp_bo); ++ mutex_unlock(&dev->struct_mutex); ++ } ++} ++EXPORT_SYMBOL(drm_bo_usage_deref_unlocked); ++ ++void drm_putback_buffer_objects(struct drm_device *dev) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct list_head *list = &bm->unfenced; ++ struct drm_buffer_object *entry, *next; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_for_each_entry_safe(entry, next, list, lru) { ++ atomic_inc(&entry->usage); ++ mutex_unlock(&dev->struct_mutex); ++ ++ mutex_lock(&entry->mutex); ++ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); ++ mutex_lock(&dev->struct_mutex); ++ ++ list_del_init(&entry->lru); ++ DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); ++ wake_up_all(&entry->event_queue); ++ ++ /* ++ * FIXME: Might want to put back on head of list ++ * instead of tail here. ++ */ ++ ++ drm_bo_add_to_lru(entry); ++ mutex_unlock(&entry->mutex); ++ drm_bo_usage_deref_locked(&entry); ++ } ++ mutex_unlock(&dev->struct_mutex); ++} ++EXPORT_SYMBOL(drm_putback_buffer_objects); ++ ++ ++/* ++ * Note. 
The caller has to register (if applicable) ++ * and deregister fence object usage. ++ */ ++ ++int drm_fence_buffer_objects(struct drm_device *dev, ++ struct list_head *list, ++ uint32_t fence_flags, ++ struct drm_fence_object *fence, ++ struct drm_fence_object **used_fence) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_buffer_object *entry; ++ uint32_t fence_type = 0; ++ uint32_t fence_class = ~0; ++ int count = 0; ++ int ret = 0; ++ struct list_head *l; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (!list) ++ list = &bm->unfenced; ++ ++ if (fence) ++ fence_class = fence->fence_class; ++ ++ list_for_each_entry(entry, list, lru) { ++ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); ++ fence_type |= entry->new_fence_type; ++ if (fence_class == ~0) ++ fence_class = entry->new_fence_class; ++ else if (entry->new_fence_class != fence_class) { ++ DRM_ERROR("Unmatching fence classes on unfenced list: " ++ "%d and %d.\n", ++ fence_class, ++ entry->new_fence_class); ++ ret = -EINVAL; ++ goto out; ++ } ++ count++; ++ } ++ ++ if (!count) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (fence) { ++ if ((fence_type & fence->type) != fence_type || ++ (fence->fence_class != fence_class)) { ++ DRM_ERROR("Given fence doesn't match buffers " ++ "on unfenced list.\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ } else { ++ mutex_unlock(&dev->struct_mutex); ++ ret = drm_fence_object_create(dev, fence_class, fence_type, ++ fence_flags | DRM_FENCE_FLAG_EMIT, ++ &fence); ++ mutex_lock(&dev->struct_mutex); ++ if (ret) ++ goto out; ++ } ++ ++ count = 0; ++ l = list->next; ++ while (l != list) { ++ prefetch(l->next); ++ entry = list_entry(l, struct drm_buffer_object, lru); ++ atomic_inc(&entry->usage); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_lock(&entry->mutex); ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(l); ++ if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { ++ count++; ++ if (entry->fence) ++ drm_fence_usage_deref_locked(&entry->fence); ++ entry->fence = drm_fence_reference_locked(fence); ++ entry->fence_class = entry->new_fence_class; ++ entry->fence_type = entry->new_fence_type; ++ DRM_FLAG_MASKED(entry->priv_flags, 0, ++ _DRM_BO_FLAG_UNFENCED); ++ wake_up_all(&entry->event_queue); ++ drm_bo_add_to_lru(entry); ++ } ++ mutex_unlock(&entry->mutex); ++ drm_bo_usage_deref_locked(&entry); ++ l = list->next; ++ } ++ DRM_DEBUG("Fenced %d buffers\n", count); ++out: ++ mutex_unlock(&dev->struct_mutex); ++ *used_fence = fence; ++ return ret; ++} ++EXPORT_SYMBOL(drm_fence_buffer_objects); ++ ++/* ++ * bo->mutex locked ++ */ ++ ++static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type, ++ int no_wait) ++{ ++ int ret = 0; ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg evict_mem; ++ ++ /* ++ * Someone might have modified the buffer before we took the ++ * buffer mutex. 
++ */ ++ ++ do { ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ if (unlikely(bo->mem.flags & ++ (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) ++ goto out_unlock; ++ if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) ++ goto out_unlock; ++ if (unlikely(bo->mem.mem_type != mem_type)) ++ goto out_unlock; ++ ret = drm_bo_wait(bo, 0, 1, no_wait, 0); ++ if (ret) ++ goto out_unlock; ++ ++ } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++ ++ evict_mem = bo->mem; ++ evict_mem.mm_node = NULL; ++ ++ evict_mem = bo->mem; ++ evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo); ++ ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->lru); ++ mutex_unlock(&dev->struct_mutex); ++ ++ ret = drm_bo_mem_space(bo, &evict_mem, no_wait); ++ ++ if (ret) { ++ if (ret != -EAGAIN) ++ DRM_ERROR("Failed to find memory space for " ++ "buffer 0x%p eviction.\n", bo); ++ goto out; ++ } ++ ++ ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); ++ ++ if (ret) { ++ if (ret != -EAGAIN) ++ DRM_ERROR("Buffer eviction failed\n"); ++ goto out; ++ } ++ ++ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, ++ _DRM_BO_FLAG_EVICTED); ++ ++out: ++ mutex_lock(&dev->struct_mutex); ++ if (evict_mem.mm_node) { ++ if (evict_mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(evict_mem.mm_node); ++ evict_mem.mm_node = NULL; ++ } ++ drm_bo_add_to_lru(bo); ++ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++out_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++ ++/** ++ * Repeatedly evict memory from the LRU for @mem_type until we create enough ++ * space, or we've evicted everything and there isn't enough space. ++ */ ++static int drm_bo_mem_force_space(struct drm_device *dev, ++ struct drm_bo_mem_reg *mem, ++ uint32_t mem_type, int no_wait) ++{ ++ struct drm_mm_node *node; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_buffer_object *entry; ++ struct drm_mem_type_manager *man = &bm->man[mem_type]; ++ struct list_head *lru; ++ unsigned long num_pages = mem->num_pages; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ do { ++ node = drm_mm_search_free(&man->manager, num_pages, ++ mem->page_alignment, 1); ++ if (node) ++ break; ++ ++ lru = &man->lru; ++ if (lru->next == lru) ++ break; ++ ++ entry = list_entry(lru->next, struct drm_buffer_object, lru); ++ atomic_inc(&entry->usage); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_lock(&entry->mutex); ++ ret = drm_bo_evict(entry, mem_type, no_wait); ++ mutex_unlock(&entry->mutex); ++ drm_bo_usage_deref_unlocked(&entry); ++ if (ret) ++ return ret; ++ mutex_lock(&dev->struct_mutex); ++ } while (1); ++ ++ if (!node) { ++ mutex_unlock(&dev->struct_mutex); ++ return -ENOMEM; ++ } ++ ++ node = drm_mm_get_block(node, num_pages, mem->page_alignment); ++ if (unlikely(!node)) { ++ mutex_unlock(&dev->struct_mutex); ++ return -ENOMEM; ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ mem->mm_node = node; ++ mem->mem_type = mem_type; ++ return 0; ++} ++ ++static int drm_bo_mt_compatible(struct drm_mem_type_manager *man, ++ int disallow_fixed, ++ uint32_t mem_type, ++ uint64_t mask, uint32_t *res_mask) ++{ ++ uint64_t cur_flags = drm_bo_type_flags(mem_type); ++ uint64_t flag_diff; ++ ++ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed) ++ return 0; ++ if (man->flags & _DRM_FLAG_MEMTYPE_CACHED) ++ cur_flags |= DRM_BO_FLAG_CACHED; ++ if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE) ++ cur_flags |= DRM_BO_FLAG_MAPPABLE; ++ if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT) ++ DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED); ++ ++ if 
((cur_flags & mask & DRM_BO_MASK_MEM) == 0) ++ return 0; ++ ++ if (mem_type == DRM_BO_MEM_LOCAL) { ++ *res_mask = cur_flags; ++ return 1; ++ } ++ ++ flag_diff = (mask ^ cur_flags); ++ if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED) ++ cur_flags |= DRM_BO_FLAG_CACHED_MAPPED; ++ ++ if ((flag_diff & DRM_BO_FLAG_CACHED) && ++ (!(mask & DRM_BO_FLAG_CACHED) || ++ (mask & DRM_BO_FLAG_FORCE_CACHING))) ++ return 0; ++ ++ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && ++ ((mask & DRM_BO_FLAG_MAPPABLE) || ++ (mask & DRM_BO_FLAG_FORCE_MAPPABLE))) ++ return 0; ++ ++ *res_mask = cur_flags; ++ return 1; ++} ++ ++/** ++ * Creates space for memory region @mem according to its type. ++ * ++ * This function first searches for free space in compatible memory types in ++ * the priority order defined by the driver. If free space isn't found, then ++ * drm_bo_mem_force_space is attempted in priority order to evict and find ++ * space. ++ */ ++int drm_bo_mem_space(struct drm_buffer_object *bo, ++ struct drm_bo_mem_reg *mem, int no_wait) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man; ++ ++ uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; ++ const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; ++ uint32_t i; ++ uint32_t mem_type = DRM_BO_MEM_LOCAL; ++ uint32_t cur_flags; ++ int type_found = 0; ++ int type_ok = 0; ++ int has_eagain = 0; ++ struct drm_mm_node *node = NULL; ++ int ret; ++ ++ mem->mm_node = NULL; ++ for (i = 0; i < num_prios; ++i) { ++ mem_type = prios[i]; ++ man = &bm->man[mem_type]; ++ ++ type_ok = drm_bo_mt_compatible(man, ++ bo->type == drm_bo_type_user, ++ mem_type, mem->proposed_flags, ++ &cur_flags); ++ ++ if (!type_ok) ++ continue; ++ ++ if (mem_type == DRM_BO_MEM_LOCAL) ++ break; ++ ++ if ((mem_type == bo->pinned_mem_type) && ++ (bo->pinned_node != NULL)) { ++ node = bo->pinned_node; ++ break; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ if (man->has_type && man->use_type) { ++ type_found = 1; ++ node = drm_mm_search_free(&man->manager, mem->num_pages, ++ mem->page_alignment, 1); ++ if (node) ++ node = drm_mm_get_block(node, mem->num_pages, ++ mem->page_alignment); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ if (node) ++ break; ++ } ++ ++ if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) { ++ mem->mm_node = node; ++ mem->mem_type = mem_type; ++ mem->flags = cur_flags; ++ return 0; ++ } ++ ++ if (!type_found) ++ return -EINVAL; ++ ++ num_prios = dev->driver->bo_driver->num_mem_busy_prio; ++ prios = dev->driver->bo_driver->mem_busy_prio; ++ ++ for (i = 0; i < num_prios; ++i) { ++ mem_type = prios[i]; ++ man = &bm->man[mem_type]; ++ ++ if (!man->has_type) ++ continue; ++ ++ if (!drm_bo_mt_compatible(man, ++ bo->type == drm_bo_type_user, ++ mem_type, ++ mem->proposed_flags, ++ &cur_flags)) ++ continue; ++ ++ ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); ++ ++ if (ret == 0 && mem->mm_node) { ++ mem->flags = cur_flags; ++ return 0; ++ } ++ ++ if (ret == -EAGAIN) ++ has_eagain = 1; ++ } ++ ++ ret = (has_eagain) ? 
-EAGAIN : -ENOMEM;
++	return ret;
++}
++EXPORT_SYMBOL(drm_bo_mem_space);
++
++/*
++ * drm_bo_modify_proposed_flags:
++ *
++ * @bo: the buffer object getting new flags
++ *
++ * @new_flags: the new set of proposed flag bits
++ *
++ * @new_mask: the mask of bits changed in new_flags
++ *
++ * Modify the proposed_flags bits in @bo
++ */
++static int drm_bo_modify_proposed_flags(struct drm_buffer_object *bo,
++					uint64_t new_flags, uint64_t new_mask)
++{
++	uint32_t new_access;
++
++	/* Copy unchanging bits from existing proposed_flags */
++	DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
++
++	if (bo->type == drm_bo_type_user &&
++	    ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
++	     (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
++		DRM_ERROR("User buffers require cache-coherent memory.\n");
++		return -EINVAL;
++	}
++
++	if (bo->type != drm_bo_type_kernel &&
++	    (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
++		DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
++		return -EPERM;
++	}
++
++	if (likely(new_mask & DRM_BO_MASK_MEM) &&
++	    (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
++	    !DRM_SUSER(DRM_CURPROC)) {
++		if (likely(bo->mem.flags & new_flags & new_mask &
++			   DRM_BO_MASK_MEM))
++			new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
++				(bo->mem.flags & DRM_BO_MASK_MEM);
++		else {
++			DRM_ERROR("Incompatible memory type specification "
++				  "for NO_EVICT buffer.\n");
++			return -EPERM;
++		}
++	}
++
++	if (new_flags & DRM_BO_FLAG_NO_MOVE) {
++		DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
++		return -EPERM;
++	}
++
++	new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
++				  DRM_BO_FLAG_READ);
++
++	if (new_access == 0) {
++		DRM_ERROR("Invalid buffer object rwx properties\n");
++		return -EINVAL;
++	}
++
++	bo->mem.proposed_flags = new_flags;
++	return 0;
++}
++
++/*
++ * Call dev->struct_mutex locked.
++ */
++
++struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
++						   uint32_t handle, int check_owner)
++{
++	struct drm_user_object *uo;
++	struct drm_buffer_object *bo;
++
++	uo = drm_lookup_user_object(file_priv, handle);
++
++	if (!uo || (uo->type != drm_buffer_type)) {
++		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
++		return NULL;
++	}
++
++	if (check_owner && file_priv != uo->owner) {
++		if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
++			return NULL;
++	}
++
++	bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
++	atomic_inc(&bo->usage);
++	return bo;
++}
++EXPORT_SYMBOL(drm_lookup_buffer_object);
++
++/*
++ * Call bo->mutex locked.
++ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
++ * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
++ */
++
++static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
++{
++	struct drm_fence_object *fence = bo->fence;
++
++	if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
++		return -EBUSY;
++
++	if (fence) {
++		if (drm_fence_object_signaled(fence, bo->fence_type)) {
++			drm_fence_usage_deref_unlocked(&bo->fence);
++			return 0;
++		}
++		return -EBUSY;
++	}
++	return 0;
++}
++
++int drm_bo_evict_cached(struct drm_buffer_object *bo)
++{
++	int ret = 0;
++
++	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
++	if (bo->mem.mm_node)
++		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
++	return ret;
++}
++EXPORT_SYMBOL(drm_bo_evict_cached);
++
++/*
++ * Wait until a buffer is unmapped.
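++ *
++ * Mappers hold a nonzero bo->mapped count; the final unmap (including
++ * the implicit one performed in drm_buffer_user_object_unmap() when a
++ * mapping client dies) wakes bo->event_queue, which is what the
++ * function below sleeps on.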
++ */
++
++static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
++{
++	int ret = 0;
++
++	if (likely(atomic_read(&bo->mapped) == 0))
++		return 0;
++
++	if (unlikely(no_wait))
++		return -EBUSY;
++
++	do {
++		mutex_unlock(&bo->mutex);
++		ret = wait_event_interruptible(bo->event_queue,
++					       atomic_read(&bo->mapped) == 0);
++		mutex_lock(&bo->mutex);
++		bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
++
++		if (ret == -ERESTARTSYS)
++			ret = -EAGAIN;
++	} while ((ret == 0) && atomic_read(&bo->mapped) > 0);
++
++	return ret;
++}
++
++/*
++ * Fill in the ioctl reply argument with buffer info.
++ * bo->mutex locked.
++ */
++
++void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
++			 struct drm_bo_info_rep *rep)
++{
++	if (!rep)
++		return;
++
++	rep->handle = bo->base.hash.key;
++	rep->flags = bo->mem.flags;
++	rep->size = bo->num_pages * PAGE_SIZE;
++	rep->offset = bo->offset;
++
++	/*
++	 * drm_bo_type_device buffers have user-visible
++	 * handles which can be used to share across
++	 * processes. Hand that back to the application.
++	 */
++	if (bo->type == drm_bo_type_device)
++		rep->arg_handle = bo->map_list.user_token;
++	else
++		rep->arg_handle = 0;
++
++	rep->proposed_flags = bo->mem.proposed_flags;
++	rep->buffer_start = bo->buffer_start;
++	rep->fence_flags = bo->fence_type;
++	rep->rep_flags = 0;
++	rep->page_alignment = bo->mem.page_alignment;
++
++	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
++		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
++				DRM_BO_REP_BUSY);
++	}
++}
++EXPORT_SYMBOL(drm_bo_fill_rep_arg);
++
++/*
++ * Wait for buffer idle and register that we've mapped the buffer.
++ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
++ * so that if the client dies, the mapping is automatically
++ * unregistered.
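++ *
++ * A rough sketch of the resulting lifetime for a client that maps a
++ * buffer and then exits without unmapping it:
++ *
++ *	drm_buffer_object_map()          atomic_inc(&bo->mapped), ref added
++ *	client exits                     drm file release drops its refs
++ *	drm_buffer_user_object_unmap()   atomic_dec_and_test(&bo->mapped)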
++ */ ++ ++static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle, ++ uint32_t map_flags, unsigned hint, ++ struct drm_bo_info_rep *rep) ++{ ++ struct drm_buffer_object *bo; ++ struct drm_device *dev = file_priv->minor->dev; ++ int ret = 0; ++ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; ++ ++ mutex_lock(&dev->struct_mutex); ++ bo = drm_lookup_buffer_object(file_priv, handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!bo) ++ return -EINVAL; ++ ++ mutex_lock(&bo->mutex); ++ do { ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ ret = drm_bo_wait(bo, 0, 1, no_wait, 1); ++ if (unlikely(ret)) ++ goto out; ++ ++ if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) ++ drm_bo_evict_cached(bo); ++ ++ } while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED)); ++ ++ atomic_inc(&bo->mapped); ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1); ++ mutex_unlock(&dev->struct_mutex); ++ if (ret) { ++ if (atomic_dec_and_test(&bo->mapped)) ++ wake_up_all(&bo->event_queue); ++ ++ } else ++ drm_bo_fill_rep_arg(bo, rep); ++ ++ out: ++ mutex_unlock(&bo->mutex); ++ drm_bo_usage_deref_unlocked(&bo); ++ ++ return ret; ++} ++ ++static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_buffer_object *bo; ++ struct drm_ref_object *ro; ++ int ret = 0; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ bo = drm_lookup_buffer_object(file_priv, handle, 1); ++ if (!bo) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1); ++ if (!ro) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ drm_remove_ref_object(file_priv, ro); ++ drm_bo_usage_deref_locked(&bo); ++out: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/* ++ * Call struct-sem locked. ++ */ ++ ++static void drm_buffer_user_object_unmap(struct drm_file *file_priv, ++ struct drm_user_object *uo, ++ enum drm_ref_type action) ++{ ++ struct drm_buffer_object *bo = ++ drm_user_object_entry(uo, struct drm_buffer_object, base); ++ ++ /* ++ * We DON'T want to take the bo->lock here, because we want to ++ * hold it when we wait for unmapped buffer. ++ */ ++ ++ BUG_ON(action != _DRM_REF_TYPE1); ++ ++ if (atomic_dec_and_test(&bo->mapped)) ++ wake_up_all(&bo->event_queue); ++} ++ ++/* ++ * bo->mutex locked. ++ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags. ++ */ ++ ++int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags, ++ int no_wait, int move_unfenced) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret = 0; ++ struct drm_bo_mem_reg mem; ++ ++ BUG_ON(bo->fence != NULL); ++ ++ mem.num_pages = bo->num_pages; ++ mem.size = mem.num_pages << PAGE_SHIFT; ++ mem.proposed_flags = new_mem_flags; ++ mem.page_alignment = bo->mem.page_alignment; ++ ++ mutex_lock(&bm->evict_mutex); ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->lru); ++ mutex_unlock(&dev->struct_mutex); ++ ++ /* ++ * Determine where to move the buffer. 
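++	 * drm_bo_mem_space() first searches the driver's placement
++	 * priority list for free space and only falls back to eviction
++	 * via drm_bo_mem_force_space() when none is found.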
++ */ ++ ret = drm_bo_mem_space(bo, &mem, no_wait); ++ if (ret) ++ goto out_unlock; ++ ++ ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); ++ ++out_unlock: ++ mutex_lock(&dev->struct_mutex); ++ if (ret || !move_unfenced) { ++ if (mem.mm_node) { ++ if (mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(mem.mm_node); ++ mem.mm_node = NULL; ++ } ++ drm_bo_add_to_lru(bo); ++ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { ++ wake_up_all(&bo->event_queue); ++ DRM_FLAG_MASKED(bo->priv_flags, 0, ++ _DRM_BO_FLAG_UNFENCED); ++ } ++ } else { ++ list_add_tail(&bo->lru, &bm->unfenced); ++ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, ++ _DRM_BO_FLAG_UNFENCED); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ mutex_unlock(&bm->evict_mutex); ++ return ret; ++} ++ ++static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem) ++{ ++ uint32_t flag_diff = (mem->proposed_flags ^ mem->flags); ++ ++ if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0) ++ return 0; ++ if ((flag_diff & DRM_BO_FLAG_CACHED) && ++ (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/ ++ (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING))) ++ return 0; ++ ++ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && ++ ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) || ++ (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE))) ++ return 0; ++ return 1; ++} ++ ++/** ++ * drm_buffer_object_validate: ++ * ++ * @bo: the buffer object to modify ++ * ++ * @fence_class: the new fence class covering this buffer ++ * ++ * @move_unfenced: a boolean indicating whether switching the ++ * memory space of this buffer should cause the buffer to ++ * be placed on the unfenced list. ++ * ++ * @no_wait: whether this function should return -EBUSY instead ++ * of waiting. ++ * ++ * Change buffer access parameters. This can involve moving ++ * the buffer to the correct memory type, pinning the buffer ++ * or changing the class/type of fence covering this buffer ++ * ++ * Must be called with bo locked. ++ */ ++ ++static int drm_buffer_object_validate(struct drm_buffer_object *bo, ++ uint32_t fence_class, ++ int move_unfenced, int no_wait, ++ int move_buffer) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret; ++ ++ if (move_buffer) { ++ ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait, ++ move_unfenced); ++ if (ret) { ++ if (ret != -EAGAIN) ++ DRM_ERROR("Failed moving buffer.\n"); ++ if (ret == -ENOMEM) ++ DRM_ERROR("Out of aperture space or " ++ "DRM memory quota.\n"); ++ return ret; ++ } ++ } ++ ++ /* ++ * Pinned buffers. ++ */ ++ ++ if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { ++ bo->pinned_mem_type = bo->mem.mem_type; ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->pinned_lru); ++ drm_bo_add_to_pinned_lru(bo); ++ ++ if (bo->pinned_node != bo->mem.mm_node) { ++ if (bo->pinned_node != NULL) ++ drm_mm_put_block(bo->pinned_node); ++ bo->pinned_node = bo->mem.mm_node; ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ } else if (bo->pinned_node != NULL) { ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (bo->pinned_node != bo->mem.mm_node) ++ drm_mm_put_block(bo->pinned_node); ++ ++ list_del_init(&bo->pinned_lru); ++ bo->pinned_node = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ ++ } ++ ++ /* ++ * We might need to add a TTM. 
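++	 * Buffers living in DRM_BO_MEM_LOCAL get their ttm allocated
++	 * lazily here; for other page-backed memory types the move path
++	 * is expected to have set one up already.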
++ */ ++ ++ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) { ++ ret = drm_bo_add_ttm(bo); ++ if (ret) ++ return ret; ++ } ++ /* ++ * Validation has succeeded, move the access and other ++ * non-mapping-related flag bits from the proposed flags to ++ * the active flags ++ */ ++ ++ DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE); ++ ++ /* ++ * Finally, adjust lru to be sure. ++ */ ++ ++ mutex_lock(&dev->struct_mutex); ++ list_del(&bo->lru); ++ if (move_unfenced) { ++ list_add_tail(&bo->lru, &bm->unfenced); ++ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, ++ _DRM_BO_FLAG_UNFENCED); ++ } else { ++ drm_bo_add_to_lru(bo); ++ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { ++ wake_up_all(&bo->event_queue); ++ DRM_FLAG_MASKED(bo->priv_flags, 0, ++ _DRM_BO_FLAG_UNFENCED); ++ } ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/* ++ * This function is called with bo->mutex locked, but may release it ++ * temporarily to wait for events. ++ */ ++ ++static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo, ++ uint64_t flags, ++ uint64_t mask, ++ uint32_t hint, ++ uint32_t fence_class, ++ int no_wait, ++ int *move_buffer) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ uint32_t ftype; ++ ++ int ret; ++ ++ DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n", ++ (unsigned long long) bo->mem.proposed_flags, ++ (unsigned long long) bo->mem.flags); ++ ++ ret = drm_bo_modify_proposed_flags (bo, flags, mask); ++ if (ret) ++ return ret; ++ ++ ret = drm_bo_wait_unmapped(bo, no_wait); ++ if (ret) ++ return ret; ++ ++ ret = driver->fence_type(bo, &fence_class, &ftype); ++ ++ if (ret) { ++ DRM_ERROR("Driver did not support given buffer permissions.\n"); ++ return ret; ++ } ++ ++ /* ++ * We're switching command submission mechanism, ++ * or cannot simply rely on the hardware serializing for us. ++ * Insert a driver-dependant barrier or wait for buffer idle. ++ */ ++ ++ if ((fence_class != bo->fence_class) || ++ ((ftype ^ bo->fence_type) & bo->fence_type)) { ++ ++ ret = -EINVAL; ++ if (driver->command_stream_barrier) { ++ ret = driver->command_stream_barrier(bo, ++ fence_class, ++ ftype, ++ no_wait); ++ } ++ if (ret && ret != -EAGAIN) ++ ret = drm_bo_wait(bo, 0, 1, no_wait, 1); ++ ++ if (ret) ++ return ret; ++ } ++ ++ bo->new_fence_class = fence_class; ++ bo->new_fence_type = ftype; ++ ++ /* ++ * Check whether we need to move buffer. ++ */ ++ ++ *move_buffer = 0; ++ if (!drm_bo_mem_compat(&bo->mem)) { ++ *move_buffer = 1; ++ ret = drm_bo_wait(bo, 0, 1, no_wait, 1); ++ } ++ ++ return ret; ++} ++ ++/** ++ * drm_bo_do_validate: ++ * ++ * @bo: the buffer object ++ * ++ * @flags: access rights, mapping parameters and cacheability. See ++ * the DRM_BO_FLAG_* values in drm.h ++ * ++ * @mask: Which flag values to change; this allows callers to modify ++ * things without knowing the current state of other flags. ++ * ++ * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_* ++ * values in drm.h. ++ * ++ * @fence_class: a driver-specific way of doing fences. Presumably, ++ * this would be used if the driver had more than one submission and ++ * fencing mechanism. At this point, there isn't any use of this ++ * from the user mode code. ++ * ++ * @rep: To be stuffed with the reply from validation ++ * ++ * 'validate' a buffer object. This changes where the buffer is ++ * located, along with changing access modes. 
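++ *
++ * A hypothetical caller pinning a buffer into VRAM might do:
++ *
++ *	ret = drm_bo_do_validate(bo,
++ *				 DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT,
++ *				 DRM_BO_MASK_MEM | DRM_BO_FLAG_NO_EVICT,
++ *				 DRM_BO_HINT_DONT_FENCE, bo->fence_class,
++ *				 NULL);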
++ */ ++ ++int drm_bo_do_validate(struct drm_buffer_object *bo, ++ uint64_t flags, uint64_t mask, uint32_t hint, ++ uint32_t fence_class, ++ struct drm_bo_info_rep *rep) ++{ ++ int ret; ++ int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0; ++ int move_buffer; ++ ++ mutex_lock(&bo->mutex); ++ ++ do { ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ ret = drm_bo_prepare_for_validate(bo, flags, mask, hint, ++ fence_class, no_wait, ++ &move_buffer); ++ if (ret) ++ goto out; ++ ++ } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED)); ++ ++ ret = drm_buffer_object_validate(bo, ++ fence_class, ++ !(hint & DRM_BO_HINT_DONT_FENCE), ++ no_wait, ++ move_buffer); ++ ++ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++out: ++ if (rep) ++ drm_bo_fill_rep_arg(bo, rep); ++ ++ mutex_unlock(&bo->mutex); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_do_validate); ++ ++/** ++ * drm_bo_handle_validate ++ * ++ * @file_priv: the drm file private, used to get a handle to the user context ++ * ++ * @handle: the buffer object handle ++ * ++ * @flags: access rights, mapping parameters and cacheability. See ++ * the DRM_BO_FLAG_* values in drm.h ++ * ++ * @mask: Which flag values to change; this allows callers to modify ++ * things without knowing the current state of other flags. ++ * ++ * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_* ++ * values in drm.h. ++ * ++ * @fence_class: a driver-specific way of doing fences. Presumably, ++ * this would be used if the driver had more than one submission and ++ * fencing mechanism. At this point, there isn't any use of this ++ * from the user mode code. ++ * ++ * @rep: To be stuffed with the reply from validation ++ * ++ * @bp_rep: To be stuffed with the buffer object pointer ++ * ++ * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead ++ * of a pointer to a buffer object. Optionally return a pointer to the buffer object. ++ * This is a convenience wrapper only. ++ */ ++ ++int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, ++ uint64_t flags, uint64_t mask, ++ uint32_t hint, ++ uint32_t fence_class, ++ struct drm_bo_info_rep *rep, ++ struct drm_buffer_object **bo_rep) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_buffer_object *bo; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ bo = drm_lookup_buffer_object(file_priv, handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!bo) ++ return -EINVAL; ++ ++ if (bo->base.owner != file_priv) ++ mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE); ++ ++ ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep); ++ ++ if (!ret && bo_rep) ++ *bo_rep = bo; ++ else ++ drm_bo_usage_deref_unlocked(&bo); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_handle_validate); ++ ++ ++static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle, ++ struct drm_bo_info_rep *rep) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_buffer_object *bo; ++ ++ mutex_lock(&dev->struct_mutex); ++ bo = drm_lookup_buffer_object(file_priv, handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!bo) ++ return -EINVAL; ++ ++ mutex_lock(&bo->mutex); ++ ++ /* ++ * FIXME: Quick busy here? 
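++	 * Unlike drm_bo_quick_busy(), drm_bo_busy() also flushes fences,
++	 * so the busy state reported back to user space is current.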
++ */ ++ ++ drm_bo_busy(bo, 1); ++ drm_bo_fill_rep_arg(bo, rep); ++ mutex_unlock(&bo->mutex); ++ drm_bo_usage_deref_unlocked(&bo); ++ return 0; ++} ++ ++static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle, ++ uint32_t hint, ++ struct drm_bo_info_rep *rep) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_buffer_object *bo; ++ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ bo = drm_lookup_buffer_object(file_priv, handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!bo) ++ return -EINVAL; ++ ++ mutex_lock(&bo->mutex); ++ ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1); ++ if (ret) ++ goto out; ++ ++ drm_bo_fill_rep_arg(bo, rep); ++out: ++ mutex_unlock(&bo->mutex); ++ drm_bo_usage_deref_unlocked(&bo); ++ return ret; ++} ++ ++int drm_buffer_object_create(struct drm_device *dev, ++ unsigned long size, ++ enum drm_bo_type type, ++ uint64_t flags, ++ uint32_t hint, ++ uint32_t page_alignment, ++ unsigned long buffer_start, ++ struct drm_buffer_object **buf_obj) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_buffer_object *bo; ++ int ret = 0; ++ unsigned long num_pages; ++ ++ size += buffer_start & ~PAGE_MASK; ++ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ if (num_pages == 0) { ++ DRM_ERROR("Illegal buffer object size.\n"); ++ return -EINVAL; ++ } ++ ++ bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ); ++ ++ if (!bo) ++ return -ENOMEM; ++ ++ mutex_init(&bo->mutex); ++ mutex_lock(&bo->mutex); ++ ++ atomic_set(&bo->usage, 1); ++ atomic_set(&bo->mapped, 0); ++ DRM_INIT_WAITQUEUE(&bo->event_queue); ++ INIT_LIST_HEAD(&bo->lru); ++ INIT_LIST_HEAD(&bo->pinned_lru); ++ INIT_LIST_HEAD(&bo->ddestroy); ++#ifdef DRM_ODD_MM_COMPAT ++ INIT_LIST_HEAD(&bo->p_mm_list); ++ INIT_LIST_HEAD(&bo->vma_list); ++#endif ++ bo->dev = dev; ++ bo->type = type; ++ bo->num_pages = num_pages; ++ bo->mem.mem_type = DRM_BO_MEM_LOCAL; ++ bo->mem.num_pages = bo->num_pages; ++ bo->mem.mm_node = NULL; ++ bo->mem.page_alignment = page_alignment; ++ bo->buffer_start = buffer_start & PAGE_MASK; ++ bo->priv_flags = 0; ++ bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | ++ DRM_BO_FLAG_MAPPABLE); ++ bo->mem.proposed_flags = 0; ++ atomic_inc(&bm->count); ++ /* ++ * Use drm_bo_modify_proposed_flags to error-check the proposed flags ++ */ ++ ret = drm_bo_modify_proposed_flags (bo, flags, flags); ++ if (ret) ++ goto out_err; ++ ++ /* ++ * For drm_bo_type_device buffers, allocate ++ * address space from the device so that applications ++ * can mmap the buffer from there ++ */ ++ if (bo->type == drm_bo_type_device) { ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_bo_setup_vm_locked(bo); ++ mutex_unlock(&dev->struct_mutex); ++ if (ret) ++ goto out_err; ++ } ++ ++ mutex_unlock(&bo->mutex); ++ ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE, ++ 0, NULL); ++ if (ret) ++ goto out_err_unlocked; ++ ++ *buf_obj = bo; ++ return 0; ++ ++out_err: ++ mutex_unlock(&bo->mutex); ++out_err_unlocked: ++ drm_bo_usage_deref_unlocked(&bo); ++ return ret; ++} ++EXPORT_SYMBOL(drm_buffer_object_create); ++ ++ ++static int drm_bo_add_user_object(struct drm_file *file_priv, ++ struct drm_buffer_object *bo, int shareable) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_add_user_object(file_priv, &bo->base, shareable); ++ if (ret) ++ goto out; ++ ++ bo->base.remove = drm_bo_base_deref_locked; ++ bo->base.type = drm_buffer_type; ++ 
bo->base.ref_struct_locked = NULL; ++ bo->base.unref = drm_buffer_user_object_unmap; ++ ++out: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_create_arg *arg = data; ++ struct drm_bo_create_req *req = &arg->d.req; ++ struct drm_bo_info_rep *rep = &arg->d.rep; ++ struct drm_buffer_object *entry; ++ enum drm_bo_type bo_type; ++ int ret = 0; ++ ++ DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n", ++ (int)(req->size / 1024), req->page_alignment * 4); ++ ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ /* ++ * If the buffer creation request comes in with a starting address, ++ * that points at the desired user pages to map. Otherwise, create ++ * a drm_bo_type_device buffer, which uses pages allocated from the kernel ++ */ ++ bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device; ++ ++ /* ++ * User buffers cannot be shared ++ */ ++ if (bo_type == drm_bo_type_user) ++ req->flags &= ~DRM_BO_FLAG_SHAREABLE; ++ ++ ret = drm_buffer_object_create(file_priv->minor->dev, ++ req->size, bo_type, req->flags, ++ req->hint, req->page_alignment, ++ req->buffer_start, &entry); ++ if (ret) ++ goto out; ++ ++ ret = drm_bo_add_user_object(file_priv, entry, ++ req->flags & DRM_BO_FLAG_SHAREABLE); ++ if (ret) { ++ drm_bo_usage_deref_unlocked(&entry); ++ goto out; ++ } ++ ++ mutex_lock(&entry->mutex); ++ drm_bo_fill_rep_arg(entry, rep); ++ mutex_unlock(&entry->mutex); ++ ++out: ++ return ret; ++} ++ ++int drm_bo_setstatus_ioctl(struct drm_device *dev, ++ void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_map_wait_idle_arg *arg = data; ++ struct drm_bo_info_req *req = &arg->d.req; ++ struct drm_bo_info_rep *rep = &arg->d.rep; ++ struct drm_buffer_object *bo; ++ int ret; ++ ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_read_lock(&dev->bm.bm_lock, 1); ++ if (ret) ++ return ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ bo = drm_lookup_buffer_object(file_priv, req->handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!bo) ++ return -EINVAL; ++ ++ if (bo->base.owner != file_priv) ++ req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE); ++ ++ ret = drm_bo_do_validate(bo, req->flags, req->mask, ++ req->hint | DRM_BO_HINT_DONT_FENCE, ++ bo->fence_class, rep); ++ ++ drm_bo_usage_deref_unlocked(&bo); ++ ++ (void) drm_bo_read_unlock(&dev->bm.bm_lock); ++ ++ return ret; ++} ++ ++int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_map_wait_idle_arg *arg = data; ++ struct drm_bo_info_req *req = &arg->d.req; ++ struct drm_bo_info_rep *rep = &arg->d.rep; ++ int ret; ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_buffer_object_map(file_priv, req->handle, req->mask, ++ req->hint, rep); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_handle_arg *arg = data; ++ int ret; ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_buffer_object_unmap(file_priv, arg->handle); ++ return ret; ++} ++ ++ ++int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ 
struct drm_bo_reference_info_arg *arg = data; ++ struct drm_bo_handle_arg *req = &arg->d.req; ++ struct drm_bo_info_rep *rep = &arg->d.rep; ++ struct drm_user_object *uo; ++ int ret; ++ ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_user_object_ref(file_priv, req->handle, ++ drm_buffer_type, &uo); ++ if (ret) ++ return ret; ++ ++ ret = drm_bo_handle_info(file_priv, req->handle, rep); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_handle_arg *arg = data; ++ int ret = 0; ++ ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type); ++ return ret; ++} ++ ++int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_reference_info_arg *arg = data; ++ struct drm_bo_handle_arg *req = &arg->d.req; ++ struct drm_bo_info_rep *rep = &arg->d.rep; ++ int ret; ++ ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_handle_info(file_priv, req->handle, rep); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_bo_map_wait_idle_arg *arg = data; ++ struct drm_bo_info_req *req = &arg->d.req; ++ struct drm_bo_info_rep *rep = &arg->d.rep; ++ int ret; ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_handle_wait(file_priv, req->handle, ++ req->hint, rep); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static int drm_bo_leave_list(struct drm_buffer_object *bo, ++ uint32_t mem_type, ++ int free_pinned, ++ int allow_errors) ++{ ++ struct drm_device *dev = bo->dev; ++ int ret = 0; ++ ++ mutex_lock(&bo->mutex); ++ ++ ret = drm_bo_expire_fence(bo, allow_errors); ++ if (ret) ++ goto out; ++ ++ if (free_pinned) { ++ DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&bo->pinned_lru); ++ if (bo->pinned_node == bo->mem.mm_node) ++ bo->pinned_node = NULL; ++ if (bo->pinned_node != NULL) { ++ drm_mm_put_block(bo->pinned_node); ++ bo->pinned_node = NULL; ++ } ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) { ++ DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " ++ "cleanup. Removing flag and evicting.\n"); ++ bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; ++ bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT; ++ } ++ ++ if (bo->mem.mem_type == mem_type) ++ ret = drm_bo_evict(bo, mem_type, 0); ++ ++ if (ret) { ++ if (allow_errors) { ++ goto out; ++ } else { ++ ret = 0; ++ DRM_ERROR("Cleanup eviction failed\n"); ++ } ++ } ++ ++out: ++ mutex_unlock(&bo->mutex); ++ return ret; ++} ++ ++ ++static struct drm_buffer_object *drm_bo_entry(struct list_head *list, ++ int pinned_list) ++{ ++ if (pinned_list) ++ return list_entry(list, struct drm_buffer_object, pinned_lru); ++ else ++ return list_entry(list, struct drm_buffer_object, lru); ++} ++ ++/* ++ * dev->struct_mutex locked. 
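++ * The mutex is dropped and re-acquired around each drm_bo_leave_list()
++ * call below, so the list may change while it is being drained.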
++ */ ++ ++static int drm_bo_force_list_clean(struct drm_device *dev, ++ struct list_head *head, ++ unsigned mem_type, ++ int free_pinned, ++ int allow_errors, ++ int pinned_list) ++{ ++ struct list_head *list, *next, *prev; ++ struct drm_buffer_object *entry, *nentry; ++ int ret; ++ int do_restart; ++ ++ /* ++ * The list traversal is a bit odd here, because an item may ++ * disappear from the list when we release the struct_mutex or ++ * when we decrease the usage count. Also we're not guaranteed ++ * to drain pinned lists, so we can't always restart. ++ */ ++ ++restart: ++ nentry = NULL; ++ list_for_each_safe(list, next, head) { ++ prev = list->prev; ++ ++ entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list); ++ atomic_inc(&entry->usage); ++ if (nentry) { ++ atomic_dec(&nentry->usage); ++ nentry = NULL; ++ } ++ ++ /* ++ * Protect the next item from destruction, so we can check ++ * its list pointers later on. ++ */ ++ ++ if (next != head) { ++ nentry = drm_bo_entry(next, pinned_list); ++ atomic_inc(&nentry->usage); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++ ret = drm_bo_leave_list(entry, mem_type, free_pinned, ++ allow_errors); ++ mutex_lock(&dev->struct_mutex); ++ ++ drm_bo_usage_deref_locked(&entry); ++ if (ret) ++ return ret; ++ ++ /* ++ * Has the next item disappeared from the list? ++ */ ++ ++ do_restart = ((next->prev != list) && (next->prev != prev)); ++ ++ if (nentry != NULL && do_restart) ++ drm_bo_usage_deref_locked(&nentry); ++ ++ if (do_restart) ++ goto restart; ++ } ++ return 0; ++} ++ ++int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem_type]; ++ int ret = -EINVAL; ++ ++ if (mem_type >= DRM_BO_MEM_TYPES) { ++ DRM_ERROR("Illegal memory type %d\n", mem_type); ++ return ret; ++ } ++ ++ if (!man->has_type) { ++ DRM_ERROR("Trying to take down uninitialized " ++ "memory manager type %u\n", mem_type); ++ return ret; ++ } ++ ++ if ((man->kern_init_type) && (kern_clean == 0)) { ++ DRM_ERROR("Trying to take down kernel initialized " ++ "memory manager type %u\n", mem_type); ++ return -EPERM; ++ } ++ ++ man->use_type = 0; ++ man->has_type = 0; ++ ++ ret = 0; ++ if (mem_type > 0) { ++ BUG_ON(!list_empty(&bm->unfenced)); ++ drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); ++ drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); ++ ++ if (drm_mm_clean(&man->manager)) { ++ drm_mm_takedown(&man->manager); ++ } else { ++ ret = -EBUSY; ++ } ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_clean_mm); ++ ++/** ++ *Evict all buffers of a particular mem_type, but leave memory manager ++ *regions for NO_MOVE buffers intact. New buffers cannot be added at this ++ *point since we have the hardware lock. 
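++ * Typically reached from drm_mm_lock_ioctl() (e.g. around VT switches)
++ * with the buffer-manager write lock already held.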
++ */ ++ ++static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type) ++{ ++ int ret; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem_type]; ++ ++ if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { ++ DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type); ++ return -EINVAL; ++ } ++ ++ if (!man->has_type) { ++ DRM_ERROR("Memory type %u has not been initialized.\n", ++ mem_type); ++ return 0; ++ } ++ ++ ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); ++ if (ret) ++ return ret; ++ ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1); ++ ++ return ret; ++} ++ ++int drm_bo_init_mm(struct drm_device *dev, unsigned type, ++ unsigned long p_offset, unsigned long p_size, ++ int kern_init) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret = -EINVAL; ++ struct drm_mem_type_manager *man; ++ ++ if (type >= DRM_BO_MEM_TYPES) { ++ DRM_ERROR("Illegal memory type %d\n", type); ++ return ret; ++ } ++ ++ man = &bm->man[type]; ++ if (man->has_type) { ++ DRM_ERROR("Memory manager already initialized for type %d\n", ++ type); ++ return ret; ++ } ++ ++ ret = dev->driver->bo_driver->init_mem_type(dev, type, man); ++ if (ret) ++ return ret; ++ ++ ret = 0; ++ if (type != DRM_BO_MEM_LOCAL) { ++ if (!p_size) { ++ DRM_ERROR("Zero size memory manager type %d\n", type); ++ return ret; ++ } ++ ret = drm_mm_init(&man->manager, p_offset, p_size); ++ if (ret) ++ return ret; ++ } ++ man->has_type = 1; ++ man->use_type = 1; ++ man->kern_init_type = kern_init; ++ man->size = p_size; ++ ++ INIT_LIST_HEAD(&man->lru); ++ INIT_LIST_HEAD(&man->pinned); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_init_mm); ++ ++/* ++ * This function is intended to be called on drm driver unload. ++ * If you decide to call it from lastclose, you must protect the call ++ * from a potentially racing drm_bo_driver_init in firstopen. ++ * (This may happen on X server restart). ++ */ ++ ++int drm_bo_driver_finish(struct drm_device *dev) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret = 0; ++ unsigned i = DRM_BO_MEM_TYPES; ++ struct drm_mem_type_manager *man; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (!bm->initialized) ++ goto out; ++ bm->initialized = 0; ++ ++ while (i--) { ++ man = &bm->man[i]; ++ if (man->has_type) { ++ man->use_type = 0; ++ if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) { ++ ret = -EBUSY; ++ DRM_ERROR("DRM memory manager type %d " ++ "is not clean.\n", i); ++ } ++ man->has_type = 0; ++ } ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!cancel_delayed_work(&bm->wq)) ++ flush_scheduled_work(); ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_bo_delayed_delete(dev, 1); ++ if (list_empty(&bm->ddestroy)) ++ DRM_DEBUG("Delayed destroy list was clean\n"); ++ ++ if (list_empty(&bm->man[0].lru)) ++ DRM_DEBUG("Swap list was clean\n"); ++ ++ if (list_empty(&bm->man[0].pinned)) ++ DRM_DEBUG("NO_MOVE list was clean\n"); ++ ++ if (list_empty(&bm->unfenced)) ++ DRM_DEBUG("Unfenced list was clean\n"); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ ClearPageReserved(bm->dummy_read_page); ++#endif ++ __free_page(bm->dummy_read_page); ++ ++out: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/* ++ * This function is intended to be called on drm driver load. ++ * If you decide to call it from firstopen, you must protect the call ++ * from a potentially racing drm_bo_driver_finish in lastclose. ++ * (This may happen on X server restart). 
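++ *
++ * Under that constraint a driver unload hook reduces to (sketch):
++ *
++ *	(void) drm_bo_driver_finish(dev);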
++ */ ++ ++int drm_bo_driver_init(struct drm_device *dev) ++{ ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ struct drm_buffer_manager *bm = &dev->bm; ++ int ret = -EINVAL; ++ ++ bm->dummy_read_page = NULL; ++ drm_bo_init_lock(&bm->bm_lock); ++ mutex_lock(&dev->struct_mutex); ++ if (!driver) ++ goto out_unlock; ++ ++ bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); ++ if (!bm->dummy_read_page) { ++ ret = -ENOMEM; ++ goto out_unlock; ++ } ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ SetPageReserved(bm->dummy_read_page); ++#endif ++ ++ /* ++ * Initialize the system memory buffer type. ++ * Other types need to be driver / IOCTL initialized. ++ */ ++ ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1); ++ if (ret) ++ goto out_unlock; ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev); ++#else ++ INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue); ++#endif ++ bm->initialized = 1; ++ bm->nice_mode = 1; ++ atomic_set(&bm->count, 0); ++ bm->cur_pages = 0; ++ INIT_LIST_HEAD(&bm->unfenced); ++ INIT_LIST_HEAD(&bm->ddestroy); ++out_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_driver_init); ++ ++int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_mm_init_arg *arg = data; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ int ret; ++ ++ if (!driver) { ++ DRM_ERROR("Buffer objects are not supported by this driver\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv); ++ if (ret) ++ return ret; ++ ++ ret = -EINVAL; ++ if (arg->magic != DRM_BO_INIT_MAGIC) { ++ DRM_ERROR("You are using an old libdrm that is not compatible with\n" ++ "\tthe kernel DRM module. Please upgrade your libdrm.\n"); ++ return -EINVAL; ++ } ++ if (arg->major != DRM_BO_INIT_MAJOR) { ++ DRM_ERROR("libdrm and kernel DRM buffer object interface major\n" ++ "\tversion don't match. Got %d, expected %d.\n", ++ arg->major, DRM_BO_INIT_MAJOR); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ if (!bm->initialized) { ++ DRM_ERROR("DRM memory manager was not initialized.\n"); ++ goto out; ++ } ++ if (arg->mem_type == 0) { ++ DRM_ERROR("System memory buffers already initialized.\n"); ++ goto out; ++ } ++ ret = drm_bo_init_mm(dev, arg->mem_type, ++ arg->p_offset, arg->p_size, 0); ++ ++out: ++ mutex_unlock(&dev->struct_mutex); ++ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv); ++ ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_mm_type_arg *arg = data; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ int ret; ++ ++ if (!driver) { ++ DRM_ERROR("Buffer objects are not supported by this driver\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv); ++ if (ret) ++ return ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = -EINVAL; ++ if (!bm->initialized) { ++ DRM_ERROR("DRM memory manager was not initialized\n"); ++ goto out; ++ } ++ if (arg->mem_type == 0) { ++ DRM_ERROR("No takedown for System memory buffers.\n"); ++ goto out; ++ } ++ ret = 0; ++ if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) { ++ if (ret == -EINVAL) ++ DRM_ERROR("Memory manager type %d not clean. 
" ++ "Delaying takedown\n", arg->mem_type); ++ ret = 0; ++ } ++out: ++ mutex_unlock(&dev->struct_mutex); ++ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv); ++ ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_mm_type_arg *arg = data; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ int ret; ++ ++ if (!driver) { ++ DRM_ERROR("Buffer objects are not supported by this driver\n"); ++ return -EINVAL; ++ } ++ ++ if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) { ++ DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n"); ++ return -EINVAL; ++ } ++ ++ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) { ++ ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv); ++ if (ret) ++ return ret; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_bo_lock_mm(dev, arg->mem_type); ++ mutex_unlock(&dev->struct_mutex); ++ if (ret) { ++ (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int drm_mm_unlock_ioctl(struct drm_device *dev, ++ void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_mm_type_arg *arg = data; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ int ret; ++ ++ if (!driver) { ++ DRM_ERROR("Buffer objects are not supported by this driver\n"); ++ return -EINVAL; ++ } ++ ++ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) { ++ ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv); ++ if (ret) ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_mm_info_arg *arg = data; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_bo_driver *driver = dev->driver->bo_driver; ++ struct drm_mem_type_manager *man; ++ int ret = 0; ++ int mem_type = arg->mem_type; ++ ++ if (!driver) { ++ DRM_ERROR("Buffer objects are not supported by this driver\n"); ++ return -EINVAL; ++ } ++ ++ if (mem_type >= DRM_BO_MEM_TYPES) { ++ DRM_ERROR("Illegal memory type %d\n", arg->mem_type); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ if (!bm->initialized) { ++ DRM_ERROR("DRM memory manager was not initialized\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ ++ man = &bm->man[arg->mem_type]; ++ ++ arg->p_size = man->size; ++ ++out: ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++/* ++ * buffer object vm functions. ++ */ ++ ++int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; ++ ++ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { ++ if (mem->mem_type == DRM_BO_MEM_LOCAL) ++ return 0; ++ ++ if (man->flags & _DRM_FLAG_MEMTYPE_CMA) ++ return 0; ++ ++ if (mem->flags & DRM_BO_FLAG_CACHED) ++ return 0; ++ } ++ return 1; ++} ++EXPORT_SYMBOL(drm_mem_reg_is_pci); ++ ++/** ++ * \c Get the PCI offset for the buffer object memory. ++ * ++ * \param bo The buffer object. ++ * \param bus_base On return the base of the PCI region ++ * \param bus_offset On return the byte offset into the PCI region ++ * \param bus_size On return the byte size of the buffer object or zero if ++ * the buffer object memory is not accessible through a PCI region. ++ * \return Failure indication. ++ * ++ * Returns -EINVAL if the buffer object is currently not mappable. ++ * Otherwise returns zero. 
++ */ ++ ++int drm_bo_pci_offset(struct drm_device *dev, ++ struct drm_bo_mem_reg *mem, ++ unsigned long *bus_base, ++ unsigned long *bus_offset, unsigned long *bus_size) ++{ ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; ++ ++ *bus_size = 0; ++ if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) ++ return -EINVAL; ++ ++ if (drm_mem_reg_is_pci(dev, mem)) { ++ *bus_offset = mem->mm_node->start << PAGE_SHIFT; ++ *bus_size = mem->num_pages << PAGE_SHIFT; ++ *bus_base = man->io_offset; ++ } ++ ++ return 0; ++} ++ ++/** ++ * \c Kill all user-space virtual mappings of this buffer object. ++ * ++ * \param bo The buffer object. ++ * ++ * Call bo->mutex locked. ++ */ ++ ++void drm_bo_unmap_virtual(struct drm_buffer_object *bo) ++{ ++ struct drm_device *dev = bo->dev; ++ loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; ++ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; ++ ++ if (!dev->dev_mapping) ++ return; ++ ++ unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); ++} ++ ++/** ++ * drm_bo_takedown_vm_locked: ++ * ++ * @bo: the buffer object to remove any drm device mapping ++ * ++ * Remove any associated vm mapping on the drm device node that ++ * would have been created for a drm_bo_type_device buffer ++ */ ++static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo) ++{ ++ struct drm_map_list *list; ++ drm_local_map_t *map; ++ struct drm_device *dev = bo->dev; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ if (bo->type != drm_bo_type_device) ++ return; ++ ++ list = &bo->map_list; ++ if (list->user_token) { ++ drm_ht_remove_item(&dev->map_hash, &list->hash); ++ list->user_token = 0; ++ } ++ if (list->file_offset_node) { ++ drm_mm_put_block(list->file_offset_node); ++ list->file_offset_node = NULL; ++ } ++ ++ map = list->map; ++ if (!map) ++ return; ++ ++ drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ); ++ list->map = NULL; ++ list->user_token = 0ULL; ++ drm_bo_usage_deref_locked(&bo); ++} ++ ++/** ++ * drm_bo_setup_vm_locked: ++ * ++ * @bo: the buffer to allocate address space for ++ * ++ * Allocate address space in the drm device so that applications ++ * can mmap the buffer and access the contents. This only ++ * applies to drm_bo_type_device objects as others are not ++ * placed in the drm device address space. 
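++ *
++ * The resulting list->user_token is what drm_bo_fill_rep_arg() hands
++ * back as arg_handle, so a client can (hypothetically) map the buffer
++ * with:
++ *
++ *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
++ *		   drm_fd, rep.arg_handle);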
++ */ ++static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo) ++{ ++ struct drm_map_list *list = &bo->map_list; ++ drm_local_map_t *map; ++ struct drm_device *dev = bo->dev; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); ++ if (!list->map) ++ return -ENOMEM; ++ ++ map = list->map; ++ map->offset = 0; ++ map->type = _DRM_TTM; ++ map->flags = _DRM_REMOVABLE; ++ map->size = bo->mem.num_pages * PAGE_SIZE; ++ atomic_inc(&bo->usage); ++ map->handle = (void *)bo; ++ ++ list->file_offset_node = drm_mm_search_free(&dev->offset_manager, ++ bo->mem.num_pages, 0, 0); ++ ++ if (unlikely(!list->file_offset_node)) { ++ drm_bo_takedown_vm_locked(bo); ++ return -ENOMEM; ++ } ++ ++ list->file_offset_node = drm_mm_get_block(list->file_offset_node, ++ bo->mem.num_pages, 0); ++ ++ if (unlikely(!list->file_offset_node)) { ++ drm_bo_takedown_vm_locked(bo); ++ return -ENOMEM; ++ } ++ ++ list->hash.key = list->file_offset_node->start; ++ if (drm_ht_insert_item(&dev->map_hash, &list->hash)) { ++ drm_bo_takedown_vm_locked(bo); ++ return -ENOMEM; ++ } ++ ++ list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT; ++ ++ return 0; ++} ++ ++int drm_bo_version_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data; ++ ++ arg->major = DRM_BO_INIT_MAJOR; ++ arg->minor = DRM_BO_INIT_MINOR; ++ arg->patchlevel = DRM_BO_INIT_PATCH; ++ ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/drm_bo_lock.c git-nokia/drivers/gpu/drm-tungsten/drm_bo_lock.c +--- git/drivers/gpu/drm-tungsten/drm_bo_lock.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_bo_lock.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,189 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++/* ++ * This file implements a simple replacement for the buffer manager use ++ * of the heavyweight hardware lock. ++ * The lock is a read-write lock. Taking it in read mode is fast, and ++ * intended for in-kernel use only. ++ * Taking it in write mode is slow. 
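++ *
++ * A read-side section is a plain lock/unlock pair, as in the
++ * setstatus ioctl (sketch):
++ *
++ *	ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
++ *	if (ret)
++ *		return ret;
++ *	...
++ *	(void) drm_bo_read_unlock(&dev->bm.bm_lock);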
++ * ++ * The write mode is used only when there is a need to block all ++ * user-space processes from allocating a ++ * new memory area. ++ * Typical use in write mode is X server VT switching, and it's allowed ++ * to leave kernel space with the write lock held. If a user-space process ++ * dies while having the write-lock, it will be released during the file ++ * descriptor release. ++ * ++ * The read lock is typically placed at the start of an IOCTL- or ++ * user-space callable function that may end up allocating a memory area. ++ * This includes setstatus, super-ioctls and no_pfn; the latter may move ++ * unmappable regions to mappable. It's a bug to leave kernel space with the ++ * read lock held. ++ * ++ * Both read- and write lock taking may be interruptible for low signal-delivery ++ * latency. The locking functions will return -EAGAIN if interrupted by a ++ * signal. ++ * ++ * Locking order: The lock should be taken BEFORE any kernel mutexes ++ * or spinlocks. ++ */ ++ ++#include "drmP.h" ++ ++void drm_bo_init_lock(struct drm_bo_lock *lock) ++{ ++ DRM_INIT_WAITQUEUE(&lock->queue); ++ atomic_set(&lock->write_lock_pending, 0); ++ atomic_set(&lock->readers, 0); ++} ++ ++void drm_bo_read_unlock(struct drm_bo_lock *lock) ++{ ++ if (atomic_dec_and_test(&lock->readers)) ++ wake_up_all(&lock->queue); ++} ++EXPORT_SYMBOL(drm_bo_read_unlock); ++ ++int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible) ++{ ++ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) { ++ int ret; ++ ++ if (!interruptible) { ++ wait_event(lock->queue, ++ atomic_read(&lock->write_lock_pending) == 0); ++ continue; ++ } ++ ret = wait_event_interruptible ++ (lock->queue, atomic_read(&lock->write_lock_pending) == 0); ++ if (ret) ++ return -EAGAIN; ++ } ++ ++ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) { ++ int ret; ++ if (!interruptible) { ++ wait_event(lock->queue, ++ atomic_read(&lock->readers) != -1); ++ continue; ++ } ++ ret = wait_event_interruptible ++ (lock->queue, atomic_read(&lock->readers) != -1); ++ if (ret) ++ return -EAGAIN; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_read_lock); ++ ++static int __drm_bo_write_unlock(struct drm_bo_lock *lock) ++{ ++ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1)) ++ return -EINVAL; ++ wake_up_all(&lock->queue); ++ return 0; ++} ++ ++static void drm_bo_write_lock_remove(struct drm_file *file_priv, ++ struct drm_user_object *item) ++{ ++ struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base); ++ int ret; ++ ++ ret = __drm_bo_write_unlock(lock); ++ BUG_ON(ret); ++} ++ ++int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible, ++ struct drm_file *file_priv) ++{ ++ int ret = 0; ++ struct drm_device *dev; ++ ++ atomic_inc(&lock->write_lock_pending); ++ ++ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { ++ if (!interruptible) { ++ wait_event(lock->queue, ++ atomic_read(&lock->readers) == 0); ++ continue; ++ } ++ ret = wait_event_interruptible ++ (lock->queue, atomic_read(&lock->readers) == 0); ++ ++ if (ret) { ++ atomic_dec(&lock->write_lock_pending); ++ wake_up_all(&lock->queue); ++ return -EAGAIN; ++ } ++ } ++ ++ /* ++ * Add a dummy user-object, the destructor of which will ++ * make sure the lock is released if the client dies ++ * while holding it. 
++ */ ++ ++ if (atomic_dec_and_test(&lock->write_lock_pending)) ++ wake_up_all(&lock->queue); ++ dev = file_priv->minor->dev; ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_add_user_object(file_priv, &lock->base, 0); ++ lock->base.remove = &drm_bo_write_lock_remove; ++ lock->base.type = drm_lock_type; ++ if (ret) ++ (void)__drm_bo_write_unlock(lock); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++ ++int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_ref_object *ro; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (lock->base.owner != file_priv) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE); ++ BUG_ON(!ro); ++ drm_remove_ref_object(file_priv, ro); ++ lock->base.owner = NULL; ++ ++ mutex_unlock(&dev->struct_mutex); ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/drm_bo_move.c git-nokia/drivers/gpu/drm-tungsten/drm_bo_move.c +--- git/drivers/gpu/drm-tungsten/drm_bo_move.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_bo_move.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,630 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++/** ++ * Free the old memory node unless it's a pinned region and we ++ * have not been requested to free also pinned regions. 
++ */
++
++static void drm_bo_free_old_node(struct drm_buffer_object *bo)
++{
++ struct drm_bo_mem_reg *old_mem = &bo->mem;
++
++ if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
++ mutex_lock(&bo->dev->struct_mutex);
++ drm_mm_put_block(old_mem->mm_node);
++ mutex_unlock(&bo->dev->struct_mutex);
++ }
++ old_mem->mm_node = NULL;
++}
++
++int drm_bo_move_ttm(struct drm_buffer_object *bo,
++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
++{
++ struct drm_ttm *ttm = bo->ttm;
++ struct drm_bo_mem_reg *old_mem = &bo->mem;
++ uint64_t save_flags = old_mem->flags;
++ uint64_t save_proposed_flags = old_mem->proposed_flags;
++ int ret;
++
++ if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
++ if (evict)
++ drm_ttm_evict(ttm);
++ else
++ drm_ttm_unbind(ttm);
++
++ drm_bo_free_old_node(bo);
++ DRM_FLAG_MASKED(old_mem->flags,
++ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
++ DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
++ old_mem->mem_type = DRM_BO_MEM_LOCAL;
++ save_flags = old_mem->flags;
++ }
++ if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
++ ret = drm_ttm_bind(ttm, new_mem);
++ if (ret)
++ return ret;
++ }
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
++ return 0;
++}
++EXPORT_SYMBOL(drm_bo_move_ttm);
++
++/**
++ * \c Return a kernel virtual address to the buffer object PCI memory.
++ *
++ * \param bo The buffer object.
++ * \return Failure indication.
++ *
++ * Returns -EINVAL if the buffer object is currently not mappable.
++ * Returns -ENOMEM if the ioremap operation failed.
++ * Otherwise returns zero.
++ *
++ * After a successful call, bo->iomap contains the virtual address, or NULL
++ * if the buffer object content is not accessible through PCI space.
++ * Call bo->mutex locked.
++ */
++
++int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
++ void **virtual)
++{
++ struct drm_buffer_manager *bm = &dev->bm;
++ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long bus_base;
++ int ret;
++ void *addr;
++
++ *virtual = NULL;
++ ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
++ if (ret || bus_size == 0)
++ return ret;
++
++ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
++ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
++ else {
++ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
++ if (!addr)
++ return -ENOMEM;
++ }
++ *virtual = addr;
++ return 0;
++}
++EXPORT_SYMBOL(drm_mem_reg_ioremap);
++
++/**
++ * \c Unmap mapping obtained using drm_bo_ioremap
++ *
++ * \param bo The buffer object.
++ *
++ * Call bo->mutex locked.
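++ *
++ * A typical pairing with drm_mem_reg_ioremap() (illustrative only,
++ * error handling elided):
++ *
++ *   void *virtual;
++ *   if (drm_mem_reg_ioremap(dev, &bo->mem, &virtual) == 0 && virtual) {
++ *           ... access the region through virtual ...
++ *           drm_mem_reg_iounmap(dev, &bo->mem, virtual);
++ *   }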
++ */ ++ ++void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem, ++ void *virtual) ++{ ++ struct drm_buffer_manager *bm; ++ struct drm_mem_type_manager *man; ++ ++ bm = &dev->bm; ++ man = &bm->man[mem->mem_type]; ++ ++ if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) ++ iounmap(virtual); ++} ++ ++static int drm_copy_io_page(void *dst, void *src, unsigned long page) ++{ ++ uint32_t *dstP = ++ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT)); ++ uint32_t *srcP = ++ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT)); ++ ++ int i; ++ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i) ++ iowrite32(ioread32(srcP++), dstP++); ++ return 0; ++} ++ ++static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src, ++ unsigned long page) ++{ ++ struct page *d = drm_ttm_get_page(ttm, page); ++ void *dst; ++ ++ if (!d) ++ return -ENOMEM; ++ ++ src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); ++ dst = kmap(d); ++ if (!dst) ++ return -ENOMEM; ++ ++ memcpy_fromio(dst, src, PAGE_SIZE); ++ kunmap(d); ++ return 0; ++} ++ ++static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page) ++{ ++ struct page *s = drm_ttm_get_page(ttm, page); ++ void *src; ++ ++ if (!s) ++ return -ENOMEM; ++ ++ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); ++ src = kmap(s); ++ if (!src) ++ return -ENOMEM; ++ ++ memcpy_toio(dst, src, PAGE_SIZE); ++ kunmap(s); ++ return 0; ++} ++ ++int drm_bo_move_memcpy(struct drm_buffer_object *bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; ++ struct drm_ttm *ttm = bo->ttm; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ struct drm_bo_mem_reg old_copy = *old_mem; ++ void *old_iomap; ++ void *new_iomap; ++ int ret; ++ uint64_t save_flags = old_mem->flags; ++ uint64_t save_proposed_flags = old_mem->proposed_flags; ++ unsigned long i; ++ unsigned long page; ++ unsigned long add = 0; ++ int dir; ++ ++ ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap); ++ if (ret) ++ return ret; ++ ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap); ++ if (ret) ++ goto out; ++ ++ if (old_iomap == NULL && new_iomap == NULL) ++ goto out2; ++ if (old_iomap == NULL && ttm == NULL) ++ goto out2; ++ ++ add = 0; ++ dir = 1; ++ ++ if ((old_mem->mem_type == new_mem->mem_type) && ++ (new_mem->mm_node->start < ++ old_mem->mm_node->start + old_mem->mm_node->size)) { ++ dir = -1; ++ add = new_mem->num_pages - 1; ++ } ++ ++ for (i = 0; i < new_mem->num_pages; ++i) { ++ page = i * dir + add; ++ if (old_iomap == NULL) ++ ret = drm_copy_ttm_io_page(ttm, new_iomap, page); ++ else if (new_iomap == NULL) ++ ret = drm_copy_io_ttm_page(ttm, old_iomap, page); ++ else ++ ret = drm_copy_io_page(new_iomap, old_iomap, page); ++ if (ret) ++ goto out1; ++ } ++ mb(); ++out2: ++ drm_bo_free_old_node(bo); ++ ++ *old_mem = *new_mem; ++ new_mem->mm_node = NULL; ++ old_mem->proposed_flags = save_proposed_flags; ++ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); ++ ++ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) { ++ drm_ttm_unbind(ttm); ++ drm_ttm_destroy(ttm); ++ bo->ttm = NULL; ++ } ++ ++out1: ++ drm_mem_reg_iounmap(dev, new_mem, new_iomap); ++out: ++ drm_mem_reg_iounmap(dev, &old_copy, old_iomap); ++ return ret; ++} ++EXPORT_SYMBOL(drm_bo_move_memcpy); ++ ++/* ++ * Transfer a buffer object's memory and LRU status to a newly ++ * created object. User-space references remains with the old ++ * object. 
Call bo->mutex locked. ++ */ ++ ++int drm_buffer_object_transfer(struct drm_buffer_object *bo, ++ struct drm_buffer_object **new_obj) ++{ ++ struct drm_buffer_object *fbo; ++ struct drm_device *dev = bo->dev; ++ struct drm_buffer_manager *bm = &dev->bm; ++ ++ fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); ++ if (!fbo) ++ return -ENOMEM; ++ ++ *fbo = *bo; ++ mutex_init(&fbo->mutex); ++ mutex_lock(&fbo->mutex); ++ mutex_lock(&dev->struct_mutex); ++ ++ DRM_INIT_WAITQUEUE(&bo->event_queue); ++ INIT_LIST_HEAD(&fbo->ddestroy); ++ INIT_LIST_HEAD(&fbo->lru); ++ INIT_LIST_HEAD(&fbo->pinned_lru); ++#ifdef DRM_ODD_MM_COMPAT ++ INIT_LIST_HEAD(&fbo->vma_list); ++ INIT_LIST_HEAD(&fbo->p_mm_list); ++#endif ++ ++ fbo->fence = drm_fence_reference_locked(bo->fence); ++ fbo->pinned_node = NULL; ++ fbo->mem.mm_node->private = (void *)fbo; ++ atomic_set(&fbo->usage, 1); ++ atomic_inc(&bm->count); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_unlock(&fbo->mutex); ++ ++ *new_obj = fbo; ++ return 0; ++} ++ ++/* ++ * Since move is underway, we need to block signals in this function. ++ * We cannot restart until it has finished. ++ */ ++ ++int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo, ++ int evict, int no_wait, uint32_t fence_class, ++ uint32_t fence_type, uint32_t fence_flags, ++ struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ int ret; ++ uint64_t save_flags = old_mem->flags; ++ uint64_t save_proposed_flags = old_mem->proposed_flags; ++ struct drm_buffer_object *old_obj; ++ ++ if (bo->fence) ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ ret = drm_fence_object_create(dev, fence_class, fence_type, ++ fence_flags | DRM_FENCE_FLAG_EMIT, ++ &bo->fence); ++ bo->fence_type = fence_type; ++ if (ret) ++ return ret; ++ ++#ifdef DRM_ODD_MM_COMPAT ++ /* ++ * In this mode, we don't allow pipelining a copy blit, ++ * since the buffer will be accessible from user space ++ * the moment we return and rebuild the page tables. ++ * ++ * With normal vm operation, page tables are rebuilt ++ * on demand using fault(), which waits for buffer idle. ++ */ ++ if (1) ++#else ++ if (evict || ((bo->mem.mm_node == bo->pinned_node) && ++ bo->mem.mm_node != NULL)) ++#endif ++ { ++ if (bo->fence) { ++ (void) drm_fence_object_wait(bo->fence, 0, 1, ++ bo->fence_type); ++ drm_fence_usage_deref_unlocked(&bo->fence); ++ } ++ drm_bo_free_old_node(bo); ++ ++ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) { ++ drm_ttm_unbind(bo->ttm); ++ drm_ttm_destroy(bo->ttm); ++ bo->ttm = NULL; ++ } ++ } else { ++ ++ /* This should help pipeline ordinary buffer moves. ++ * ++ * Hang old buffer memory on a new buffer object, ++ * and leave it to be released when the GPU ++ * operation has completed. 
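++ * (In other words: drm_buffer_object_transfer() creates old_obj, which
++ * inherits the source memory and a reference to the fence; old_obj is
++ * put on the LRU and released once the GPU is done, while user-space
++ * references keep following bo.)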
++ */ ++ ++ ret = drm_buffer_object_transfer(bo, &old_obj); ++ ++ if (ret) ++ return ret; ++ ++ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) ++ old_obj->ttm = NULL; ++ else ++ bo->ttm = NULL; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_del_init(&old_obj->lru); ++ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); ++ drm_bo_add_to_lru(old_obj); ++ ++ drm_bo_usage_deref_locked(&old_obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ } ++ ++ *old_mem = *new_mem; ++ new_mem->mm_node = NULL; ++ old_mem->proposed_flags = save_proposed_flags; ++ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); ++ return 0; ++} ++EXPORT_SYMBOL(drm_bo_move_accel_cleanup); ++ ++int drm_bo_same_page(unsigned long offset, ++ unsigned long offset2) ++{ ++ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK); ++} ++EXPORT_SYMBOL(drm_bo_same_page); ++ ++unsigned long drm_bo_offset_end(unsigned long offset, ++ unsigned long end) ++{ ++ offset = (offset + PAGE_SIZE) & PAGE_MASK; ++ return (end < offset) ? end : offset; ++} ++EXPORT_SYMBOL(drm_bo_offset_end); ++ ++static pgprot_t drm_kernel_io_prot(uint32_t map_type) ++{ ++ pgprot_t tmp = PAGE_KERNEL; ++ ++#if defined(__i386__) || defined(__x86_64__) ++#ifdef USE_PAT_WC ++#warning using pat ++ if (drm_use_pat() && map_type == _DRM_TTM) { ++ pgprot_val(tmp) |= _PAGE_PAT; ++ return tmp; ++ } ++#endif ++ if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) { ++ pgprot_val(tmp) |= _PAGE_PCD; ++ pgprot_val(tmp) &= ~_PAGE_PWT; ++ } ++#elif defined(__powerpc__) ++ pgprot_val(tmp) |= _PAGE_NO_CACHE; ++ if (map_type == _DRM_REGISTERS) ++ pgprot_val(tmp) |= _PAGE_GUARDED; ++#endif ++#if defined(__ia64__) ++ if (map_type == _DRM_TTM) ++ tmp = pgprot_writecombine(tmp); ++ else ++ tmp = pgprot_noncached(tmp); ++#endif ++ return tmp; ++} ++ ++static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base, ++ unsigned long bus_offset, unsigned long bus_size, ++ struct drm_bo_kmap_obj *map) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg *mem = &bo->mem; ++ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; ++ ++ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) { ++ map->bo_kmap_type = bo_map_premapped; ++ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset); ++ } else { ++ map->bo_kmap_type = bo_map_iomap; ++ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size); ++ } ++ return (!map->virtual) ? -ENOMEM : 0; ++} ++ ++static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, ++ unsigned long start_page, unsigned long num_pages, ++ struct drm_bo_kmap_obj *map) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg *mem = &bo->mem; ++ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; ++ pgprot_t prot; ++ struct drm_ttm *ttm = bo->ttm; ++ struct page *d; ++ int i; ++ ++ BUG_ON(!ttm); ++ ++ if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) { ++ ++ /* ++ * We're mapping a single page, and the desired ++ * page protection is consistent with the bo. ++ */ ++ ++ map->bo_kmap_type = bo_map_kmap; ++ map->page = drm_ttm_get_page(ttm, start_page); ++ map->virtual = kmap(map->page); ++ } else { ++ /* ++ * Populate the part we're mapping; ++ */ ++ ++ for (i = start_page; i < start_page + num_pages; ++i) { ++ d = drm_ttm_get_page(ttm, i); ++ if (!d) ++ return -ENOMEM; ++ } ++ ++ /* ++ * We need to use vmap to get the desired page protection ++ * or to make the buffer object look contigous. ++ */ ++ ++ prot = (mem->flags & DRM_BO_FLAG_CACHED) ? 
++ PAGE_KERNEL :
++ drm_kernel_io_prot(man->drm_bus_maptype);
++ map->bo_kmap_type = bo_map_vmap;
++ map->virtual = vmap(ttm->pages + start_page,
++ num_pages, 0, prot);
++ }
++ return (!map->virtual) ? -ENOMEM : 0;
++}
++
++/*
++ * This function is to be used for kernel mapping of buffer objects.
++ * It chooses the appropriate mapping method depending on the memory type
++ * and caching policy the buffer currently has.
++ * Mapping multiple pages or buffers that live in io memory is a bit slow and
++ * consumes vmalloc space. Be restrictive with such mappings.
++ * Mapping single pages usually returns the logical kernel address,
++ * (which is fast)
++ * but may use slower temporary mappings for high memory pages or
++ * uncached / write-combined pages.
++ *
++ * The function fills in a drm_bo_kmap_obj which can be used to return the
++ * kernel virtual address of the buffer.
++ *
++ * Code servicing a non-privileged user request is only allowed to map one
++ * page at a time. We might need to implement a better scheme to stop such
++ * processes from consuming all vmalloc space.
++ */
++
++int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
++ unsigned long num_pages, struct drm_bo_kmap_obj *map)
++{
++ int ret;
++ unsigned long bus_base;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++
++ map->virtual = NULL;
++
++ if (num_pages > bo->num_pages)
++ return -EINVAL;
++ if (start_page > bo->num_pages)
++ return -EINVAL;
++#if 0
++ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
++ return -EPERM;
++#endif
++ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
++ &bus_offset, &bus_size);
++
++ if (ret)
++ return ret;
++
++ if (bus_size == 0) {
++ return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
++ } else {
++ bus_offset += start_page << PAGE_SHIFT;
++ bus_size = num_pages << PAGE_SHIFT;
++ return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
++ }
++}
++EXPORT_SYMBOL(drm_bo_kmap);
++
++void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
++{
++ if (!map->virtual)
++ return;
++
++ switch (map->bo_kmap_type) {
++ case bo_map_iomap:
++ iounmap(map->virtual);
++ break;
++ case bo_map_vmap:
++ vunmap(map->virtual);
++ break;
++ case bo_map_kmap:
++ kunmap(map->page);
++ break;
++ case bo_map_premapped:
++ break;
++ default:
++ BUG();
++ }
++ map->virtual = NULL;
++ map->page = NULL;
++}
++EXPORT_SYMBOL(drm_bo_kunmap);
++
++int drm_bo_pfn_prot(struct drm_buffer_object *bo,
++ unsigned long dst_offset,
++ unsigned long *pfn,
++ pgprot_t *prot)
++{
++ struct drm_bo_mem_reg *mem = &bo->mem;
++ struct drm_device *dev = bo->dev;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long bus_base;
++ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
++ int ret;
++
++ ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset,
++ &bus_size);
++ if (ret)
++ return -EINVAL;
++
++ if (bus_size != 0)
++ *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
++ else if (!bo->ttm)
++ return -EINVAL;
++ else
++ *pfn = page_to_pfn(drm_ttm_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));
++
++ *prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
++ PAGE_KERNEL : drm_kernel_io_prot(man->drm_bus_maptype);
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_bo_pfn_prot);
++
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_bufs.c git-nokia/drivers/gpu/drm-tungsten/drm_bufs.c
+--- git/drivers/gpu/drm-tungsten/drm_bufs.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_bufs.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1608 @@
++/**
++ * \file drm_bufs.c
++ * Generic buffer template
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
++ *
++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/vmalloc.h>
++#include "drmP.h"
++
++unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
++{
++ return pci_resource_start(dev->pdev, resource);
++}
++EXPORT_SYMBOL(drm_get_resource_start);
++
++unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
++{
++ return pci_resource_len(dev->pdev, resource);
++}
++EXPORT_SYMBOL(drm_get_resource_len);
++
++struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map_t *map)
++{
++ struct drm_map_list *entry;
++ list_for_each_entry(entry, &dev->maplist, head) {
++ if (entry->map && map->type == entry->map->type &&
++ ((entry->map->offset == map->offset) ||
++ (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
++ return entry;
++ }
++ }
++
++ return NULL;
++}
++EXPORT_SYMBOL(drm_find_matching_map);
++
++static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
++ unsigned long user_token, int hashed_handle)
++{
++ int use_hashed_handle;
++
++#if (BITS_PER_LONG == 64)
++ use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
++#elif (BITS_PER_LONG == 32)
++ use_hashed_handle = hashed_handle;
++#else
++#error Unsupported long size. Neither 64 nor 32 bits.
++#endif ++ ++ if (!use_hashed_handle) { ++ int ret; ++ hash->key = user_token >> PAGE_SHIFT; ++ ret = drm_ht_insert_item(&dev->map_hash, hash); ++ if (ret != -EINVAL) ++ return ret; ++ } ++ return drm_ht_just_insert_please(&dev->map_hash, hash, ++ user_token, 32 - PAGE_SHIFT - 3, ++ 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT); ++} ++ ++/** ++ * Ioctl to specify a range of memory that is available for mapping by a non-root process. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_map structure. ++ * \return zero on success or a negative value on error. ++ * ++ * Adjusts the memory offset to its absolute value according to the mapping ++ * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where ++ * applicable and if supported by the kernel. ++ */ ++static int drm_addmap_core(struct drm_device *dev, unsigned int offset, ++ unsigned int size, enum drm_map_type type, ++ enum drm_map_flags flags, ++ struct drm_map_list **maplist) ++{ ++ struct drm_map *map; ++ struct drm_map_list *list; ++ drm_dma_handle_t *dmah; ++ unsigned long user_token; ++ int ret; ++ ++ map = drm_alloc(sizeof(*map), DRM_MEM_MAPS); ++ if (!map) ++ return -ENOMEM; ++ ++ map->offset = offset; ++ map->size = size; ++ map->flags = flags; ++ map->type = type; ++ ++ /* Only allow shared memory to be removable since we only keep enough ++ * book keeping information about shared memory to allow for removal ++ * when processes fork. ++ */ ++ if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++ DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", ++ map->offset, map->size, map->type); ++ if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++ map->mtrr = -1; ++ map->handle = NULL; ++ ++ switch (map->type) { ++ case _DRM_REGISTERS: ++ case _DRM_FRAME_BUFFER: ++#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) ++ if (map->offset + (map->size - 1) < map->offset || ++ map->offset < virt_to_phys(high_memory)) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++#endif ++#ifdef __alpha__ ++ map->offset += dev->hose->mem_space->start; ++#endif ++ /* Some drivers preinitialize some maps, without the X Server ++ * needing to be aware of it. Therefore, we just return success ++ * when the server tries to create a duplicate map. 
++ */ ++ list = drm_find_matching_map(dev, map); ++ if (list != NULL) { ++ if (list->map->size != map->size) { ++ DRM_DEBUG("Matching maps of type %d with " ++ "mismatched sizes, (%ld vs %ld)\n", ++ map->type, map->size, ++ list->map->size); ++ list->map->size = map->size; ++ } ++ ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ *maplist = list; ++ return 0; ++ } ++ ++ if (drm_core_has_MTRR(dev)) { ++ if (map->type == _DRM_FRAME_BUFFER || ++ (map->flags & _DRM_WRITE_COMBINING)) { ++ map->mtrr = mtrr_add(map->offset, map->size, ++ MTRR_TYPE_WRCOMB, 1); ++ } ++ } ++ if (map->type == _DRM_REGISTERS) { ++ map->handle = ioremap(map->offset, map->size); ++ if (!map->handle) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -ENOMEM; ++ } ++ } ++ break; ++ case _DRM_SHM: ++ list = drm_find_matching_map(dev, map); ++ if (list != NULL) { ++ if(list->map->size != map->size) { ++ DRM_DEBUG("Matching maps of type %d with " ++ "mismatched sizes, (%ld vs %ld)\n", ++ map->type, map->size, list->map->size); ++ list->map->size = map->size; ++ } ++ ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ *maplist = list; ++ return 0; ++ } ++ map->handle = vmalloc_user(map->size); ++ DRM_DEBUG("%lu %d %p\n", ++ map->size, drm_order(map->size), map->handle); ++ if (!map->handle) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -ENOMEM; ++ } ++ map->offset = (unsigned long)map->handle; ++ if (map->flags & _DRM_CONTAINS_LOCK) { ++ /* Prevent a 2nd X Server from creating a 2nd lock */ ++ if (dev->lock.hw_lock != NULL) { ++ vfree(map->handle); ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EBUSY; ++ } ++ dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */ ++ } ++ break; ++ case _DRM_AGP: { ++ struct drm_agp_mem *entry; ++ int valid = 0; ++ ++ if (!drm_core_has_AGP(dev)) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++#ifdef __alpha__ ++ map->offset += dev->hose->mem_space->start; ++#endif ++ /* In some cases (i810 driver), user space may have already ++ * added the AGP base itself, because dev->agp->base previously ++ * only got set during AGP enable. So, only add the base ++ * address if the map's offset isn't already within the ++ * aperture. ++ */ ++ if (map->offset < dev->agp->base || ++ map->offset > dev->agp->base + ++ dev->agp->agp_info.aper_size * 1024 * 1024 - 1) { ++ map->offset += dev->agp->base; ++ } ++ map->mtrr = dev->agp->agp_mtrr; /* for getmap */ ++ ++ /* This assumes the DRM is in total control of AGP space. ++ * It's not always the case as AGP can be in the control ++ * of user space (i.e. i810 driver). 
So this loop will get ++ * skipped and we double check that dev->agp->memory is ++ * actually set as well as being invalid before EPERM'ing ++ */ ++ list_for_each_entry(entry, &dev->agp->memory, head) { ++ if ((map->offset >= entry->bound) && ++ (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { ++ valid = 1; ++ break; ++ } ++ } ++ if (!list_empty(&dev->agp->memory) && !valid) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EPERM; ++ } ++ DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size); ++ break; ++ } ++ case _DRM_SCATTER_GATHER: ++ if (!dev->sg) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++ map->offset += (unsigned long)dev->sg->virtual; ++ break; ++ case _DRM_CONSISTENT: ++ /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G, ++ * As we're limiting the address to 2^32-1 (or less), ++ * casting it down to 32 bits is no problem, but we ++ * need to point to a 64bit variable first. */ ++ dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); ++ if (!dmah) { ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -ENOMEM; ++ } ++ map->handle = dmah->vaddr; ++ map->offset = (unsigned long)dmah->busaddr; ++ kfree(dmah); ++ break; ++ default: ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++ ++ list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); ++ if (!list) { ++ if (map->type == _DRM_REGISTERS) ++ iounmap(map->handle); ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ return -EINVAL; ++ } ++ memset(list, 0, sizeof(*list)); ++ list->map = map; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_add(&list->head, &dev->maplist); ++ ++ /* Assign a 32-bit handle */ ++ ++ user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle : ++ map->offset; ++ ret = drm_map_handle(dev, &list->hash, user_token, 0); ++ ++ if (ret) { ++ if (map->type == _DRM_REGISTERS) ++ iounmap(map->handle); ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ drm_free(list, sizeof(*list), DRM_MEM_MAPS); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ ++ list->user_token = list->hash.key << PAGE_SHIFT; ++ mutex_unlock(&dev->struct_mutex); ++ ++ *maplist = list; ++ return 0; ++} ++ ++int drm_addmap(struct drm_device *dev, unsigned int offset, ++ unsigned int size, enum drm_map_type type, ++ enum drm_map_flags flags, drm_local_map_t ** map_ptr) ++{ ++ struct drm_map_list *list; ++ int rc; ++ ++ rc = drm_addmap_core(dev, offset, size, type, flags, &list); ++ if (!rc) ++ *map_ptr = list->map; ++ return rc; ++} ++ ++EXPORT_SYMBOL(drm_addmap); ++ ++int drm_addmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_map *map = data; ++ struct drm_map_list *maplist; ++ int err; ++ ++ if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP)) ++ return -EPERM; ++ ++ err = drm_addmap_core(dev, map->offset, map->size, map->type, ++ map->flags, &maplist); ++ ++ if (err) ++ return err; ++ ++ /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ ++ map->handle = (void *)(unsigned long)maplist->user_token; ++ return 0; ++} ++ ++/** ++ * Remove a map private from list and deallocate resources if the mapping ++ * isn't in use. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a struct drm_map structure. ++ * \return zero on success or a negative value on error. 
++ *
++ * Searches for the map on drm_device::maplist, removes it from the list,
++ * sees if it's being used, and frees any associated resources (such as
++ * MTRR's) if it's no longer in use.
++ *
++ * \sa drm_addmap
++ */
++int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
++{
++ struct drm_map_list *r_list = NULL, *list_t;
++ drm_dma_handle_t dmah;
++ int found = 0;
++
++ /* Find the list entry for the map and remove it */
++ list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
++ if (r_list->map == map) {
++ list_del(&r_list->head);
++ drm_ht_remove_key(&dev->map_hash,
++ r_list->user_token >> PAGE_SHIFT);
++ drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
++ found = 1;
++ break;
++ }
++ }
++
++ if (!found)
++ return -EINVAL;
++
++ /* List has wrapped around to the head pointer, or it's empty and we
++ * didn't find anything.
++ */
++
++ switch (map->type) {
++ case _DRM_REGISTERS:
++ iounmap(map->handle);
++ /* FALLTHROUGH */
++ case _DRM_FRAME_BUFFER:
++ if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
++ int retcode;
++ retcode = mtrr_del(map->mtrr, map->offset, map->size);
++ DRM_DEBUG("mtrr_del=%d\n", retcode);
++ }
++ break;
++ case _DRM_SHM:
++ vfree(map->handle);
++ break;
++ case _DRM_AGP:
++ case _DRM_SCATTER_GATHER:
++ break;
++ case _DRM_CONSISTENT:
++ dmah.vaddr = map->handle;
++ dmah.busaddr = map->offset;
++ dmah.size = map->size;
++ __drm_pci_free(dev, &dmah);
++ break;
++ case _DRM_TTM:
++ BUG_ON(1);
++ }
++ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++
++ return 0;
++}
++EXPORT_SYMBOL(drm_rmmap_locked);
++
++int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
++{
++ int ret;
++
++ mutex_lock(&dev->struct_mutex);
++ ret = drm_rmmap_locked(dev, map);
++ mutex_unlock(&dev->struct_mutex);
++
++ return ret;
++}
++EXPORT_SYMBOL(drm_rmmap);
++
++/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
++ * the last close of the device, and this is necessary for cleanup when things
++ * exit uncleanly. Therefore, having userland manually remove mappings seems
++ * like a pointless exercise since they're going away anyway.
++ *
++ * One use case might be after addmap is allowed for normal users for SHM and
++ * gets used by drivers that the server doesn't need to care about. This seems
++ * unlikely.
++ */
++int drm_rmmap_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_map *request = data;
++ drm_local_map_t *map = NULL;
++ struct drm_map_list *r_list;
++ int ret;
++
++ mutex_lock(&dev->struct_mutex);
++ list_for_each_entry(r_list, &dev->maplist, head) {
++ if (r_list->map &&
++ r_list->user_token == (unsigned long)request->handle &&
++ r_list->map->flags & _DRM_REMOVABLE) {
++ map = r_list->map;
++ break;
++ }
++ }
++
++ /* List has wrapped around to the head pointer, or it's empty and we
++ * didn't find anything.
++ */
++ if (list_empty(&dev->maplist) || !map) {
++ mutex_unlock(&dev->struct_mutex);
++ return -EINVAL;
++ }
++
++ /* Register and framebuffer maps are permanent */
++ if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
++ mutex_unlock(&dev->struct_mutex);
++ return 0;
++ }
++
++ ret = drm_rmmap_locked(dev, map);
++
++ mutex_unlock(&dev->struct_mutex);
++
++ return ret;
++}
++
++/**
++ * Cleanup after an error on one of the addbufs() functions.
++ *
++ * \param dev DRM device.
++ * \param entry buffer entry where the error occurred.
++ *
++ * Frees any pages and buffers associated with the given entry.
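++ * (Callers that fail part-way through an allocation first set
++ * entry->buf_count, and where applicable entry->seg_count, to the full
++ * count so that everything allocated so far is released; see the
++ * "Set count correctly" comments in the addbufs paths below.)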
++ */ ++static void drm_cleanup_buf_error(struct drm_device *dev, ++ struct drm_buf_entry *entry) ++{ ++ int i; ++ ++ if (entry->seg_count) { ++ for (i = 0; i < entry->seg_count; i++) { ++ if (entry->seglist[i]) { ++ drm_pci_free(dev, entry->seglist[i]); ++ } ++ } ++ drm_free(entry->seglist, ++ entry->seg_count * ++ sizeof(*entry->seglist), DRM_MEM_SEGS); ++ ++ entry->seg_count = 0; ++ } ++ ++ if (entry->buf_count) { ++ for (i = 0; i < entry->buf_count; i++) { ++ if (entry->buflist[i].dev_private) { ++ drm_free(entry->buflist[i].dev_private, ++ entry->buflist[i].dev_priv_size, ++ DRM_MEM_BUFS); ++ } ++ } ++ drm_free(entry->buflist, ++ entry->buf_count * ++ sizeof(*entry->buflist), DRM_MEM_BUFS); ++ ++ entry->buf_count = 0; ++ } ++} ++ ++#if __OS_HAS_AGP ++/** ++ * Add AGP buffers for DMA transfers. ++ * ++ * \param dev struct drm_device to which the buffers are to be added. ++ * \param request pointer to a struct drm_buf_desc describing the request. ++ * \return zero on success or a negative number on failure. ++ * ++ * After some sanity checks creates a drm_buf structure for each buffer and ++ * reallocates the buffer list of the same size order to accommodate the new ++ * buffers. ++ */ ++int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf_entry *entry; ++ struct drm_agp_mem *agp_entry; ++ struct drm_buf *buf; ++ unsigned long offset; ++ unsigned long agp_offset; ++ int count; ++ int order; ++ int size; ++ int alignment; ++ int page_order; ++ int total; ++ int byte_count; ++ int i, valid; ++ struct drm_buf **temp_buflist; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ count = request->count; ++ order = drm_order(request->size); ++ size = 1 << order; ++ ++ alignment = (request->flags & _DRM_PAGE_ALIGN) ++ ? PAGE_ALIGN(size) : size; ++ page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; ++ total = PAGE_SIZE << page_order; ++ ++ byte_count = 0; ++ agp_offset = dev->agp->base + request->agp_start; ++ ++ DRM_DEBUG("count: %d\n", count); ++ DRM_DEBUG("order: %d\n", order); ++ DRM_DEBUG("size: %d\n", size); ++ DRM_DEBUG("agp_offset: %lx\n", agp_offset); ++ DRM_DEBUG("alignment: %d\n", alignment); ++ DRM_DEBUG("page_order: %d\n", page_order); ++ DRM_DEBUG("total: %d\n", total); ++ ++ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) ++ return -EINVAL; ++ if (dev->queue_count) ++ return -EBUSY; /* Not while in use */ ++ ++ /* Make sure buffers are located in AGP memory that we own */ ++ valid = 0; ++ list_for_each_entry(agp_entry, &dev->agp->memory, head) { ++ if ((agp_offset >= agp_entry->bound) && ++ (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { ++ valid = 1; ++ break; ++ } ++ } ++ if (!list_empty(&dev->agp->memory) && !valid) { ++ DRM_DEBUG("zone invalid\n"); ++ return -EINVAL; ++ } ++ spin_lock(&dev->count_lock); ++ if (dev->buf_use) { ++ spin_unlock(&dev->count_lock); ++ return -EBUSY; ++ } ++ atomic_inc(&dev->buf_alloc); ++ spin_unlock(&dev->count_lock); ++ ++ mutex_lock(&dev->struct_mutex); ++ entry = &dma->bufs[order]; ++ if (entry->buf_count) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; /* May only call once for each order */ ++ } ++ ++ if (count < 0 || count > 4096) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -EINVAL; ++ } ++ ++ entry->buflist = drm_alloc(count * sizeof(*entry->buflist), ++ DRM_MEM_BUFS); ++ if (!entry->buflist) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(entry->buflist, 0, count * sizeof(*entry->buflist)); ++ ++ entry->buf_size = size; ++ entry->page_order = page_order; ++ ++ offset = 0; ++ ++ while (entry->buf_count < count) { ++ buf = &entry->buflist[entry->buf_count]; ++ buf->idx = dma->buf_count + entry->buf_count; ++ buf->total = alignment; ++ buf->order = order; ++ buf->used = 0; ++ ++ buf->offset = (dma->byte_count + offset); ++ buf->bus_address = agp_offset + offset; ++ buf->address = (void *)(agp_offset + offset); ++ buf->next = NULL; ++ buf->waiting = 0; ++ buf->pending = 0; ++ init_waitqueue_head(&buf->dma_wait); ++ buf->file_priv = NULL; ++ ++ buf->dev_priv_size = dev->driver->dev_priv_size; ++ buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); ++ if (!buf->dev_private) { ++ /* Set count correctly so we free the proper amount. 
*/ ++ entry->buf_count = count; ++ drm_cleanup_buf_error(dev, entry); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(buf->dev_private, 0, buf->dev_priv_size); ++ ++ DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); ++ ++ offset += alignment; ++ entry->buf_count++; ++ byte_count += PAGE_SIZE << page_order; ++ } ++ ++ DRM_DEBUG("byte_count: %d\n", byte_count); ++ ++ temp_buflist = drm_realloc(dma->buflist, ++ dma->buf_count * sizeof(*dma->buflist), ++ (dma->buf_count + entry->buf_count) ++ * sizeof(*dma->buflist), DRM_MEM_BUFS); ++ if (!temp_buflist) { ++ /* Free the entry because it isn't valid */ ++ drm_cleanup_buf_error(dev, entry); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ dma->buflist = temp_buflist; ++ ++ for (i = 0; i < entry->buf_count; i++) { ++ dma->buflist[i + dma->buf_count] = &entry->buflist[i]; ++ } ++ ++ dma->buf_count += entry->buf_count; ++ dma->seg_count += entry->seg_count; ++ dma->page_count += byte_count >> PAGE_SHIFT; ++ dma->byte_count += byte_count; ++ ++ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); ++ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ request->count = entry->buf_count; ++ request->size = size; ++ ++ dma->flags = _DRM_DMA_USE_AGP; ++ ++ atomic_dec(&dev->buf_alloc); ++ return 0; ++} ++EXPORT_SYMBOL(drm_addbufs_agp); ++#endif /* __OS_HAS_AGP */ ++ ++int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int count; ++ int order; ++ int size; ++ int total; ++ int page_order; ++ struct drm_buf_entry *entry; ++ drm_dma_handle_t *dmah; ++ struct drm_buf *buf; ++ int alignment; ++ unsigned long offset; ++ int i; ++ int byte_count; ++ int page_count; ++ unsigned long *temp_pagelist; ++ struct drm_buf **temp_buflist; ++ ++ if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ count = request->count; ++ order = drm_order(request->size); ++ size = 1 << order; ++ ++ DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n", ++ request->count, request->size, size, order, dev->queue_count); ++ ++ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) ++ return -EINVAL; ++ if (dev->queue_count) ++ return -EBUSY; /* Not while in use */ ++ ++ alignment = (request->flags & _DRM_PAGE_ALIGN) ++ ? PAGE_ALIGN(size) : size; ++ page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; ++ total = PAGE_SIZE << page_order; ++ ++ spin_lock(&dev->count_lock); ++ if (dev->buf_use) { ++ spin_unlock(&dev->count_lock); ++ return -EBUSY; ++ } ++ atomic_inc(&dev->buf_alloc); ++ spin_unlock(&dev->count_lock); ++ ++ mutex_lock(&dev->struct_mutex); ++ entry = &dma->bufs[order]; ++ if (entry->buf_count) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; /* May only call once for each order */ ++ } ++ ++ if (count < 0 || count > 4096) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -EINVAL; ++ } ++ ++ entry->buflist = drm_alloc(count * sizeof(*entry->buflist), ++ DRM_MEM_BUFS); ++ if (!entry->buflist) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(entry->buflist, 0, count * sizeof(*entry->buflist)); ++ ++ entry->seglist = drm_alloc(count * sizeof(*entry->seglist), ++ DRM_MEM_SEGS); ++ if (!entry->seglist) { ++ drm_free(entry->buflist, ++ count * sizeof(*entry->buflist), DRM_MEM_BUFS); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(entry->seglist, 0, count * sizeof(*entry->seglist)); ++ ++ /* Keep the original pagelist until we know all the allocations ++ * have succeeded ++ */ ++ temp_pagelist = drm_alloc((dma->page_count + (count << page_order)) ++ * sizeof(*dma->pagelist), DRM_MEM_PAGES); ++ if (!temp_pagelist) { ++ drm_free(entry->buflist, ++ count * sizeof(*entry->buflist), DRM_MEM_BUFS); ++ drm_free(entry->seglist, ++ count * sizeof(*entry->seglist), DRM_MEM_SEGS); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memcpy(temp_pagelist, ++ dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); ++ DRM_DEBUG("pagelist: %d entries\n", ++ dma->page_count + (count << page_order)); ++ ++ entry->buf_size = size; ++ entry->page_order = page_order; ++ byte_count = 0; ++ page_count = 0; ++ ++ while (entry->buf_count < count) { ++ ++ dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); ++ ++ if (!dmah) { ++ /* Set count correctly so we free the proper amount. 
*/
++ entry->buf_count = count;
++ entry->seg_count = count;
++ drm_cleanup_buf_error(dev, entry);
++ drm_free(temp_pagelist,
++ (dma->page_count + (count << page_order))
++ * sizeof(*dma->pagelist), DRM_MEM_PAGES);
++ mutex_unlock(&dev->struct_mutex);
++ atomic_dec(&dev->buf_alloc);
++ return -ENOMEM;
++ }
++ entry->seglist[entry->seg_count++] = dmah;
++ for (i = 0; i < (1 << page_order); i++) {
++ DRM_DEBUG("page %d @ 0x%08lx\n",
++ dma->page_count + page_count,
++ (unsigned long)dmah->vaddr + PAGE_SIZE * i);
++ temp_pagelist[dma->page_count + page_count++]
++ = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
++ }
++ for (offset = 0;
++ offset + size <= total && entry->buf_count < count;
++ offset += alignment, ++entry->buf_count) {
++ buf = &entry->buflist[entry->buf_count];
++ buf->idx = dma->buf_count + entry->buf_count;
++ buf->total = alignment;
++ buf->order = order;
++ buf->used = 0;
++ buf->offset = (dma->byte_count + byte_count + offset);
++ buf->address = (void *)(dmah->vaddr + offset);
++ buf->bus_address = dmah->busaddr + offset;
++ buf->next = NULL;
++ buf->waiting = 0;
++ buf->pending = 0;
++ init_waitqueue_head(&buf->dma_wait);
++ buf->file_priv = NULL;
++
++ buf->dev_priv_size = dev->driver->dev_priv_size;
++ buf->dev_private = drm_alloc(buf->dev_priv_size,
++ DRM_MEM_BUFS);
++ if (!buf->dev_private) {
++ /* Set count correctly so we free the proper amount. */
++ entry->buf_count = count;
++ entry->seg_count = count;
++ drm_cleanup_buf_error(dev, entry);
++ drm_free(temp_pagelist,
++ (dma->page_count +
++ (count << page_order))
++ * sizeof(*dma->pagelist),
++ DRM_MEM_PAGES);
++ mutex_unlock(&dev->struct_mutex);
++ atomic_dec(&dev->buf_alloc);
++ return -ENOMEM;
++ }
++ memset(buf->dev_private, 0, buf->dev_priv_size);
++
++ DRM_DEBUG("buffer %d @ %p\n",
++ entry->buf_count, buf->address);
++ }
++ byte_count += PAGE_SIZE << page_order;
++ }
++
++ temp_buflist = drm_realloc(dma->buflist,
++ dma->buf_count * sizeof(*dma->buflist),
++ (dma->buf_count + entry->buf_count)
++ * sizeof(*dma->buflist), DRM_MEM_BUFS);
++ if (!temp_buflist) {
++ /* Free the entry because it isn't valid */
++ drm_cleanup_buf_error(dev, entry);
++ drm_free(temp_pagelist,
++ (dma->page_count + (count << page_order))
++ * sizeof(*dma->pagelist), DRM_MEM_PAGES);
++ mutex_unlock(&dev->struct_mutex);
++ atomic_dec(&dev->buf_alloc);
++ return -ENOMEM;
++ }
++ dma->buflist = temp_buflist;
++
++ for (i = 0; i < entry->buf_count; i++) {
++ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
++ }
++
++ /* No allocations failed, so now we can replace the original pagelist
++ * with the new one.
++ */ ++ if (dma->page_count) { ++ drm_free(dma->pagelist, ++ dma->page_count * sizeof(*dma->pagelist), ++ DRM_MEM_PAGES); ++ } ++ dma->pagelist = temp_pagelist; ++ ++ dma->buf_count += entry->buf_count; ++ dma->seg_count += entry->seg_count; ++ dma->page_count += entry->seg_count << page_order; ++ dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ request->count = entry->buf_count; ++ request->size = size; ++ ++ if (request->flags & _DRM_PCI_BUFFER_RO) ++ dma->flags = _DRM_DMA_USE_PCI_RO; ++ ++ atomic_dec(&dev->buf_alloc); ++ return 0; ++ ++} ++EXPORT_SYMBOL(drm_addbufs_pci); ++ ++static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf_entry *entry; ++ struct drm_buf *buf; ++ unsigned long offset; ++ unsigned long agp_offset; ++ int count; ++ int order; ++ int size; ++ int alignment; ++ int page_order; ++ int total; ++ int byte_count; ++ int i; ++ struct drm_buf **temp_buflist; ++ ++ if (!drm_core_check_feature(dev, DRIVER_SG)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ count = request->count; ++ order = drm_order(request->size); ++ size = 1 << order; ++ ++ alignment = (request->flags & _DRM_PAGE_ALIGN) ++ ? PAGE_ALIGN(size) : size; ++ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; ++ total = PAGE_SIZE << page_order; ++ ++ byte_count = 0; ++ agp_offset = request->agp_start; ++ ++ DRM_DEBUG("count: %d\n", count); ++ DRM_DEBUG("order: %d\n", order); ++ DRM_DEBUG("size: %d\n", size); ++ DRM_DEBUG("agp_offset: %lu\n", agp_offset); ++ DRM_DEBUG("alignment: %d\n", alignment); ++ DRM_DEBUG("page_order: %d\n", page_order); ++ DRM_DEBUG("total: %d\n", total); ++ ++ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) ++ return -EINVAL; ++ if (dev->queue_count) ++ return -EBUSY; /* Not while in use */ ++ ++ spin_lock(&dev->count_lock); ++ if (dev->buf_use) { ++ spin_unlock(&dev->count_lock); ++ return -EBUSY; ++ } ++ atomic_inc(&dev->buf_alloc); ++ spin_unlock(&dev->count_lock); ++ ++ mutex_lock(&dev->struct_mutex); ++ entry = &dma->bufs[order]; ++ if (entry->buf_count) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; /* May only call once for each order */ ++ } ++ ++ if (count < 0 || count > 4096) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -EINVAL; ++ } ++ ++ entry->buflist = drm_alloc(count * sizeof(*entry->buflist), ++ DRM_MEM_BUFS); ++ if (!entry->buflist) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(entry->buflist, 0, count * sizeof(*entry->buflist)); ++ ++ entry->buf_size = size; ++ entry->page_order = page_order; ++ ++ offset = 0; ++ ++ while (entry->buf_count < count) { ++ buf = &entry->buflist[entry->buf_count]; ++ buf->idx = dma->buf_count + entry->buf_count; ++ buf->total = alignment; ++ buf->order = order; ++ buf->used = 0; ++ ++ buf->offset = (dma->byte_count + offset); ++ buf->bus_address = agp_offset + offset; ++ buf->address = (void *)(agp_offset + offset ++ + (unsigned long)dev->sg->virtual); ++ buf->next = NULL; ++ buf->waiting = 0; ++ buf->pending = 0; ++ init_waitqueue_head(&buf->dma_wait); ++ buf->file_priv = NULL; ++ ++ buf->dev_priv_size = dev->driver->dev_priv_size; ++ buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); ++ if (!buf->dev_private) { ++ /* Set count correctly so we free the proper 
amount. */ ++ entry->buf_count = count; ++ drm_cleanup_buf_error(dev, entry); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ ++ memset(buf->dev_private, 0, buf->dev_priv_size); ++ ++ DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); ++ ++ offset += alignment; ++ entry->buf_count++; ++ byte_count += PAGE_SIZE << page_order; ++ } ++ ++ DRM_DEBUG("byte_count: %d\n", byte_count); ++ ++ temp_buflist = drm_realloc(dma->buflist, ++ dma->buf_count * sizeof(*dma->buflist), ++ (dma->buf_count + entry->buf_count) ++ * sizeof(*dma->buflist), DRM_MEM_BUFS); ++ if (!temp_buflist) { ++ /* Free the entry because it isn't valid */ ++ drm_cleanup_buf_error(dev, entry); ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ dma->buflist = temp_buflist; ++ ++ for (i = 0; i < entry->buf_count; i++) { ++ dma->buflist[i + dma->buf_count] = &entry->buflist[i]; ++ } ++ ++ dma->buf_count += entry->buf_count; ++ dma->seg_count += entry->seg_count; ++ dma->page_count += byte_count >> PAGE_SHIFT; ++ dma->byte_count += byte_count; ++ ++ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); ++ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ request->count = entry->buf_count; ++ request->size = size; ++ ++ dma->flags = _DRM_DMA_USE_SG; ++ ++ atomic_dec(&dev->buf_alloc); ++ return 0; ++} ++ ++int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf_entry *entry; ++ struct drm_buf *buf; ++ unsigned long offset; ++ unsigned long agp_offset; ++ int count; ++ int order; ++ int size; ++ int alignment; ++ int page_order; ++ int total; ++ int byte_count; ++ int i; ++ struct drm_buf **temp_buflist; ++ ++ if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ count = request->count; ++ order = drm_order(request->size); ++ size = 1 << order; ++ ++ alignment = (request->flags & _DRM_PAGE_ALIGN) ++ ? PAGE_ALIGN(size) : size; ++ page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; ++ total = PAGE_SIZE << page_order; ++ ++ byte_count = 0; ++ agp_offset = request->agp_start; ++ ++ DRM_DEBUG("count: %d\n", count); ++ DRM_DEBUG("order: %d\n", order); ++ DRM_DEBUG("size: %d\n", size); ++ DRM_DEBUG("agp_offset: %lu\n", agp_offset); ++ DRM_DEBUG("alignment: %d\n", alignment); ++ DRM_DEBUG("page_order: %d\n", page_order); ++ DRM_DEBUG("total: %d\n", total); ++ ++ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) ++ return -EINVAL; ++ if (dev->queue_count) ++ return -EBUSY; /* Not while in use */ ++ ++ spin_lock(&dev->count_lock); ++ if (dev->buf_use) { ++ spin_unlock(&dev->count_lock); ++ return -EBUSY; ++ } ++ atomic_inc(&dev->buf_alloc); ++ spin_unlock(&dev->count_lock); ++ ++ mutex_lock(&dev->struct_mutex); ++ entry = &dma->bufs[order]; ++ if (entry->buf_count) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; /* May only call once for each order */ ++ } ++ ++ if (count < 0 || count > 4096) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -EINVAL; ++ } ++ ++ entry->buflist = drm_alloc(count * sizeof(*entry->buflist), ++ DRM_MEM_BUFS); ++ if (!entry->buflist) { ++ mutex_unlock(&dev->struct_mutex); ++ atomic_dec(&dev->buf_alloc); ++ return -ENOMEM; ++ } ++ memset(entry->buflist, 0, count * sizeof(*entry->buflist)); ++ ++ entry->buf_size = size; ++ entry->page_order = page_order; ++ ++ offset = 0; ++ ++ while (entry->buf_count < count) { ++ buf = &entry->buflist[entry->buf_count]; ++ buf->idx = dma->buf_count + entry->buf_count; ++ buf->total = alignment; ++ buf->order = order; ++ buf->used = 0; ++ ++ buf->offset = (dma->byte_count + offset); ++ buf->bus_address = agp_offset + offset; ++ buf->address = (void *)(agp_offset + offset); ++ buf->next = NULL; ++ buf->waiting = 0; ++ buf->pending = 0; ++ init_waitqueue_head(&buf->dma_wait); ++ buf->file_priv = NULL; ++ ++ buf->dev_priv_size = dev->driver->dev_priv_size; ++ buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); ++ if (!buf->dev_private) { ++ /* Set count correctly so we free the proper amount. 
*/
++ entry->buf_count = count;
++ drm_cleanup_buf_error(dev, entry);
++ mutex_unlock(&dev->struct_mutex);
++ atomic_dec(&dev->buf_alloc);
++ return -ENOMEM;
++ }
++ memset(buf->dev_private, 0, buf->dev_priv_size);
++
++ DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
++
++ offset += alignment;
++ entry->buf_count++;
++ byte_count += PAGE_SIZE << page_order;
++ }
++
++ DRM_DEBUG("byte_count: %d\n", byte_count);
++
++ temp_buflist = drm_realloc(dma->buflist,
++ dma->buf_count * sizeof(*dma->buflist),
++ (dma->buf_count + entry->buf_count)
++ * sizeof(*dma->buflist), DRM_MEM_BUFS);
++ if (!temp_buflist) {
++ /* Free the entry because it isn't valid */
++ drm_cleanup_buf_error(dev, entry);
++ mutex_unlock(&dev->struct_mutex);
++ atomic_dec(&dev->buf_alloc);
++ return -ENOMEM;
++ }
++ dma->buflist = temp_buflist;
++
++ for (i = 0; i < entry->buf_count; i++) {
++ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
++ }
++
++ dma->buf_count += entry->buf_count;
++ dma->seg_count += entry->seg_count;
++ dma->page_count += byte_count >> PAGE_SHIFT;
++ dma->byte_count += byte_count;
++
++ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
++ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
++
++ mutex_unlock(&dev->struct_mutex);
++
++ request->count = entry->buf_count;
++ request->size = size;
++
++ dma->flags = _DRM_DMA_USE_FB;
++
++ atomic_dec(&dev->buf_alloc);
++ return 0;
++}
++EXPORT_SYMBOL(drm_addbufs_fb);
++
++
++/**
++ * Add buffers for DMA transfers (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a struct drm_buf_desc request.
++ * \return zero on success or a negative number on failure.
++ *
++ * According to the memory type specified in drm_buf_desc::flags and the
++ * build options, it dispatches the call either to addbufs_agp(),
++ * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
++ * PCI memory respectively.
++ */
++int drm_addbufs(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_buf_desc *request = data;
++ int ret;
++
++ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++ return -EINVAL;
++
++#if __OS_HAS_AGP
++ if (request->flags & _DRM_AGP_BUFFER)
++ ret = drm_addbufs_agp(dev, request);
++ else
++#endif
++ if (request->flags & _DRM_SG_BUFFER)
++ ret = drm_addbufs_sg(dev, request);
++ else if (request->flags & _DRM_FB_BUFFER)
++ ret = drm_addbufs_fb(dev, request);
++ else
++ ret = drm_addbufs_pci(dev, request);
++
++ return ret;
++}
++
++/**
++ * Get information about the buffer mappings.
++ *
++ * This was originally meant for debugging purposes, or for use by a
++ * sophisticated client library to determine how best to use the available
++ * buffers (e.g., large buffers can be used for image transfer).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_buf_info structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Increments drm_device::buf_use while holding the drm_device::count_lock
++ * lock, preventing allocation of more buffers after this call. Information
++ * about each requested buffer is then copied into user space.
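++ *
++ * (A client typically calls this twice, illustratively: once with
++ * request->count == 0 to learn how many drm_buf_desc entries it must
++ * provide, and again with a list at least that large to receive them.)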
++ */ ++int drm_infobufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf_info *request = data; ++ int i; ++ int count; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ spin_lock(&dev->count_lock); ++ if (atomic_read(&dev->buf_alloc)) { ++ spin_unlock(&dev->count_lock); ++ return -EBUSY; ++ } ++ ++dev->buf_use; /* Can't allocate more after this call */ ++ spin_unlock(&dev->count_lock); ++ ++ for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { ++ if (dma->bufs[i].buf_count) ++ ++count; ++ } ++ ++ DRM_DEBUG("count = %d\n", count); ++ ++ if (request->count >= count) { ++ for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { ++ if (dma->bufs[i].buf_count) { ++ struct drm_buf_desc __user *to = ++ &request->list[count]; ++ struct drm_buf_entry *from = &dma->bufs[i]; ++ struct drm_freelist *list = &dma->bufs[i].freelist; ++ if (copy_to_user(&to->count, ++ &from->buf_count, ++ sizeof(from->buf_count)) || ++ copy_to_user(&to->size, ++ &from->buf_size, ++ sizeof(from->buf_size)) || ++ copy_to_user(&to->low_mark, ++ &list->low_mark, ++ sizeof(list->low_mark)) || ++ copy_to_user(&to->high_mark, ++ &list->high_mark, ++ sizeof(list->high_mark))) ++ return -EFAULT; ++ ++ DRM_DEBUG("%d %d %d %d %d\n", ++ i, ++ dma->bufs[i].buf_count, ++ dma->bufs[i].buf_size, ++ dma->bufs[i].freelist.low_mark, ++ dma->bufs[i].freelist.high_mark); ++ ++count; ++ } ++ } ++ } ++ request->count = count; ++ ++ return 0; ++} ++ ++/** ++ * Specifies a low and high water mark for buffer allocation ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg a pointer to a drm_buf_desc structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies that the size order is bounded between the admissible orders and ++ * updates the respective drm_device_dma::bufs entry low and high water mark. ++ * ++ * \note This ioctl is deprecated and mostly never used. ++ */ ++int drm_markbufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf_desc *request = data; ++ int order; ++ struct drm_buf_entry *entry; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ DRM_DEBUG("%d, %d, %d\n", ++ request->size, request->low_mark, request->high_mark); ++ order = drm_order(request->size); ++ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) ++ return -EINVAL; ++ entry = &dma->bufs[order]; ++ ++ if (request->low_mark < 0 || request->low_mark > entry->buf_count) ++ return -EINVAL; ++ if (request->high_mark < 0 || request->high_mark > entry->buf_count) ++ return -EINVAL; ++ ++ entry->freelist.low_mark = request->low_mark; ++ entry->freelist.high_mark = request->high_mark; ++ ++ return 0; ++} ++ ++/** ++ * Unreserve the buffers in list, previously reserved using drmDMA. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_buf_free structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Calls free_buffer() for each used buffer. ++ * This function is primarily used for debugging. 
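++ * ++ * Illustrative user-space sketch (hypothetical indices; assumes an open ++ * DRM fd and the standard DRM_IOCTL_FREE_BUFS request from drm.h): ++ * ++ * int idx[2] = { 0, 1 }; ++ * struct drm_buf_free req = { .count = 2, .list = idx }; ++ * ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);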
++ */ ++int drm_freebufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf_free *request = data; ++ int i; ++ int idx; ++ struct drm_buf *buf; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ DRM_DEBUG("%d\n", request->count); ++ for (i = 0; i < request->count; i++) { ++ if (copy_from_user(&idx, &request->list[i], sizeof(idx))) ++ return -EFAULT; ++ if (idx < 0 || idx >= dma->buf_count) { ++ DRM_ERROR("Index %d (of %d max)\n", ++ idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ buf = dma->buflist[idx]; ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("Process %d freeing buffer not owned\n", ++ current->pid); ++ return -EINVAL; ++ } ++ drm_free_buffer(dev, buf); ++ } ++ ++ return 0; ++} ++ ++/** ++ * Maps all of the DMA buffers into client-virtual space (ioctl). ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg pointer to a drm_buf_map structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information ++ * about each buffer into user space. For PCI buffers, it calls do_mmap() with ++ * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls ++ * drm_mmap_dma(). ++ */ ++int drm_mapbufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int retcode = 0; ++ const int zero = 0; ++ unsigned long virtual; ++ unsigned long address; ++ struct drm_buf_map *request = data; ++ int i; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ++ return -EINVAL; ++ ++ if (!dma) ++ return -EINVAL; ++ ++ spin_lock(&dev->count_lock); ++ if (atomic_read(&dev->buf_alloc)) { ++ spin_unlock(&dev->count_lock); ++ return -EBUSY; ++ } ++ dev->buf_use++; /* Can't allocate more after this call */ ++ spin_unlock(&dev->count_lock); ++ ++ if (request->count >= dma->buf_count) { ++ if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ++ || (drm_core_check_feature(dev, DRIVER_SG) ++ && (dma->flags & _DRM_DMA_USE_SG)) ++ || (drm_core_check_feature(dev, DRIVER_FB_DMA) ++ && (dma->flags & _DRM_DMA_USE_FB))) { ++ struct drm_map *map = dev->agp_buffer_map; ++ unsigned long token = dev->agp_buffer_token; ++ ++ if (!map) { ++ retcode = -EINVAL; ++ goto done; ++ } ++ down_write(&current->mm->mmap_sem); ++ virtual = do_mmap(file_priv->filp, 0, map->size, ++ PROT_READ | PROT_WRITE, ++ MAP_SHARED, ++ token); ++ up_write(&current->mm->mmap_sem); ++ } else { ++ down_write(&current->mm->mmap_sem); ++ virtual = do_mmap(file_priv->filp, 0, dma->byte_count, ++ PROT_READ | PROT_WRITE, ++ MAP_SHARED, 0); ++ up_write(&current->mm->mmap_sem); ++ } ++ if (virtual > -1024UL) { ++ /* Real error */ ++ retcode = (signed long)virtual; ++ goto done; ++ } ++ request->virtual = (void __user *)virtual; ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ if (copy_to_user(&request->list[i].idx, ++ &dma->buflist[i]->idx, ++ sizeof(request->list[0].idx))) { ++ retcode = -EFAULT; ++ goto done; ++ } ++ if (copy_to_user(&request->list[i].total, ++ &dma->buflist[i]->total, ++ sizeof(request->list[0].total))) { ++ retcode = -EFAULT; ++ goto done; ++ } ++ if (copy_to_user(&request->list[i].used, ++ &zero, sizeof(zero))) { ++ retcode = -EFAULT; ++ goto done; ++ } ++ address = virtual + dma->buflist[i]->offset; /* *** */ ++ if (copy_to_user(&request->list[i].address, ++ &address,
sizeof(address))) { ++ retcode = -EFAULT; ++ goto done; ++ } ++ } ++ } ++ done: ++ request->count = dma->buf_count; ++ DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode); ++ ++ return retcode; ++} ++ ++/** ++ * Compute size order. Returns the exponent of the smallest power of two ++ * that is greater than or equal to the given number; e.g., ++ * drm_order(4096) == 12 and drm_order(4097) == 13. ++ * ++ * \param size size. ++ * \return order. ++ * ++ * \todo Can be made faster. ++ */ ++int drm_order(unsigned long size) ++{ ++ int order; ++ unsigned long tmp; ++ ++ for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ; ++ ++ if (size & (size - 1)) ++ ++order; ++ ++ return order; ++} ++EXPORT_SYMBOL(drm_order); +diff -Nurd git/drivers/gpu/drm-tungsten/drm_compat.c git-nokia/drivers/gpu/drm-tungsten/drm_compat.c +--- git/drivers/gpu/drm-tungsten/drm_compat.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_compat.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,860 @@ ++/************************************************************************** ++ * ++ * This kernel module is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ++ * ++ **************************************************************************/ ++/* ++ * This code provides access to unexported mm kernel features. It is necessary ++ * in order to use the new DRM memory manager code with kernels that don't ++ * support it directly. ++ * ++ * Authors: Thomas Hellstrom ++ * Linux kernel mm subsystem authors. ++ * (Most code taken from there). ++ */ ++ ++#include "drmP.h" ++ ++#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ ++/* ++ * These have bad performance in the AGP module for the indicated kernel versions. ++ */ ++ ++int drm_map_page_into_agp(struct page *page) ++{ ++ int i; ++ i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); ++ /* Caller's responsibility to call global_flush_tlb() for ++ * performance reasons */ ++ return i; ++} ++ ++int drm_unmap_page_from_agp(struct page *page) ++{ ++ int i; ++ i = change_page_attr(page, 1, PAGE_KERNEL); ++ /* Caller's responsibility to call global_flush_tlb() for ++ * performance reasons */ ++ return i; ++} ++#endif ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) ++ ++/* ++ * The protection map was exported in 2.6.19 ++ */ ++ ++pgprot_t vm_get_page_prot(unsigned long vm_flags) ++{ ++#ifdef MODULE ++ static pgprot_t drm_protection_map[16] = { ++ __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, ++ __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 ++ }; ++ ++ return drm_protection_map[vm_flags & 0x0F]; ++#else ++ extern pgprot_t protection_map[]; ++ return protection_map[vm_flags & 0x0F]; ++#endif ++}; ++#endif ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ ++/* ++ * vm code for kernels below 2.6.15 in which version a major vm write ++ * occurred.
This implements a simple, straightforward ++ * version similar to what is going to be ++ * in kernel 2.6.19+. ++ * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use ++ * nopfn. ++ */ ++ ++static struct { ++ spinlock_t lock; ++ struct page *dummy_page; ++ atomic_t present; ++} drm_np_retry = ++{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)}; ++ ++ ++static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, ++ struct fault_data *data); ++ ++ ++struct page * get_nopage_retry(void) ++{ ++ if (atomic_read(&drm_np_retry.present) == 0) { ++ struct page *page = alloc_page(GFP_KERNEL); ++ if (!page) ++ return NOPAGE_OOM; ++ spin_lock(&drm_np_retry.lock); ++ drm_np_retry.dummy_page = page; ++ atomic_set(&drm_np_retry.present,1); ++ spin_unlock(&drm_np_retry.lock); ++ } ++ get_page(drm_np_retry.dummy_page); ++ return drm_np_retry.dummy_page; ++} ++ ++void free_nopage_retry(void) ++{ ++ if (atomic_read(&drm_np_retry.present) == 1) { ++ spin_lock(&drm_np_retry.lock); ++ __free_page(drm_np_retry.dummy_page); ++ drm_np_retry.dummy_page = NULL; ++ atomic_set(&drm_np_retry.present, 0); ++ spin_unlock(&drm_np_retry.lock); ++ } ++} ++ ++struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address, ++ int *type) ++{ ++ struct fault_data data; ++ ++ if (type) ++ *type = VM_FAULT_MINOR; ++ ++ data.address = address; ++ data.vma = vma; ++ drm_bo_vm_fault(vma, &data); ++ switch (data.type) { ++ case VM_FAULT_OOM: ++ return NOPAGE_OOM; ++ case VM_FAULT_SIGBUS: ++ return NOPAGE_SIGBUS; ++ default: ++ break; ++ } ++ ++ return NOPAGE_REFAULT; ++} ++ ++#endif ++ ++#if !defined(DRM_FULL_MM_COMPAT) && \ ++ ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \ ++ (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))) ++ ++static int drm_pte_is_clear(struct vm_area_struct *vma, ++ unsigned long addr) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ int ret = 1; ++ pte_t *pte; ++ pmd_t *pmd; ++ pud_t *pud; ++ pgd_t *pgd; ++ ++ spin_lock(&mm->page_table_lock); ++ pgd = pgd_offset(mm, addr); ++ if (pgd_none(*pgd)) ++ goto unlock; ++ pud = pud_offset(pgd, addr); ++ if (pud_none(*pud)) ++ goto unlock; ++ pmd = pmd_offset(pud, addr); ++ if (pmd_none(*pmd)) ++ goto unlock; ++ pte = pte_offset_map(pmd, addr); ++ if (!pte) ++ goto unlock; ++ ret = pte_none(*pte); ++ pte_unmap(pte); ++ unlock: ++ spin_unlock(&mm->page_table_lock); ++ return ret; ++} ++ ++static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, ++ unsigned long pfn) ++{ ++ int ret; ++ if (!drm_pte_is_clear(vma, addr)) ++ return -EBUSY; ++ ++ ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot); ++ return ret; ++} ++ ++ ++static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, ++ struct fault_data *data) ++{ ++ unsigned long address = data->address; ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ unsigned long page_offset; ++ struct page *page = NULL; ++ struct drm_ttm *ttm; ++ struct drm_device *dev; ++ unsigned long pfn; ++ int err; ++ unsigned long bus_base; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ ++ dev = bo->dev; ++ drm_bo_read_lock(&dev->bm.bm_lock, 0); ++ ++ mutex_lock(&bo->mutex); ++ ++ err = drm_bo_wait(bo, 0, 1, 0); ++ if (err) { ++ data->type = (err == -EAGAIN) ? ++ VM_FAULT_MINOR : VM_FAULT_SIGBUS; ++ goto out_unlock; ++ } ++ ++ ++ /* ++ * If the buffer happens to be in a non-mappable location, ++ * move it to a mappable one.
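++ * The move below is retried on -EAGAIN for up to three seconds ++ * (3*DRM_HZ jiffies) before the fault gives up and raises SIGBUS.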
++ */ ++ ++ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { ++ unsigned long _end = jiffies + 3*DRM_HZ; ++ uint32_t new_mask = bo->mem.proposed_flags | ++ DRM_BO_FLAG_MAPPABLE | ++ DRM_BO_FLAG_FORCE_MAPPABLE; ++ ++ do { ++ err = drm_bo_move_buffer(bo, new_mask, 0, 0); ++ } while((err == -EAGAIN) && !time_after_eq(jiffies, _end)); ++ ++ if (err) { ++ DRM_ERROR("Timeout moving buffer to mappable location.\n"); ++ data->type = VM_FAULT_SIGBUS; ++ goto out_unlock; ++ } ++ } ++ ++ if (address > vma->vm_end) { ++ data->type = VM_FAULT_SIGBUS; ++ goto out_unlock; ++ } ++ ++ dev = bo->dev; ++ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, ++ &bus_size); ++ ++ if (err) { ++ data->type = VM_FAULT_SIGBUS; ++ goto out_unlock; ++ } ++ ++ page_offset = (address - vma->vm_start) >> PAGE_SHIFT; ++ ++ if (bus_size) { ++ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; ++ ++ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; ++ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); ++ } else { ++ ttm = bo->ttm; ++ ++ drm_ttm_fixup_caching(ttm); ++ page = drm_ttm_get_page(ttm, page_offset); ++ if (!page) { ++ data->type = VM_FAULT_OOM; ++ goto out_unlock; ++ } ++ pfn = page_to_pfn(page); ++ vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ? ++ vm_get_page_prot(vma->vm_flags) : ++ drm_io_prot(_DRM_TTM, vma); ++ } ++ ++ err = vm_insert_pfn(vma, address, pfn); ++ ++ if (!err || err == -EBUSY) ++ data->type = VM_FAULT_MINOR; ++ else ++ data->type = VM_FAULT_OOM; ++out_unlock: ++ mutex_unlock(&bo->mutex); ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return NULL; ++} ++ ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \ ++ !defined(DRM_FULL_MM_COMPAT) ++ ++/** ++ * nopfn entry point for 2.6.19+ kernels without full mm compat: runs ++ * drm_bo_vm_fault() and translates its fault type into a NOPFN code. ++ */ ++ ++unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma, ++ unsigned long address) ++{ ++ struct fault_data data; ++ data.address = address; ++ ++ (void) drm_bo_vm_fault(vma, &data); ++ if (data.type == VM_FAULT_OOM) ++ return NOPFN_OOM; ++ else if (data.type == VM_FAULT_SIGBUS) ++ return NOPFN_SIGBUS; ++ ++ /* ++ * pfn already set. ++ */ ++ ++ return 0; ++} ++#endif ++ ++ ++#ifdef DRM_ODD_MM_COMPAT ++ ++/* ++ * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated ++ * workaround for a single BUG statement in do_no_page in these versions. The ++ * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_ ++ * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to ++ * first take the dev->struct_mutex, and then trylock all mmap_sems. If this ++ * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex, ++ * release the cpu and retry. We also need to keep track of all vmas mapping the ttm. ++ * phew.
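++ * ++ * The expected caller pattern is roughly (sketch only; see ++ * drm_bo_lock_kmm() below for the trylock logic): ++ * ++ * while (drm_bo_lock_kmm(bo) == -EAGAIN) { ++ * mutex_unlock(&dev->struct_mutex); ++ * schedule(); ++ * mutex_lock(&dev->struct_mutex); ++ * }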
++ */ ++ ++typedef struct p_mm_entry { ++ struct list_head head; ++ struct mm_struct *mm; ++ atomic_t refcount; ++ int locked; ++} p_mm_entry_t; ++ ++typedef struct vma_entry { ++ struct list_head head; ++ struct vm_area_struct *vma; ++} vma_entry_t; ++ ++ ++struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address, ++ int *type) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ unsigned long page_offset; ++ struct page *page; ++ struct drm_ttm *ttm; ++ struct drm_device *dev; ++ ++ mutex_lock(&bo->mutex); ++ ++ if (type) ++ *type = VM_FAULT_MINOR; ++ ++ if (address > vma->vm_end) { ++ page = NOPAGE_SIGBUS; ++ goto out_unlock; ++ } ++ ++ dev = bo->dev; ++ ++ if (drm_mem_reg_is_pci(dev, &bo->mem)) { ++ DRM_ERROR("Invalid compat nopage.\n"); ++ page = NOPAGE_SIGBUS; ++ goto out_unlock; ++ } ++ ++ ttm = bo->ttm; ++ drm_ttm_fixup_caching(ttm); ++ page_offset = (address - vma->vm_start) >> PAGE_SHIFT; ++ page = drm_ttm_get_page(ttm, page_offset); ++ if (!page) { ++ page = NOPAGE_OOM; ++ goto out_unlock; ++ } ++ ++ get_page(page); ++out_unlock: ++ mutex_unlock(&bo->mutex); ++ return page; ++} ++ ++ ++ ++ ++int drm_bo_map_bound(struct vm_area_struct *vma) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data; ++ int ret = 0; ++ unsigned long bus_base; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ ++ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base, ++ &bus_offset, &bus_size); ++ BUG_ON(ret); ++ ++ if (bus_size) { ++ struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type]; ++ unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT; ++ pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma); ++ ret = io_remap_pfn_range(vma, vma->vm_start, pfn, ++ vma->vm_end - vma->vm_start, ++ pgprot); ++ } ++ ++ return ret; ++} ++ ++ ++int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) ++{ ++ p_mm_entry_t *entry, *n_entry; ++ vma_entry_t *v_entry; ++ struct mm_struct *mm = vma->vm_mm; ++ ++ v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ); ++ if (!v_entry) { ++ DRM_ERROR("Allocation of vma pointer entry failed\n"); ++ return -ENOMEM; ++ } ++ v_entry->vma = vma; ++ ++ list_add_tail(&v_entry->head, &bo->vma_list); ++ ++ list_for_each_entry(entry, &bo->p_mm_list, head) { ++ if (mm == entry->mm) { ++ atomic_inc(&entry->refcount); ++ return 0; ++ } else if ((unsigned long)mm < (unsigned long)entry->mm) ; ++ } ++ ++ n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ); ++ if (!n_entry) { ++ DRM_ERROR("Allocation of process mm pointer entry failed\n"); ++ return -ENOMEM; ++ } ++ INIT_LIST_HEAD(&n_entry->head); ++ n_entry->mm = mm; ++ n_entry->locked = 0; ++ atomic_set(&n_entry->refcount, 0); ++ list_add_tail(&n_entry->head, &entry->head); ++ ++ return 0; ++} ++ ++void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) ++{ ++ p_mm_entry_t *entry, *n; ++ vma_entry_t *v_entry, *v_n; ++ int found = 0; ++ struct mm_struct *mm = vma->vm_mm; ++ ++ list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) { ++ if (v_entry->vma == vma) { ++ found = 1; ++ list_del(&v_entry->head); ++ drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ); ++ break; ++ } ++ } ++ BUG_ON(!found); ++ ++ list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) { ++ if (mm == entry->mm) { ++ if (atomic_add_negative(-1, &entry->refcount)) { ++ list_del(&entry->head); ++ BUG_ON(entry->locked); ++ drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ); ++ } 
++ return; ++ } ++ } ++ BUG_ON(1); ++} ++ ++ ++ ++int drm_bo_lock_kmm(struct drm_buffer_object * bo) ++{ ++ p_mm_entry_t *entry; ++ int lock_ok = 1; ++ ++ list_for_each_entry(entry, &bo->p_mm_list, head) { ++ BUG_ON(entry->locked); ++ if (!down_write_trylock(&entry->mm->mmap_sem)) { ++ lock_ok = 0; ++ break; ++ } ++ entry->locked = 1; ++ } ++ ++ if (lock_ok) ++ return 0; ++ ++ list_for_each_entry(entry, &bo->p_mm_list, head) { ++ if (!entry->locked) ++ break; ++ up_write(&entry->mm->mmap_sem); ++ entry->locked = 0; ++ } ++ ++ /* ++ * Possible deadlock. Try again. Our callers should handle this ++ * and restart. ++ */ ++ ++ return -EAGAIN; ++} ++ ++void drm_bo_unlock_kmm(struct drm_buffer_object * bo) ++{ ++ p_mm_entry_t *entry; ++ ++ list_for_each_entry(entry, &bo->p_mm_list, head) { ++ BUG_ON(!entry->locked); ++ up_write(&entry->mm->mmap_sem); ++ entry->locked = 0; ++ } ++} ++ ++int drm_bo_remap_bound(struct drm_buffer_object *bo) ++{ ++ vma_entry_t *v_entry; ++ int ret = 0; ++ ++ if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) { ++ list_for_each_entry(v_entry, &bo->vma_list, head) { ++ ret = drm_bo_map_bound(v_entry->vma); ++ if (ret) ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++void drm_bo_finish_unmap(struct drm_buffer_object *bo) ++{ ++ vma_entry_t *v_entry; ++ ++ list_for_each_entry(v_entry, &bo->vma_list, head) { ++ v_entry->vma->vm_flags &= ~VM_PFNMAP; ++ } ++} ++ ++#endif ++ ++#ifdef DRM_IDR_COMPAT_FN ++/* only called when idp->lock is held */ ++static void __free_layer(struct idr *idp, struct idr_layer *p) ++{ ++ p->ary[0] = idp->id_free; ++ idp->id_free = p; ++ idp->id_free_cnt++; ++} ++ ++static void free_layer(struct idr *idp, struct idr_layer *p) ++{ ++ unsigned long flags; ++ ++ /* ++ * Depends on the return element being zeroed. ++ */ ++ spin_lock_irqsave(&idp->lock, flags); ++ __free_layer(idp, p); ++ spin_unlock_irqrestore(&idp->lock, flags); ++} ++ ++/** ++ * idr_for_each - iterate through all stored pointers ++ * @idp: idr handle ++ * @fn: function to be called for each pointer ++ * @data: data passed back to callback function ++ * ++ * Iterate over the pointers registered with the given idr. The ++ * callback function will be called for each pointer currently ++ * registered, passing the id, the pointer and the data pointer passed ++ * to this function. It is not safe to modify the idr tree while in ++ * the callback, so functions such as idr_get_new and idr_remove are ++ * not allowed. ++ * ++ * We check the return of @fn each time. If it returns anything other ++ * than 0, we break out and return that value. ++ * ++ * The caller must serialize idr_find() vs idr_get_new() and idr_remove(). ++ */ ++int idr_for_each(struct idr *idp, ++ int (*fn)(int id, void *p, void *data), void *data) ++{ ++ int n, id, max, error = 0; ++ struct idr_layer *p; ++ struct idr_layer *pa[MAX_LEVEL]; ++ struct idr_layer **paa = &pa[0]; ++ ++ n = idp->layers * IDR_BITS; ++ p = idp->top; ++ max = 1 << n; ++ ++ id = 0; ++ while (id < max) { ++ while (n > 0 && p) { ++ n -= IDR_BITS; ++ *paa++ = p; ++ p = p->ary[(id >> n) & IDR_MASK]; ++ } ++ ++ if (p) { ++ error = fn(id, (void *)p, data); ++ if (error) ++ break; ++ } ++ ++ id += 1 << n; ++ while (n < fls(id)) { ++ n += IDR_BITS; ++ p = *--paa; ++ } ++ } ++ ++ return error; ++} ++EXPORT_SYMBOL(idr_for_each); ++ ++/** ++ * idr_remove_all - remove all ids from the given idr tree ++ * @idp: idr handle ++ * ++ * idr_destroy() only frees up unused, cached idr_layers, but this ++ * function will remove all id mappings and leave all idr_layers ++ * unused.
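++ * ++ * For example (sketch; assumes each stored pointer was kmalloc'd): ++ * ++ * static int free_one(int id, void *p, void *data) ++ * { ++ * kfree(p); ++ * return 0; ++ * } ++ * ++ * idr_for_each(&my_idr, free_one, NULL); ++ * idr_remove_all(&my_idr); ++ * idr_destroy(&my_idr);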
++ * ++ * A typical clean-up sequence for objects stored in an idr tree will ++ * use idr_for_each() to free all objects, if necessary, then ++ * idr_remove_all() to remove all ids, and idr_destroy() to free ++ * up the cached idr_layers. ++ */ ++void idr_remove_all(struct idr *idp) ++{ ++ int n, id, max, error = 0; ++ struct idr_layer *p; ++ struct idr_layer *pa[MAX_LEVEL]; ++ struct idr_layer **paa = &pa[0]; ++ ++ n = idp->layers * IDR_BITS; ++ p = idp->top; ++ max = 1 << n; ++ ++ id = 0; ++ while (id < max && !error) { ++ while (n > IDR_BITS && p) { ++ n -= IDR_BITS; ++ *paa++ = p; ++ p = p->ary[(id >> n) & IDR_MASK]; ++ } ++ ++ id += 1 << n; ++ while (n < fls(id)) { ++ if (p) { ++ memset(p, 0, sizeof *p); ++ free_layer(idp, p); ++ } ++ n += IDR_BITS; ++ p = *--paa; ++ } ++ } ++ idp->top = NULL; ++ idp->layers = 0; ++} ++EXPORT_SYMBOL(idr_remove_all); ++ ++#endif /* DRM_IDR_COMPAT_FN */ ++ ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) ++/** ++ * idr_replace - replace pointer for given id ++ * @idp: idr handle ++ * @ptr: pointer you want associated with the id ++ * @id: lookup key ++ * ++ * Replace the pointer registered with an id and return the old value. ++ * A -ENOENT return indicates that @id was not found. ++ * A -EINVAL return indicates that @id was not within valid constraints. ++ * ++ * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove(). ++ */ ++void *idr_replace(struct idr *idp, void *ptr, int id) ++{ ++ int n; ++ struct idr_layer *p, *old_p; ++ ++ n = idp->layers * IDR_BITS; ++ p = idp->top; ++ ++ id &= MAX_ID_MASK; ++ ++ if (id >= (1 << n)) ++ return ERR_PTR(-EINVAL); ++ ++ n -= IDR_BITS; ++ while ((n > 0) && p) { ++ p = p->ary[(id >> n) & IDR_MASK]; ++ n -= IDR_BITS; ++ } ++ ++ n = id & IDR_MASK; ++ if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) ++ return ERR_PTR(-ENOENT); ++ ++ old_p = p->ary[n]; ++ p->ary[n] = ptr; ++ ++ return (void *)old_p; ++} ++EXPORT_SYMBOL(idr_replace); ++#endif ++ ++#if defined(DRM_KMAP_ATOMIC_PROT_PFN) ++#define drm_kmap_get_fixmap_pte(vaddr) \ ++ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr)) ++ ++void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, ++ pgprot_t protection) ++{ ++ enum fixed_addresses idx; ++ unsigned long vaddr; ++ static pte_t *km_pte; ++ static int initialized = 0; ++ ++ if (unlikely(!initialized)) { ++ km_pte = drm_kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN)); ++ initialized = 1; ++ } ++ ++ pagefault_disable(); ++ idx = type + KM_TYPE_NR*smp_processor_id(); ++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ++ set_pte(km_pte-idx, pfn_pte(pfn, protection)); ++ ++ return (void*) vaddr; ++} ++ ++EXPORT_SYMBOL(kmap_atomic_prot_pfn); ++ ++#endif ++ ++#ifdef DRM_FULL_MM_COMPAT ++#ifdef DRM_NO_FAULT ++unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, ++ unsigned long address) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ unsigned long page_offset; ++ struct page *page = NULL; ++ struct drm_ttm *ttm; ++ struct drm_device *dev; ++ unsigned long pfn; ++ int err; ++ unsigned long bus_base; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ unsigned long ret = NOPFN_REFAULT; ++ ++ if (address > vma->vm_end) ++ return NOPFN_SIGBUS; ++ ++ dev = bo->dev; ++ err = drm_bo_read_lock(&dev->bm.bm_lock, 1); ++ if (err) ++ return NOPFN_REFAULT; ++ ++ err = mutex_lock_interruptible(&bo->mutex); ++ if (err) { ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return NOPFN_REFAULT; ++ } ++ ++ err =
drm_bo_wait(bo, 0, 1, 0, 1); ++ if (err) { ++ ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT; ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ goto out_unlock; ++ } ++ ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ /* ++ * If the buffer happens to be in a non-mappable location, ++ * move it to a mappable one. ++ */ ++ ++ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { ++ uint32_t new_flags = bo->mem.proposed_flags | ++ DRM_BO_FLAG_MAPPABLE | ++ DRM_BO_FLAG_FORCE_MAPPABLE; ++ err = drm_bo_move_buffer(bo, new_flags, 0, 0); ++ if (err) { ++ ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT; ++ goto out_unlock; ++ } ++ } ++ ++ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, ++ &bus_size); ++ ++ if (err) { ++ ret = NOPFN_SIGBUS; ++ goto out_unlock; ++ } ++ ++ page_offset = (address - vma->vm_start) >> PAGE_SHIFT; ++ ++ if (bus_size) { ++ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; ++ ++ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; ++ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); ++ } else { ++ ttm = bo->ttm; ++ ++ drm_ttm_fixup_caching(ttm); ++ page = drm_ttm_get_page(ttm, page_offset); ++ if (!page) { ++ ret = NOPFN_OOM; ++ goto out_unlock; ++ } ++ pfn = page_to_pfn(page); ++ vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ? ++ vm_get_page_prot(vma->vm_flags) : ++ drm_io_prot(_DRM_TTM, vma); ++ } ++ ++ err = vm_insert_pfn(vma, address, pfn); ++ if (err) { ++ ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT; ++ goto out_unlock; ++ } ++out_unlock: ++ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++ mutex_unlock(&bo->mutex); ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return ret; ++} ++#endif ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/drm_compat.h git-nokia/drivers/gpu/drm-tungsten/drm_compat.h +--- git/drivers/gpu/drm-tungsten/drm_compat.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_compat.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,380 @@ ++/** ++ * \file drm_compat.h ++ * Backward compatibility definitions for Direct Rendering Manager ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE.
++ */ ++ ++#ifndef _DRM_COMPAT_H_ ++#define _DRM_COMPAT_H_ ++ ++#ifndef minor ++#define minor(x) MINOR((x)) ++#endif ++ ++#ifndef MODULE_LICENSE ++#define MODULE_LICENSE(x) ++#endif ++ ++#ifndef preempt_disable ++#define preempt_disable() ++#define preempt_enable() ++#endif ++ ++#ifndef pte_offset_map ++#define pte_offset_map pte_offset ++#define pte_unmap(pte) ++#endif ++ ++#ifndef module_param ++#define module_param(name, type, perm) ++#endif ++ ++/* older kernels had different irq args */ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) ++#undef DRM_IRQ_ARGS ++#define DRM_IRQ_ARGS int irq, void *arg, struct pt_regs *regs ++#endif ++ ++#ifndef list_for_each_safe ++#define list_for_each_safe(pos, n, head) \ ++ for (pos = (head)->next, n = pos->next; pos != (head); \ ++ pos = n, n = pos->next) ++#endif ++ ++#ifndef list_for_each_entry ++#define list_for_each_entry(pos, head, member) \ ++ for (pos = list_entry((head)->next, typeof(*pos), member), \ ++ prefetch(pos->member.next); \ ++ &pos->member != (head); \ ++ pos = list_entry(pos->member.next, typeof(*pos), member), \ ++ prefetch(pos->member.next)) ++#endif ++ ++#ifndef list_for_each_entry_safe ++#define list_for_each_entry_safe(pos, n, head, member) \ ++ for (pos = list_entry((head)->next, typeof(*pos), member), \ ++ n = list_entry(pos->member.next, typeof(*pos), member); \ ++ &pos->member != (head); \ ++ pos = n, n = list_entry(n->member.next, typeof(*n), member)) ++#endif ++ ++#ifndef __user ++#define __user ++#endif ++ ++#if !defined(__put_page) ++#define __put_page(p) atomic_dec(&(p)->count) ++#endif ++ ++#if !defined(__GFP_COMP) ++#define __GFP_COMP 0 ++#endif ++ ++#if !defined(IRQF_SHARED) ++#define IRQF_SHARED SA_SHIRQ ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ++static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot) ++{ ++ return remap_page_range(vma, from, ++ pfn << PAGE_SHIFT, ++ size, ++ pgprot); ++} ++ ++static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags) ++{ ++ void *addr; ++ ++ addr = kmalloc(size * nmemb, flags); ++ if (addr != NULL) ++ memset((void *)addr, 0, size * nmemb); ++ ++ return addr; ++} ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ++#define mutex_lock down ++#define mutex_unlock up ++ ++#define mutex semaphore ++ ++#define mutex_init(a) sema_init((a), 1) ++ ++#endif ++ ++#ifndef DEFINE_SPINLOCK ++#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED ++#endif ++ ++/* old architectures */ ++#ifdef __AMD64__ ++#define __x86_64__ ++#endif ++ ++/* sysfs __ATTR macro */ ++#ifndef __ATTR ++#define __ATTR(_name,_mode,_show,_store) { \ ++ .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \ ++ .show = _show, \ ++ .store = _store, \ ++} ++#endif ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ++#define vmalloc_user(_size) ({void * tmp = vmalloc(_size); \ ++ if (tmp) memset(tmp, 0, _size); \ ++ (tmp);}) ++#endif ++ ++#ifndef list_for_each_entry_safe_reverse ++#define list_for_each_entry_safe_reverse(pos, n, head, member) \ ++ for (pos = list_entry((head)->prev, typeof(*pos), member), \ ++ n = list_entry(pos->member.prev, typeof(*pos), member); \ ++ &pos->member != (head); \ ++ pos = n, n = list_entry(n->member.prev, typeof(*n), member)) ++#endif ++ ++#include ++#include ++ ++#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \ ++ (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))) ++#define DRM_ODD_MM_COMPAT ++#endif ++ ++#if (LINUX_VERSION_CODE >=
KERNEL_VERSION(2,6,21)) ++#define DRM_FULL_MM_COMPAT ++#endif ++ ++ ++/* ++ * Flush relevant caches and clear a VMA structure so that page references ++ * will cause a page fault. Don't flush tlbs. ++ */ ++ ++extern void drm_clear_vma(struct vm_area_struct *vma, ++ unsigned long addr, unsigned long end); ++ ++/* ++ * Return the PTE protection map entries for the VMA flags given by ++ * flags. This is a functional interface to the kernel's protection map. ++ */ ++ ++extern pgprot_t vm_get_page_prot(unsigned long vm_flags); ++ ++#ifndef GFP_DMA32 ++#define GFP_DMA32 GFP_KERNEL ++#endif ++#ifndef __GFP_DMA32 ++#define __GFP_DMA32 GFP_KERNEL ++#endif ++ ++#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ ++/* ++ * These are too slow in earlier kernels. ++ */ ++ ++extern int drm_unmap_page_from_agp(struct page *page); ++extern int drm_map_page_into_agp(struct page *page); ++ ++#define map_page_into_agp drm_map_page_into_agp ++#define unmap_page_from_agp drm_unmap_page_from_agp ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++extern struct page *get_nopage_retry(void); ++extern void free_nopage_retry(void); ++ ++#define NOPAGE_REFAULT get_nopage_retry() ++#endif ++ ++ ++#ifndef DRM_FULL_MM_COMPAT ++ ++/* ++ * For now, just return a dummy page that we've allocated out of ++ * static space. The page will be put by do_nopage() since we've already ++ * filled out the pte. ++ */ ++ ++struct fault_data { ++ struct vm_area_struct *vma; ++ unsigned long address; ++ pgoff_t pgoff; ++ unsigned int flags; ++ ++ int type; ++}; ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) ++extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address, ++ int *type); ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \ ++ !defined(DRM_FULL_MM_COMPAT) ++extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, ++ unsigned long address); ++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */ ++#endif /* ndef DRM_FULL_MM_COMPAT */ ++ ++#ifdef DRM_ODD_MM_COMPAT ++ ++struct drm_buffer_object; ++ ++ ++/* ++ * Add a vma to the ttm vma list, and the ++ * process mm pointer to the ttm mm list. Needs the ttm mutex. ++ */ ++ ++extern int drm_bo_add_vma(struct drm_buffer_object * bo, ++ struct vm_area_struct *vma); ++/* ++ * Delete a vma and the corresponding mm pointer from the ++ * ttm lists. Needs the ttm mutex. ++ */ ++extern void drm_bo_delete_vma(struct drm_buffer_object * bo, ++ struct vm_area_struct *vma); ++ ++/* ++ * Attempts to lock all relevant mmap_sems for a ttm, while ++ * not releasing the ttm mutex. May return -EAGAIN to avoid ++ * deadlocks. In that case the caller shall release the ttm mutex, ++ * schedule() and try again. ++ */ ++ ++extern int drm_bo_lock_kmm(struct drm_buffer_object * bo); ++ ++/* ++ * Unlock all relevant mmap_sems for a ttm. ++ */ ++extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo); ++ ++/* ++ * If the ttm was bound to the aperture, this function shall be called ++ * with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all ++ * vmas mapping this ttm. This is needed just after unmapping the ptes of ++ * the vma, otherwise the do_nopage() function will bug :(. The function ++ * releases the mmap_sems for this ttm. ++ */ ++ ++extern void drm_bo_finish_unmap(struct drm_buffer_object *bo); ++ ++/* ++ * Remap all vmas of this ttm using io_remap_pfn_range. 
We cannot ++ * fault these pfns in, because the first one will set the vma VM_PFNMAP ++ * flag, which will make the next fault bug in do_nopage(). The function ++ * releases the mmap_sems for this ttm. ++ */ ++ ++extern int drm_bo_remap_bound(struct drm_buffer_object *bo); ++ ++ ++/* ++ * Remap a vma for a bound ttm. Call with the ttm mutex held and ++ * the relevant mmap_sem locked. ++ */ ++extern int drm_bo_map_bound(struct vm_area_struct *vma); ++ ++#endif ++ ++/* fixme when functions are upstreamed - upstreamed for 2.6.23 */ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) ++#define DRM_IDR_COMPAT_FN ++#define DRM_NO_FAULT ++extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, ++ unsigned long address); ++#endif ++#ifdef DRM_IDR_COMPAT_FN ++int idr_for_each(struct idr *idp, ++ int (*fn)(int id, void *p, void *data), void *data); ++void idr_remove_all(struct idr *idp); ++#endif ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) ++void *idr_replace(struct idr *idp, void *ptr, int id); ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) ++typedef _Bool bool; ++#endif ++ ++ ++#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM)) ++#define DRM_KMAP_ATOMIC_PROT_PFN ++extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, ++ pgprot_t protection); ++#endif ++ ++#if !defined(flush_agp_mappings) ++#define flush_agp_mappings() do {} while(0) ++#endif ++ ++#ifndef DMA_BIT_MASK ++#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1) ++#endif ++ ++#ifndef VM_CAN_NONLINEAR ++#define DRM_VM_NOPAGE 1 ++#endif ++ ++#ifdef DRM_VM_NOPAGE ++ ++extern struct page *drm_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type); ++ ++extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type); ++ ++extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type); ++ ++extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type); ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) ++#define drm_core_ioremap_wc drm_core_ioremap ++#endif ++ ++#ifndef OS_HAS_GEM ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) ++#define OS_HAS_GEM 1 ++#else ++#define OS_HAS_GEM 0 ++#endif ++#endif ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/drm_context.c git-nokia/drivers/gpu/drm-tungsten/drm_context.c +--- git/drivers/gpu/drm-tungsten/drm_context.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_context.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,472 @@ ++/** ++ * \file drm_context.c ++ * IOCTLs for generic contexts ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com ++ * ++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++/* ++ * ChangeLog: ++ * 2001-11-16 Torsten Duwe ++ * added context constructor/destructor hooks, ++ * needed by SiS driver's memory management. ++ */ ++ ++#include "drmP.h" ++ ++/******************************************************************/ ++/** \name Context bitmap support */ ++/*@{*/ ++ ++/** ++ * Free a handle from the context bitmap. ++ * ++ * \param dev DRM device. ++ * \param ctx_handle context handle. ++ * ++ * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry ++ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex ++ * lock. ++ */ ++void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle) ++{ ++ mutex_lock(&dev->struct_mutex); ++ idr_remove(&dev->ctx_idr, ctx_handle); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/** ++ * Context bitmap allocation. ++ * ++ * \param dev DRM device. ++ * \return (non-negative) context handle on success or a negative number on failure. ++ * ++ * Allocate a new id from drm_device::ctx_idr while holding the ++ * drm_device::struct_mutex lock. ++ */ ++static int drm_ctxbitmap_next(struct drm_device *dev) ++{ ++ int new_id; ++ int ret; ++ ++again: ++ if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) { ++ DRM_ERROR("Out of memory expanding context idr\n"); ++ return -ENOMEM; ++ } ++ mutex_lock(&dev->struct_mutex); ++ ret = idr_get_new_above(&dev->ctx_idr, NULL, ++ DRM_RESERVED_CONTEXTS, &new_id); ++ if (ret == -EAGAIN) { ++ mutex_unlock(&dev->struct_mutex); ++ goto again; ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ return new_id; ++} ++ ++/** ++ * Context bitmap initialization. ++ * ++ * \param dev DRM device. ++ * ++ * Initialise the drm_device::ctx_idr ++ */ ++int drm_ctxbitmap_init(struct drm_device *dev) ++{ ++ idr_init(&dev->ctx_idr); ++ return 0; ++} ++ ++/** ++ * Context bitmap cleanup. ++ * ++ * \param dev DRM device. ++ * ++ * Free all idr members using the idr_remove_all() helper function ++ * while holding the drm_device::struct_mutex lock. ++ */ ++void drm_ctxbitmap_cleanup(struct drm_device *dev) ++{ ++ mutex_lock(&dev->struct_mutex); ++ idr_remove_all(&dev->ctx_idr); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/*@}*/ ++ ++/******************************************************************/ ++/** \name Per Context SAREA Support */ ++/*@{*/ ++ ++/** ++ * Get per-context SAREA. ++ * ++ * \param inode device inode.
++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx_priv_map structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Gets the map from drm_device::ctx_idr with the handle specified and ++ * returns the user token of its map-list entry. ++ */ ++int drm_getsareactx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx_priv_map *request = data; ++ struct drm_map *map; ++ struct drm_map_list *_entry; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ map = idr_find(&dev->ctx_idr, request->ctx_id); ++ if (!map) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ request->handle = NULL; ++ list_for_each_entry(_entry, &dev->maplist, head) { ++ if (_entry->map == map) { ++ request->handle = ++ (void *)(unsigned long)_entry->user_token; ++ break; ++ } ++ } ++ if (request->handle == NULL) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++/** ++ * Set per-context SAREA. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx_priv_map structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Searches the mapping specified in \p arg and updates the entry in ++ * drm_device::ctx_idr with it. ++ */ ++int drm_setsareactx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx_priv_map *request = data; ++ struct drm_map *map = NULL; ++ struct drm_map_list *r_list = NULL; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_for_each_entry(r_list, &dev->maplist, head) { ++ if (r_list->map ++ && r_list->user_token == (unsigned long) request->handle) ++ goto found; ++ } ++ bad: ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ ++ found: ++ map = r_list->map; ++ if (!map) ++ goto bad; ++ ++ if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id))) ++ goto bad; ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/*@}*/ ++ ++/******************************************************************/ ++/** \name The actual DRM context handling routines */ ++/*@{*/ ++ ++/** ++ * Switch context. ++ * ++ * \param dev DRM device. ++ * \param old old context handle. ++ * \param new new context handle. ++ * \return zero on success or a negative number on failure. ++ * ++ * Attempt to set drm_device::context_flag. ++ */ ++static int drm_context_switch(struct drm_device *dev, int old, int new) ++{ ++ if (test_and_set_bit(0, &dev->context_flag)) { ++ DRM_ERROR("Reentering -- FIXME\n"); ++ return -EBUSY; ++ } ++ ++ DRM_DEBUG("Context switch from %d to %d\n", old, new); ++ ++ if (new == dev->last_context) { ++ clear_bit(0, &dev->context_flag); ++ return 0; ++ } ++ ++ return 0; ++} ++ ++/** ++ * Complete context switch. ++ * ++ * \param dev DRM device. ++ * \param new new context handle. ++ * \return zero on success or a negative number on failure. ++ * ++ * Updates drm_device::last_context and drm_device::last_switch. Verifies the ++ * hardware lock is held, clears the drm_device::context_flag and wakes up ++ * drm_device::context_wait. ++ */ ++static int drm_context_switch_complete(struct drm_device *dev, int new) ++{ ++ dev->last_context = new; /* PRE/POST: This is the _only_ writer.
*/ ++ dev->last_switch = jiffies; ++ ++ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { ++ DRM_ERROR("Lock isn't held after context switch\n"); ++ } ++ ++ /* If a context switch is ever initiated ++ when the kernel holds the lock, release ++ that lock here. */ ++ clear_bit(0, &dev->context_flag); ++ wake_up(&dev->context_wait); ++ ++ return 0; ++} ++ ++/** ++ * Reserve contexts. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx_res structure. ++ * \return zero on success or a negative number on failure. ++ */ ++int drm_resctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx_res *res = data; ++ struct drm_ctx ctx; ++ int i; ++ ++ if (res->count >= DRM_RESERVED_CONTEXTS) { ++ memset(&ctx, 0, sizeof(ctx)); ++ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ++ ctx.handle = i; ++ if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx))) ++ return -EFAULT; ++ } ++ } ++ res->count = DRM_RESERVED_CONTEXTS; ++ ++ return 0; ++} ++ ++/** ++ * Add context. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Get a new handle for the context and copy to userspace. ++ */ ++int drm_addctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx_list *ctx_entry; ++ struct drm_ctx *ctx = data; ++ ++ ctx->handle = drm_ctxbitmap_next(dev); ++ if (ctx->handle == DRM_KERNEL_CONTEXT) { ++ /* Skip kernel's context and get a new one. */ ++ ctx->handle = drm_ctxbitmap_next(dev); ++ } ++ DRM_DEBUG("%d\n", ctx->handle); ++ if (ctx->handle == -1) { ++ DRM_DEBUG("Not enough free contexts.\n"); ++ /* Should this return -EBUSY instead? */ ++ return -ENOMEM; ++ } ++ ++ if (ctx->handle != DRM_KERNEL_CONTEXT) { ++ if (dev->driver->context_ctor) ++ if (!dev->driver->context_ctor(dev, ctx->handle)) { ++ DRM_DEBUG("Running out of ctxs or memory.\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST); ++ if (!ctx_entry) { ++ DRM_DEBUG("out of memory\n"); ++ return -ENOMEM; ++ } ++ ++ INIT_LIST_HEAD(&ctx_entry->head); ++ ctx_entry->handle = ctx->handle; ++ ctx_entry->tag = file_priv; ++ ++ mutex_lock(&dev->ctxlist_mutex); ++ list_add(&ctx_entry->head, &dev->ctxlist); ++ ++dev->ctx_count; ++ mutex_unlock(&dev->ctxlist_mutex); ++ ++ return 0; ++} ++ ++int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ /* This does nothing */ ++ return 0; ++} ++ ++/** ++ * Get context. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx structure. ++ * \return zero on success or a negative number on failure. ++ */ ++int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_ctx *ctx = data; ++ ++ /* This is 0, because we don't handle any context flags */ ++ ctx->flags = 0; ++ ++ return 0; ++} ++ ++/** ++ * Switch context. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Calls context_switch(). 
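++ * ++ * Note that the switch only begins here: drm_context_switch() sets ++ * drm_device::context_flag, and the switch is completed when the new ++ * context calls drm_newctx(), which invokes drm_context_switch_complete().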
++ */ ++int drm_switchctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx *ctx = data; ++ ++ DRM_DEBUG("%d\n", ctx->handle); ++ return drm_context_switch(dev, dev->last_context, ctx->handle); ++} ++ ++/** ++ * New context. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Calls context_switch_complete(). ++ */ ++int drm_newctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx *ctx = data; ++ ++ DRM_DEBUG("%d\n", ctx->handle); ++ drm_context_switch_complete(dev, ctx->handle); ++ ++ return 0; ++} ++ ++/** ++ * Remove context. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument pointing to a drm_ctx structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * If not the special kernel context, calls ctxbitmap_free() to free the specified context. ++ */ ++int drm_rmctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_ctx *ctx = data; ++ ++ DRM_DEBUG("%d\n", ctx->handle); ++ if (ctx->handle == DRM_KERNEL_CONTEXT + 1) { ++ file_priv->remove_auth_on_close = 1; ++ } ++ if (ctx->handle != DRM_KERNEL_CONTEXT) { ++ if (dev->driver->context_dtor) ++ dev->driver->context_dtor(dev, ctx->handle); ++ drm_ctxbitmap_free(dev, ctx->handle); ++ } ++ ++ mutex_lock(&dev->ctxlist_mutex); ++ if (!list_empty(&dev->ctxlist)) { ++ struct drm_ctx_list *pos, *n; ++ ++ list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { ++ if (pos->handle == ctx->handle) { ++ list_del(&pos->head); ++ drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); ++ --dev->ctx_count; ++ } ++ } ++ } ++ mutex_unlock(&dev->ctxlist_mutex); ++ ++ return 0; ++} ++ ++/*@}*/ +diff -Nurd git/drivers/gpu/drm-tungsten/drm_core.h git-nokia/drivers/gpu/drm-tungsten/drm_core.h +--- git/drivers/gpu/drm-tungsten/drm_core.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_core.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,35 @@ ++/* ++ * Copyright 2004 Jon Smirl ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#define CORE_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl" ++ ++#define CORE_NAME "drm" ++#define CORE_DESC "DRM shared core routines" ++#define CORE_DATE "20060810" ++ ++#define DRM_IF_MAJOR 1 ++#define DRM_IF_MINOR 3 ++ ++#define CORE_MAJOR 1 ++#define CORE_MINOR 1 ++#define CORE_PATCHLEVEL 0 +diff -Nurd git/drivers/gpu/drm-tungsten/drm_dma.c git-nokia/drivers/gpu/drm-tungsten/drm_dma.c +--- git/drivers/gpu/drm-tungsten/drm_dma.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_dma.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,179 @@ ++/** ++ * \file drm_dma.c ++ * DMA IOCTL and function support ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com ++ * ++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++/** ++ * Initialize the DMA data. ++ * ++ * \param dev DRM device. ++ * \return zero on success or a negative value on failure. ++ * ++ * Allocate and initialize a drm_device_dma structure. ++ */ ++int drm_dma_setup(struct drm_device *dev) ++{ ++ int i; ++ ++ dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER); ++ if (!dev->dma) ++ return -ENOMEM; ++ ++ memset(dev->dma, 0, sizeof(*dev->dma)); ++ ++ for (i = 0; i <= DRM_MAX_ORDER; i++) ++ memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0])); ++ ++ return 0; ++} ++ ++/** ++ * Cleanup the DMA resources. ++ * ++ * \param dev DRM device. ++ * ++ * Free all pages associated with DMA buffers, the buffers and pages lists, and ++ * finally the drm_device::dma structure itself. 
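++ * ++ * It is safe to call this when DMA was never set up: the function ++ * returns immediately if drm_device::dma is NULL.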
++ */ ++void drm_dma_takedown(struct drm_device *dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int i, j; ++ ++ if (!dma) ++ return; ++ ++ /* Clear dma buffers */ ++ for (i = 0; i <= DRM_MAX_ORDER; i++) { ++ if (dma->bufs[i].seg_count) { ++ DRM_DEBUG("order %d: buf_count = %d," ++ " seg_count = %d\n", ++ i, ++ dma->bufs[i].buf_count, ++ dma->bufs[i].seg_count); ++ for (j = 0; j < dma->bufs[i].seg_count; j++) { ++ if (dma->bufs[i].seglist[j]) { ++ drm_pci_free(dev, dma->bufs[i].seglist[j]); ++ } ++ } ++ drm_free(dma->bufs[i].seglist, ++ dma->bufs[i].seg_count ++ * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS); ++ } ++ if (dma->bufs[i].buf_count) { ++ for (j = 0; j < dma->bufs[i].buf_count; j++) { ++ if (dma->bufs[i].buflist[j].dev_private) { ++ drm_free(dma->bufs[i].buflist[j]. ++ dev_private, ++ dma->bufs[i].buflist[j]. ++ dev_priv_size, DRM_MEM_BUFS); ++ } ++ } ++ drm_free(dma->bufs[i].buflist, ++ dma->bufs[i].buf_count * ++ sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS); ++ } ++ } ++ ++ if (dma->buflist) { ++ drm_free(dma->buflist, ++ dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS); ++ } ++ ++ if (dma->pagelist) { ++ drm_free(dma->pagelist, ++ dma->page_count * sizeof(*dma->pagelist), ++ DRM_MEM_PAGES); ++ } ++ drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER); ++ dev->dma = NULL; ++} ++ ++/** ++ * Free a buffer. ++ * ++ * \param dev DRM device. ++ * \param buf buffer to free. ++ * ++ * Resets the fields of \p buf. ++ */ ++void drm_free_buffer(struct drm_device *dev, struct drm_buf *buf) ++{ ++ if (!buf) ++ return; ++ ++ buf->waiting = 0; ++ buf->pending = 0; ++ buf->file_priv = NULL; ++ buf->used = 0; ++ ++ if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) ++ && waitqueue_active(&buf->dma_wait)) { ++ wake_up_interruptible(&buf->dma_wait); ++ } ++} ++ ++/** ++ * Reclaim the buffers. ++ * ++ * \param file_priv DRM file private. ++ * ++ * Frees each buffer associated with \p file_priv not already on the hardware. ++ */ ++void drm_core_reclaim_buffers(struct drm_device *dev, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int i; ++ ++ if (!dma) ++ return; ++ for (i = 0; i < dma->buf_count; i++) { ++ if (dma->buflist[i]->file_priv == file_priv) { ++ switch (dma->buflist[i]->list) { ++ case DRM_LIST_NONE: ++ drm_free_buffer(dev, dma->buflist[i]); ++ break; ++ case DRM_LIST_WAIT: ++ dma->buflist[i]->list = DRM_LIST_RECLAIM; ++ break; ++ default: ++ /* Buffer already on hardware. */ ++ break; ++ } ++ } ++ } ++} ++EXPORT_SYMBOL(drm_core_reclaim_buffers); +diff -Nurd git/drivers/gpu/drm-tungsten/drm_drawable.c git-nokia/drivers/gpu/drm-tungsten/drm_drawable.c +--- git/drivers/gpu/drm-tungsten/drm_drawable.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_drawable.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,192 @@ ++/** ++ * \file drm_drawable.c ++ * IOCTLs for drawables ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ * \author Michel Dänzer ++ */ ++ ++/* ++ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++/** ++ * Allocate drawable ID and memory to store information about it. ++ */ ++int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ unsigned long irqflags; ++ struct drm_draw *draw = data; ++ int new_id = 0; ++ int ret; ++ ++again: ++ if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) { ++ DRM_ERROR("Out of memory expanding drawable idr\n"); ++ return -ENOMEM; ++ } ++ ++ spin_lock_irqsave(&dev->drw_lock, irqflags); ++ ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id); ++ if (ret == -EAGAIN) { ++ spin_unlock_irqrestore(&dev->drw_lock, irqflags); ++ goto again; ++ } ++ ++ spin_unlock_irqrestore(&dev->drw_lock, irqflags); ++ ++ draw->handle = new_id; ++ ++ DRM_DEBUG("%d\n", draw->handle); ++ ++ return 0; ++} ++ ++/** ++ * Free drawable ID and memory to store information about it. 
++ */ ++int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_draw *draw = data; ++ unsigned long irqflags; ++ ++ spin_lock_irqsave(&dev->drw_lock, irqflags); ++ ++ drm_free(drm_get_drawable_info(dev, draw->handle), ++ sizeof(struct drm_drawable_info), DRM_MEM_BUFS); ++ ++ idr_remove(&dev->drw_idr, draw->handle); ++ ++ spin_unlock_irqrestore(&dev->drw_lock, irqflags); ++ DRM_DEBUG("%d\n", draw->handle); ++ return 0; ++} ++ ++int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_update_draw *update = data; ++ unsigned long irqflags; ++ struct drm_clip_rect *rects; ++ struct drm_drawable_info *info; ++ int err; ++ ++ info = idr_find(&dev->drw_idr, update->handle); ++ if (!info) { ++ info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); ++ if (!info) ++ return -ENOMEM; ++ if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) { ++ DRM_ERROR("No such drawable %d\n", update->handle); ++ drm_free(info, sizeof(*info), DRM_MEM_BUFS); ++ return -EINVAL; ++ } ++ } ++ ++ switch (update->type) { ++ case DRM_DRAWABLE_CLIPRECTS: ++ if (update->num != info->num_rects) { ++ rects = drm_alloc(update->num * sizeof(struct drm_clip_rect), ++ DRM_MEM_BUFS); ++ } else ++ rects = info->rects; ++ ++ if (update->num && !rects) { ++ DRM_ERROR("Failed to allocate cliprect memory\n"); ++ err = -ENOMEM; ++ goto error; ++ } ++ ++ if (update->num && DRM_COPY_FROM_USER(rects, ++ (struct drm_clip_rect __user *) ++ (unsigned long)update->data, ++ update->num * ++ sizeof(*rects))) { ++ DRM_ERROR("Failed to copy cliprects from userspace\n"); ++ err = -EFAULT; ++ goto error; ++ } ++ ++ spin_lock_irqsave(&dev->drw_lock, irqflags); ++ ++ if (rects != info->rects) { ++ drm_free(info->rects, info->num_rects * ++ sizeof(struct drm_clip_rect), DRM_MEM_BUFS); ++ } ++ ++ info->rects = rects; ++ info->num_rects = update->num; ++ ++ spin_unlock_irqrestore(&dev->drw_lock, irqflags); ++ ++ DRM_DEBUG("Updated %d cliprects for drawable %d\n", ++ info->num_rects, update->handle); ++ break; ++ default: ++ DRM_ERROR("Invalid update type %d\n", update->type); ++ return -EINVAL; ++ } ++ ++ return 0; ++ ++error: ++ if (rects != info->rects) ++ drm_free(rects, update->num * sizeof(struct drm_clip_rect), ++ DRM_MEM_BUFS); ++ ++ return err; ++} ++ ++/** ++ * Caller must hold the drawable spinlock! ++ */ ++struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id) ++{ ++ return idr_find(&dev->drw_idr, id); ++} ++EXPORT_SYMBOL(drm_get_drawable_info); ++ ++static int drm_drawable_free(int idr, void *p, void *data) ++{ ++ struct drm_drawable_info *info = p; ++ ++ if (info) { ++ drm_free(info->rects, info->num_rects * ++ sizeof(struct drm_clip_rect), DRM_MEM_BUFS); ++ drm_free(info, sizeof(*info), DRM_MEM_BUFS); ++ } ++ ++ return 0; ++} ++ ++void drm_drawable_free_all(struct drm_device *dev) ++{ ++ idr_for_each(&dev->drw_idr, drm_drawable_free, NULL); ++ idr_remove_all(&dev->drw_idr); ++} +diff -Nurd git/drivers/gpu/drm-tungsten/drm_drv.c git-nokia/drivers/gpu/drm-tungsten/drm_drv.c +--- git/drivers/gpu/drm-tungsten/drm_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_drv.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,697 @@ ++/** ++ * \file drm_drv.c ++ * Generic driver template ++ * ++ * \author Rickard E. 
(Rik) Faith ++ * \author Gareth Hughes ++ * ++ * To use this template, you must at least define the following (samples ++ * given for the MGA driver): ++ * ++ * \code ++ * #define DRIVER_AUTHOR "VA Linux Systems, Inc." ++ * ++ * #define DRIVER_NAME "mga" ++ * #define DRIVER_DESC "Matrox G200/G400" ++ * #define DRIVER_DATE "20001127" ++ * ++ * #define drm_x mga_##x ++ * \endcode ++ */ ++ ++/* ++ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com ++ * ++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++#include "drmP.h" ++#include "drm_core.h" ++ ++static void drm_cleanup(struct drm_device * dev); ++int drm_fb_loaded = 0; ++ ++static int drm_version(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++/** Ioctl table */ ++static struct drm_ioctl_desc drm_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), ++ 
DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), ++ /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ ++ DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ ++#if __OS_HAS_AGP ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++#endif ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, ++ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, ++ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, ++ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, ++ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH), ++ 
DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0), ++ ++ DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0), ++ ++#if OS_HAS_GEM ++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), ++#endif ++}; ++ ++#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) ++ ++ ++/** ++ * Take down the DRM device. ++ * ++ * \param dev DRM device structure. ++ * ++ * Frees every resource in \p dev. ++ * ++ * \sa drm_device ++ */ ++int drm_lastclose(struct drm_device * dev) ++{ ++ struct drm_magic_entry *pt, *next; ++ struct drm_map_list *r_list, *list_t; ++ struct drm_vma_entry *vma, *vma_temp; ++ int i; ++ ++ DRM_DEBUG("\n"); ++ ++ /* ++ * We can't do much about this function failing. ++ */ ++ ++ drm_bo_driver_finish(dev); ++ ++ if (dev->driver->lastclose) ++ dev->driver->lastclose(dev); ++ DRM_DEBUG("driver lastclose completed\n"); ++ ++ if (dev->unique) { ++ drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); ++ dev->unique = NULL; ++ dev->unique_len = 0; ++ } ++ ++ if (dev->irq_enabled) ++ drm_irq_uninstall(dev); ++ ++ /* Free drawable information memory */ ++ mutex_lock(&dev->struct_mutex); ++ ++ drm_drawable_free_all(dev); ++ del_timer(&dev->timer); ++ ++ if (dev->unique) { ++ drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); ++ dev->unique = NULL; ++ dev->unique_len = 0; ++ } ++ ++ if (dev->magicfree.next) { ++ list_for_each_entry_safe(pt, next, &dev->magicfree, head) { ++ list_del(&pt->head); ++ drm_ht_remove_item(&dev->magiclist, &pt->hash_item); ++ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); ++ } ++ drm_ht_remove(&dev->magiclist); ++ } ++ ++ ++ /* Clear AGP information */ ++ if (drm_core_has_AGP(dev) && dev->agp) { ++ struct drm_agp_mem *entry, *tempe; ++ ++ /* Remove AGP resources, but leave dev->agp ++ intact until drv_cleanup is called. 
*/ ++ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { ++ if (entry->bound) ++ drm_unbind_agp(entry->memory); ++ drm_free_agp(entry->memory, entry->pages); ++ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); ++ } ++ INIT_LIST_HEAD(&dev->agp->memory); ++ ++ if (dev->agp->acquired) ++ drm_agp_release(dev); ++ ++ dev->agp->acquired = 0; ++ dev->agp->enabled = 0; ++ } ++ if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) { ++ drm_sg_cleanup(dev->sg); ++ dev->sg = NULL; ++ } ++ ++ /* Clear vma list (only built for debugging) */ ++ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { ++ list_del(&vma->head); ++ drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS); ++ } ++ ++ list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { ++ if (!(r_list->map->flags & _DRM_DRIVER)) { ++ drm_rmmap_locked(dev, r_list->map); ++ r_list = NULL; ++ } ++ } ++ ++ if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { ++ for (i = 0; i < dev->queue_count; i++) { ++ ++ if (dev->queuelist[i]) { ++ drm_free(dev->queuelist[i], ++ sizeof(*dev->queuelist[0]), ++ DRM_MEM_QUEUES); ++ dev->queuelist[i] = NULL; ++ } ++ } ++ drm_free(dev->queuelist, ++ dev->queue_slots * sizeof(*dev->queuelist), ++ DRM_MEM_QUEUES); ++ dev->queuelist = NULL; ++ } ++ dev->queue_count = 0; ++ ++ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) ++ drm_dma_takedown(dev); ++ ++ if (dev->lock.hw_lock) { ++ dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ ++ dev->lock.file_priv = NULL; ++ wake_up_interruptible(&dev->lock.lock_queue); ++ } ++ dev->dev_mapping = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ ++ DRM_DEBUG("lastclose completed\n"); ++ return 0; ++} ++ ++void drm_cleanup_pci(struct pci_dev *pdev) ++{ ++ struct drm_device *dev = pci_get_drvdata(pdev); ++ ++ pci_set_drvdata(pdev, NULL); ++ pci_release_regions(pdev); ++ if (dev) ++ drm_cleanup(dev); ++} ++EXPORT_SYMBOL(drm_cleanup_pci); ++ ++/** ++ * Module initialization. Called via init_module at module load time, or via ++ * linux/init/main.c (this is not currently supported). ++ * ++ * \return zero on success or a negative number on failure. ++ * ++ * Initializes an array of drm_device structures, and attempts to ++ * initialize all available devices, using consecutive minors, registering the ++ * stubs and initializing the AGP device. ++ * ++ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and ++ * after the initialization for driver customization. ++ */ ++int drm_init(struct drm_driver *driver, ++ struct pci_device_id *pciidlist) ++{ ++ struct pci_dev *pdev; ++ struct pci_device_id *pid; ++ int rc, i; ++ ++ DRM_DEBUG("\n"); ++ ++ for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) { ++ pid = &pciidlist[i]; ++ ++ pdev = NULL; ++ /* pass back in pdev to account for multiple identical cards */ ++ while ((pdev = ++ pci_get_subsys(pid->vendor, pid->device, pid->subvendor, ++ pid->subdevice, pdev))) { ++ /* Are there device class requirements? */ ++ if ((pid->class != 0) ++ && ((pdev->class & pid->class_mask) != pid->class)) { ++ continue; ++ } ++ /* is there already a driver loaded, or (short circuit saves work) */ ++ /* does something like VesaFB have control of the memory region? 
*/
++ if (
++#ifdef CONFIG_PCI
++ pci_dev_driver(pdev) ||
++#endif
++ pci_request_regions(pdev, "DRM scan")) {
++ /* go into stealth mode */
++ drm_fb_loaded = 1;
++ pci_dev_put(pdev);
++ break;
++ }
++ /* no fbdev or vesadev, put things back and wait for normal probe */
++ pci_release_regions(pdev);
++ }
++ }
++
++ if (!drm_fb_loaded)
++ return pci_register_driver(&driver->pci_driver);
++ else {
++ for (i = 0; pciidlist[i].vendor != 0; i++) {
++ pid = &pciidlist[i];
++
++ pdev = NULL;
++ /* pass back in pdev to account for multiple identical cards */
++ while ((pdev =
++ pci_get_subsys(pid->vendor, pid->device,
++ pid->subvendor, pid->subdevice,
++ pdev))) {
++ /* Are there device class requirements? */
++ if ((pid->class != 0)
++ && ((pdev->class & pid->class_mask) != pid->class)) {
++ continue;
++ }
++#ifdef CONFIG_PCI
++ /* stealth mode requires a manual probe */
++ pci_dev_get(pdev);
++#endif
++ if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {
++ pci_dev_put(pdev);
++ return rc;
++ }
++ }
++ }
++ DRM_INFO("Used old pci detect: framebuffer loaded\n");
++ }
++ return 0;
++}
++EXPORT_SYMBOL(drm_init);
++
++/**
++ * Called via cleanup_module() at module unload time.
++ *
++ * Cleans up all DRM devices, calling drm_lastclose().
++ *
++ * \sa drm_init
++ */
++static void drm_cleanup(struct drm_device * dev)
++{
++
++ DRM_DEBUG("\n");
++ if (!dev) {
++ DRM_ERROR("cleanup called no dev\n");
++ return;
++ }
++
++ drm_lastclose(dev);
++ drm_fence_manager_takedown(dev);
++
++ if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
++ && dev->agp->agp_mtrr >= 0) {
++ int retval;
++ retval = mtrr_del(dev->agp->agp_mtrr,
++ dev->agp->agp_info.aper_base,
++ dev->agp->agp_info.aper_size * 1024 * 1024);
++ DRM_DEBUG("mtrr_del=%d\n", retval);
++ }
++
++ if (drm_core_has_AGP(dev) && dev->agp) {
++ drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
++ dev->agp = NULL;
++ }
++ if (dev->driver->unload)
++ dev->driver->unload(dev);
++
++ if (!drm_fb_loaded)
++ pci_disable_device(dev->pdev);
++
++ drm_ctxbitmap_cleanup(dev);
++ drm_ht_remove(&dev->map_hash);
++ drm_mm_takedown(&dev->offset_manager);
++ drm_ht_remove(&dev->object_hash);
++
++ drm_put_minor(dev);
++ if (drm_put_dev(dev))
++ DRM_ERROR("Cannot unload module\n");
++}
++
++int drm_minors_cleanup(int id, void *ptr, void *data)
++{
++ struct drm_minor *minor = ptr;
++ struct drm_device *dev;
++ struct drm_driver *driver = data;
++
++ dev = minor->dev;
++ if (minor->dev->driver != driver)
++ return 0;
++
++ if (minor->type != DRM_MINOR_LEGACY)
++ return 0;
++
++ if (dev)
++ pci_dev_put(dev->pdev);
++ drm_cleanup(dev);
++ return 1;
++}
++
++void drm_exit(struct drm_driver *driver)
++{
++ DRM_DEBUG("\n");
++ if (drm_fb_loaded) {
++ idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);
++ } else
++ pci_unregister_driver(&driver->pci_driver);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++ free_nopage_retry();
++#endif
++ DRM_INFO("Module unloaded\n");
++}
++EXPORT_SYMBOL(drm_exit);
++
++/** File operations structure */
++static const struct file_operations drm_stub_fops = {
++ .owner = THIS_MODULE,
++ .open = drm_stub_open
++};
++
++static int __init drm_core_init(void)
++{
++ int ret;
++ struct sysinfo si;
++ unsigned long avail_memctl_mem;
++ unsigned long max_memctl_mem;
++
++ idr_init(&drm_minors_idr);
++ si_meminfo(&si);
++
++ /*
++ * AGP only allows low / DMA32 memory ATM.
++ */
++
++ avail_memctl_mem = si.totalram - si.totalhigh;
++
++ /*
++ * Avoid overflows
++ */
++
++ max_memctl_mem = 1UL << (32 - PAGE_SHIFT);
++ max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;
++
++ if (avail_memctl_mem >= max_memctl_mem)
++ avail_memctl_mem = max_memctl_mem;
++
++ drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit);
++
++ ret = -ENOMEM;
++
++ if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
++ goto err_p1;
++
++ drm_class = drm_sysfs_create(THIS_MODULE, "drm");
++ if (IS_ERR(drm_class)) {
++ printk(KERN_ERR "DRM: Error creating drm class.\n");
++ ret = PTR_ERR(drm_class);
++ goto err_p2;
++ }
++
++ drm_proc_root = proc_mkdir("dri", NULL);
++ if (!drm_proc_root) {
++ DRM_ERROR("Cannot create /proc/dri\n");
++ ret = -1;
++ goto err_p3;
++ }
++
++ drm_mem_init();
++
++ DRM_INFO("Initialized %s %d.%d.%d %s\n",
++ CORE_NAME,
++ CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
++ return 0;
++err_p3:
++ drm_sysfs_destroy();
++err_p2:
++ unregister_chrdev(DRM_MAJOR, "drm");
++
++ idr_destroy(&drm_minors_idr);
++err_p1:
++ return ret;
++}
++
++static void __exit drm_core_exit(void)
++{
++ remove_proc_entry("dri", NULL);
++ drm_sysfs_destroy();
++
++ unregister_chrdev(DRM_MAJOR, "drm");
++
++ idr_destroy(&drm_minors_idr);
++}
++
++module_init(drm_core_init);
++module_exit(drm_core_exit);
++
++/**
++ * Get version information
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_version structure.
++ * \return zero on success or negative number on failure.
++ *
++ * Fills in the version information in \p arg.
++ */
++static int drm_version(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_version *version = data;
++ int len;
++
++ version->version_major = dev->driver->major;
++ version->version_minor = dev->driver->minor;
++ version->version_patchlevel = dev->driver->patchlevel;
++ DRM_COPY(version->name, dev->driver->name);
++ DRM_COPY(version->date, dev->driver->date);
++ DRM_COPY(version->desc, dev->driver->desc);
++
++ return 0;
++}
++
++/**
++ * Called whenever a process performs an ioctl on /dev/drm.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ *
++ * Looks up the ioctl function in the ::ioctls table, checking for root
++ * privileges if so required, and dispatches to the respective function.
++ *
++ * Copies data in and out according to the size and direction given in cmd,
++ * which must match the ioctl cmd known by the kernel. The kernel uses a 512
++ * byte stack buffer to store the ioctl arguments in kernel space. Should we
++ * ever need much larger ioctl arguments, we may need to allocate memory.
++ */ ++int drm_ioctl(struct inode *inode, struct file *filp, ++ unsigned int cmd, unsigned long arg) ++{ ++ return drm_unlocked_ioctl(filp, cmd, arg); ++} ++EXPORT_SYMBOL(drm_ioctl); ++ ++long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ struct drm_file *file_priv = filp->private_data; ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_ioctl_desc *ioctl; ++ drm_ioctl_t *func; ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ int retcode = -EINVAL; ++ char kdata[512]; ++ ++ atomic_inc(&dev->ioctl_count); ++ atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ++ ++file_priv->ioctl_count; ++ ++ DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", ++ current->pid, cmd, nr, (long)old_encode_dev(file_priv->minor->device), ++ file_priv->authenticated); ++ ++ if ((nr >= DRM_CORE_IOCTL_COUNT) && ++ ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) ++ goto err_i1; ++ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) ++ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) ++ ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; ++ else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { ++ ioctl = &drm_ioctls[nr]; ++ cmd = ioctl->cmd; ++ } else { ++ retcode = -EINVAL; ++ goto err_i1; ++ } ++#if 0 ++ /* ++ * This check is disabled, because driver private ioctl->cmd ++ * are not the ioctl commands with size and direction bits but ++ * just the indices. The DRM core ioctl->cmd are the proper ioctl ++ * commands. The drivers' ioctl tables need to be fixed. ++ */ ++ if (ioctl->cmd != cmd) { ++ retcode = -EINVAL; ++ goto err_i1; ++ } ++#endif ++ ++ func = ioctl->func; ++ /* is there a local override? */ ++ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) ++ func = dev->driver->dma_ioctl; ++ ++ if (cmd & IOC_IN) { ++ if (copy_from_user(kdata, (void __user *)arg, ++ _IOC_SIZE(cmd)) != 0) { ++ retcode = -EACCES; ++ goto err_i1; ++ } ++ } ++ ++ if (!func) { ++ DRM_DEBUG("no function\n"); ++ retcode = -EINVAL; ++ } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || ++ ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || ++ ((ioctl->flags & DRM_MASTER) && !file_priv->master)) { ++ retcode = -EACCES; ++ } else { ++ retcode = func(dev, kdata, file_priv); ++ } ++ ++ if (cmd & IOC_OUT) { ++ if (copy_to_user((void __user *)arg, kdata, ++ _IOC_SIZE(cmd)) != 0) ++ retcode = -EACCES; ++ } ++ ++err_i1: ++ atomic_dec(&dev->ioctl_count); ++ if (retcode) ++ DRM_DEBUG("ret = %d\n", retcode); ++ return retcode; ++} ++EXPORT_SYMBOL(drm_unlocked_ioctl); ++ ++drm_local_map_t *drm_getsarea(struct drm_device *dev) ++{ ++ struct drm_map_list *entry; ++ ++ list_for_each_entry(entry, &dev->maplist, head) { ++ if (entry->map && entry->map->type == _DRM_SHM && ++ (entry->map->flags & _DRM_CONTAINS_LOCK)) { ++ return entry->map; ++ } ++ } ++ return NULL; ++} ++EXPORT_SYMBOL(drm_getsarea); +diff -Nurd git/drivers/gpu/drm-tungsten/drm_fence.c git-nokia/drivers/gpu/drm-tungsten/drm_fence.c +--- git/drivers/gpu/drm-tungsten/drm_fence.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_fence.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,829 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++ ++/* ++ * Convenience function to be called by fence::wait methods that ++ * need polling. ++ */ ++ ++int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy, ++ int interruptible, uint32_t mask, ++ unsigned long end_jiffies) ++{ ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; ++ uint32_t count = 0; ++ int ret; ++ ++ DECLARE_WAITQUEUE(entry, current); ++ add_wait_queue(&fc->fence_queue, &entry); ++ ++ ret = 0; ++ ++ for (;;) { ++ __set_current_state((interruptible) ? ++ TASK_INTERRUPTIBLE : ++ TASK_UNINTERRUPTIBLE); ++ if (drm_fence_object_signaled(fence, mask)) ++ break; ++ if (time_after_eq(jiffies, end_jiffies)) { ++ ret = -EBUSY; ++ break; ++ } ++ if (lazy) ++ schedule_timeout(1); ++ else if ((++count & 0x0F) == 0){ ++ __set_current_state(TASK_RUNNING); ++ schedule(); ++ __set_current_state((interruptible) ? ++ TASK_INTERRUPTIBLE : ++ TASK_UNINTERRUPTIBLE); ++ } ++ if (interruptible && signal_pending(current)) { ++ ret = -EAGAIN; ++ break; ++ } ++ } ++ __set_current_state(TASK_RUNNING); ++ remove_wait_queue(&fc->fence_queue, &entry); ++ return ret; ++} ++EXPORT_SYMBOL(drm_fence_wait_polling); ++ ++/* ++ * Typically called by the IRQ handler. ++ */ ++ ++void drm_fence_handler(struct drm_device *dev, uint32_t fence_class, ++ uint32_t sequence, uint32_t type, uint32_t error) ++{ ++ int wake = 0; ++ uint32_t diff; ++ uint32_t relevant_type; ++ uint32_t new_type; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class]; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ struct list_head *head; ++ struct drm_fence_object *fence, *next; ++ int found = 0; ++ ++ if (list_empty(&fc->ring)) ++ return; ++ ++ list_for_each_entry(fence, &fc->ring, ring) { ++ diff = (sequence - fence->sequence) & driver->sequence_mask; ++ if (diff > driver->wrap_diff) { ++ found = 1; ++ break; ++ } ++ } ++ ++ fc->waiting_types &= ~type; ++ head = (found) ? 
&fence->ring : &fc->ring; ++ ++ list_for_each_entry_safe_reverse(fence, next, head, ring) { ++ if (&fence->ring == &fc->ring) ++ break; ++ ++ if (error) { ++ fence->error = error; ++ fence->signaled_types = fence->type; ++ list_del_init(&fence->ring); ++ wake = 1; ++ break; ++ } ++ ++ if (type & DRM_FENCE_TYPE_EXE) ++ type |= fence->native_types; ++ ++ relevant_type = type & fence->type; ++ new_type = (fence->signaled_types | relevant_type) ^ ++ fence->signaled_types; ++ ++ if (new_type) { ++ fence->signaled_types |= new_type; ++ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n", ++ fence->base.hash.key, fence->signaled_types); ++ ++ if (driver->needed_flush) ++ fc->pending_flush |= driver->needed_flush(fence); ++ ++ if (new_type & fence->waiting_types) ++ wake = 1; ++ } ++ ++ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types; ++ ++ if (!(fence->type & ~fence->signaled_types)) { ++ DRM_DEBUG("Fence completely signaled 0x%08lx\n", ++ fence->base.hash.key); ++ list_del_init(&fence->ring); ++ } ++ } ++ ++ /* ++ * Reinstate lost waiting types. ++ */ ++ ++ if ((fc->waiting_types & type) != type) { ++ head = head->prev; ++ list_for_each_entry(fence, head, ring) { ++ if (&fence->ring == &fc->ring) ++ break; ++ diff = (fc->highest_waiting_sequence - fence->sequence) & ++ driver->sequence_mask; ++ if (diff > driver->wrap_diff) ++ break; ++ ++ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types; ++ } ++ } ++ ++ if (wake) ++ wake_up_all(&fc->fence_queue); ++} ++EXPORT_SYMBOL(drm_fence_handler); ++ ++static void drm_fence_unring(struct drm_device *dev, struct list_head *ring) ++{ ++ struct drm_fence_manager *fm = &dev->fm; ++ unsigned long flags; ++ ++ write_lock_irqsave(&fm->lock, flags); ++ list_del_init(ring); ++ write_unlock_irqrestore(&fm->lock, flags); ++} ++ ++void drm_fence_usage_deref_locked(struct drm_fence_object **fence) ++{ ++ struct drm_fence_object *tmp_fence = *fence; ++ struct drm_device *dev = tmp_fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ *fence = NULL; ++ if (atomic_dec_and_test(&tmp_fence->usage)) { ++ drm_fence_unring(dev, &tmp_fence->ring); ++ DRM_DEBUG("Destroyed a fence object 0x%08lx\n", ++ tmp_fence->base.hash.key); ++ atomic_dec(&fm->count); ++ BUG_ON(!list_empty(&tmp_fence->base.list)); ++ drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); ++ } ++} ++EXPORT_SYMBOL(drm_fence_usage_deref_locked); ++ ++void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence) ++{ ++ struct drm_fence_object *tmp_fence = *fence; ++ struct drm_device *dev = tmp_fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ ++ *fence = NULL; ++ if (atomic_dec_and_test(&tmp_fence->usage)) { ++ mutex_lock(&dev->struct_mutex); ++ if (atomic_read(&tmp_fence->usage) == 0) { ++ drm_fence_unring(dev, &tmp_fence->ring); ++ atomic_dec(&fm->count); ++ BUG_ON(!list_empty(&tmp_fence->base.list)); ++ drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ } ++} ++EXPORT_SYMBOL(drm_fence_usage_deref_unlocked); ++ ++struct drm_fence_object ++*drm_fence_reference_locked(struct drm_fence_object *src) ++{ ++ DRM_ASSERT_LOCKED(&src->dev->struct_mutex); ++ ++ atomic_inc(&src->usage); ++ return src; ++} ++ ++void drm_fence_reference_unlocked(struct drm_fence_object **dst, ++ struct drm_fence_object *src) ++{ ++ mutex_lock(&src->dev->struct_mutex); ++ *dst = src; ++ atomic_inc(&src->usage); ++ mutex_unlock(&src->dev->struct_mutex); ++} 
++EXPORT_SYMBOL(drm_fence_reference_unlocked); ++ ++static void drm_fence_object_destroy(struct drm_file *priv, ++ struct drm_user_object *base) ++{ ++ struct drm_fence_object *fence = ++ drm_user_object_entry(base, struct drm_fence_object, base); ++ ++ drm_fence_usage_deref_locked(&fence); ++} ++ ++int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask) ++{ ++ unsigned long flags; ++ int signaled; ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ ++ mask &= fence->type; ++ read_lock_irqsave(&fm->lock, flags); ++ signaled = (mask & fence->signaled_types) == mask; ++ read_unlock_irqrestore(&fm->lock, flags); ++ if (!signaled && driver->poll) { ++ write_lock_irqsave(&fm->lock, flags); ++ driver->poll(dev, fence->fence_class, mask); ++ signaled = (mask & fence->signaled_types) == mask; ++ write_unlock_irqrestore(&fm->lock, flags); ++ } ++ return signaled; ++} ++EXPORT_SYMBOL(drm_fence_object_signaled); ++ ++ ++int drm_fence_object_flush(struct drm_fence_object *fence, ++ uint32_t type) ++{ ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ unsigned long irq_flags; ++ uint32_t saved_pending_flush; ++ uint32_t diff; ++ int call_flush; ++ ++ if (type & ~fence->type) { ++ DRM_ERROR("Flush trying to extend fence type, " ++ "0x%x, 0x%x\n", type, fence->type); ++ return -EINVAL; ++ } ++ ++ write_lock_irqsave(&fm->lock, irq_flags); ++ fence->waiting_types |= type; ++ fc->waiting_types |= fence->waiting_types; ++ diff = (fence->sequence - fc->highest_waiting_sequence) & ++ driver->sequence_mask; ++ ++ if (diff < driver->wrap_diff) ++ fc->highest_waiting_sequence = fence->sequence; ++ ++ /* ++ * fence->waiting_types has changed. Determine whether ++ * we need to initiate some kind of flush as a result of this. ++ */ ++ ++ saved_pending_flush = fc->pending_flush; ++ if (driver->needed_flush) ++ fc->pending_flush |= driver->needed_flush(fence); ++ ++ if (driver->poll) ++ driver->poll(dev, fence->fence_class, fence->waiting_types); ++ ++ call_flush = fc->pending_flush; ++ write_unlock_irqrestore(&fm->lock, irq_flags); ++ ++ if (call_flush && driver->flush) ++ driver->flush(dev, fence->fence_class); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_fence_object_flush); ++ ++/* ++ * Make sure old fence objects are signaled before their fence sequences are ++ * wrapped around and reused. 
++ */
++
++void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
++ uint32_t sequence)
++{
++ struct drm_fence_manager *fm = &dev->fm;
++ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
++ struct drm_fence_object *fence;
++ unsigned long irq_flags;
++ struct drm_fence_driver *driver = dev->driver->fence_driver;
++ int call_flush;
++
++ uint32_t diff;
++
++ write_lock_irqsave(&fm->lock, irq_flags);
++
++ list_for_each_entry_reverse(fence, &fc->ring, ring) {
++ diff = (sequence - fence->sequence) & driver->sequence_mask;
++ if (diff <= driver->flush_diff)
++ break;
++
++ fence->waiting_types = fence->type;
++ fc->waiting_types |= fence->type;
++
++ if (driver->needed_flush)
++ fc->pending_flush |= driver->needed_flush(fence);
++ }
++
++ if (driver->poll)
++ driver->poll(dev, fence_class, fc->waiting_types);
++
++ call_flush = fc->pending_flush;
++ write_unlock_irqrestore(&fm->lock, irq_flags);
++
++ if (call_flush && driver->flush)
++ driver->flush(dev, fence->fence_class);
++
++ /*
++ * FIXME: Should we implement a wait here for really old fences?
++ */
++
++}
++EXPORT_SYMBOL(drm_fence_flush_old);
++
++int drm_fence_object_wait(struct drm_fence_object *fence,
++ int lazy, int ignore_signals, uint32_t mask)
++{
++ struct drm_device *dev = fence->dev;
++ struct drm_fence_driver *driver = dev->driver->fence_driver;
++ struct drm_fence_manager *fm = &dev->fm;
++ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
++ int ret = 0;
++ unsigned long _end = 3 * DRM_HZ;
++
++ if (mask & ~fence->type) {
++ DRM_ERROR("Wait trying to extend fence type"
++ " 0x%08x 0x%08x\n", mask, fence->type);
++ BUG();
++ return -EINVAL;
++ }
++
++ if (driver->wait)
++ return driver->wait(fence, lazy, !ignore_signals, mask);
++
++
++ drm_fence_object_flush(fence, mask);
++ if (driver->has_irq(dev, fence->fence_class, mask)) {
++ if (!ignore_signals)
++ ret = wait_event_interruptible_timeout
++ (fc->fence_queue,
++ drm_fence_object_signaled(fence, mask),
++ 3 * DRM_HZ);
++ else
++ ret = wait_event_timeout
++ (fc->fence_queue,
++ drm_fence_object_signaled(fence, mask),
++ 3 * DRM_HZ);
++
++ if (unlikely(ret == -ERESTARTSYS))
++ return -EAGAIN;
++
++ if (unlikely(ret == 0))
++ return -EBUSY;
++
++ return 0;
++ }
++
++ return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
++ _end);
++}
++EXPORT_SYMBOL(drm_fence_object_wait);
++
++
++
++int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
++ uint32_t fence_class, uint32_t type)
++{
++ struct drm_device *dev = fence->dev;
++ struct drm_fence_manager *fm = &dev->fm;
++ struct drm_fence_driver *driver = dev->driver->fence_driver;
++ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
++ unsigned long flags;
++ uint32_t sequence;
++ uint32_t native_types;
++ int ret;
++
++ drm_fence_unring(dev, &fence->ring);
++ ret = driver->emit(dev, fence_class, fence_flags, &sequence,
++ &native_types);
++ if (ret)
++ return ret;
++
++ write_lock_irqsave(&fm->lock, flags);
++ fence->fence_class = fence_class;
++ fence->type = type;
++ fence->waiting_types = 0;
++ fence->signaled_types = 0;
++ fence->error = 0;
++ fence->sequence = sequence;
++ fence->native_types = native_types;
++ if (list_empty(&fc->ring))
++ fc->highest_waiting_sequence = sequence - 1;
++ list_add_tail(&fence->ring, &fc->ring);
++ fc->latest_queued_sequence = sequence;
++ write_unlock_irqrestore(&fm->lock, flags);
++ return 0;
++}
++EXPORT_SYMBOL(drm_fence_object_emit);
++
++static int
drm_fence_object_init(struct drm_device *dev, uint32_t fence_class, ++ uint32_t type, ++ uint32_t fence_flags, ++ struct drm_fence_object *fence) ++{ ++ int ret = 0; ++ unsigned long flags; ++ struct drm_fence_manager *fm = &dev->fm; ++ ++ mutex_lock(&dev->struct_mutex); ++ atomic_set(&fence->usage, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ write_lock_irqsave(&fm->lock, flags); ++ INIT_LIST_HEAD(&fence->ring); ++ ++ /* ++ * Avoid hitting BUG() for kernel-only fence objects. ++ */ ++ ++ INIT_LIST_HEAD(&fence->base.list); ++ fence->fence_class = fence_class; ++ fence->type = type; ++ fence->signaled_types = 0; ++ fence->waiting_types = 0; ++ fence->sequence = 0; ++ fence->error = 0; ++ fence->dev = dev; ++ write_unlock_irqrestore(&fm->lock, flags); ++ if (fence_flags & DRM_FENCE_FLAG_EMIT) { ++ ret = drm_fence_object_emit(fence, fence_flags, ++ fence->fence_class, type); ++ } ++ return ret; ++} ++ ++int drm_fence_add_user_object(struct drm_file *priv, ++ struct drm_fence_object *fence, int shareable) ++{ ++ struct drm_device *dev = priv->minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_add_user_object(priv, &fence->base, shareable); ++ if (ret) ++ goto out; ++ atomic_inc(&fence->usage); ++ fence->base.type = drm_fence_type; ++ fence->base.remove = &drm_fence_object_destroy; ++ DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key); ++out: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++EXPORT_SYMBOL(drm_fence_add_user_object); ++ ++int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class, ++ uint32_t type, unsigned flags, ++ struct drm_fence_object **c_fence) ++{ ++ struct drm_fence_object *fence; ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ ++ fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE); ++ if (!fence) { ++ DRM_ERROR("Out of memory creating fence object\n"); ++ return -ENOMEM; ++ } ++ ret = drm_fence_object_init(dev, fence_class, type, flags, fence); ++ if (ret) { ++ drm_fence_usage_deref_unlocked(&fence); ++ return ret; ++ } ++ *c_fence = fence; ++ atomic_inc(&fm->count); ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_fence_object_create); ++ ++void drm_fence_manager_init(struct drm_device *dev) ++{ ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fence_class; ++ struct drm_fence_driver *fed = dev->driver->fence_driver; ++ int i; ++ unsigned long flags; ++ ++ rwlock_init(&fm->lock); ++ write_lock_irqsave(&fm->lock, flags); ++ fm->initialized = 0; ++ if (!fed) ++ goto out_unlock; ++ ++ fm->initialized = 1; ++ fm->num_classes = fed->num_classes; ++ BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES); ++ ++ for (i = 0; i < fm->num_classes; ++i) { ++ fence_class = &fm->fence_class[i]; ++ ++ memset(fence_class, 0, sizeof(*fence_class)); ++ INIT_LIST_HEAD(&fence_class->ring); ++ DRM_INIT_WAITQUEUE(&fence_class->fence_queue); ++ } ++ ++ atomic_set(&fm->count, 0); ++ out_unlock: ++ write_unlock_irqrestore(&fm->lock, flags); ++} ++ ++void drm_fence_fill_arg(struct drm_fence_object *fence, ++ struct drm_fence_arg *arg) ++{ ++ struct drm_device *dev = fence->dev; ++ struct drm_fence_manager *fm = &dev->fm; ++ unsigned long irq_flags; ++ ++ read_lock_irqsave(&fm->lock, irq_flags); ++ arg->handle = fence->base.hash.key; ++ arg->fence_class = fence->fence_class; ++ arg->type = fence->type; ++ arg->signaled = fence->signaled_types; ++ arg->error = fence->error; ++ arg->sequence = fence->sequence; ++ read_unlock_irqrestore(&fm->lock, irq_flags); ++} ++EXPORT_SYMBOL(drm_fence_fill_arg); ++ ++void 
drm_fence_manager_takedown(struct drm_device *dev) ++{ ++} ++ ++struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv, ++ uint32_t handle) ++{ ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_user_object *uo; ++ struct drm_fence_object *fence; ++ ++ mutex_lock(&dev->struct_mutex); ++ uo = drm_lookup_user_object(priv, handle); ++ if (!uo || (uo->type != drm_fence_type)) { ++ mutex_unlock(&dev->struct_mutex); ++ return NULL; ++ } ++ fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base)); ++ mutex_unlock(&dev->struct_mutex); ++ return fence; ++} ++ ++int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ if (arg->flags & DRM_FENCE_FLAG_EMIT) ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ret = drm_fence_object_create(dev, arg->fence_class, ++ arg->type, arg->flags, &fence); ++ if (ret) ++ return ret; ++ ret = drm_fence_add_user_object(file_priv, fence, ++ arg->flags & ++ DRM_FENCE_FLAG_SHAREABLE); ++ if (ret) { ++ drm_fence_usage_deref_unlocked(&fence); ++ return ret; ++ } ++ ++ /* ++ * usage > 0. No need to lock dev->struct_mutex; ++ */ ++ ++ arg->handle = fence->base.hash.key; ++ ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} ++ ++int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ struct drm_user_object *uo; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo); ++ if (ret) ++ return ret; ++ fence = drm_lookup_fence_object(file_priv, arg->handle); ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} ++ ++ ++int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ return drm_user_object_unref(file_priv, arg->handle, drm_fence_type); ++} ++ ++int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ fence = drm_lookup_fence_object(file_priv, arg->handle); ++ if (!fence) ++ return -EINVAL; ++ ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} ++ ++int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ fence = 
drm_lookup_fence_object(file_priv, arg->handle); ++ if (!fence) ++ return -EINVAL; ++ ret = drm_fence_object_flush(fence, arg->type); ++ ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} ++ ++ ++int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ fence = drm_lookup_fence_object(file_priv, arg->handle); ++ if (!fence) ++ return -EINVAL; ++ ret = drm_fence_object_wait(fence, ++ arg->flags & DRM_FENCE_FLAG_WAIT_LAZY, ++ 0, arg->type); ++ ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} ++ ++ ++int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ fence = drm_lookup_fence_object(file_priv, arg->handle); ++ if (!fence) ++ return -EINVAL; ++ ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class, ++ arg->type); ++ ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} ++ ++int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ int ret; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_arg *arg = data; ++ struct drm_fence_object *fence; ++ ret = 0; ++ ++ if (!fm->initialized) { ++ DRM_ERROR("The DRM driver does not support fencing.\n"); ++ return -EINVAL; ++ } ++ ++ if (!dev->bm.initialized) { ++ DRM_ERROR("Buffer object manager is not initialized\n"); ++ return -EINVAL; ++ } ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ret = drm_fence_buffer_objects(dev, NULL, arg->flags, ++ NULL, &fence); ++ if (ret) ++ return ret; ++ ++ if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) { ++ ret = drm_fence_add_user_object(file_priv, fence, ++ arg->flags & ++ DRM_FENCE_FLAG_SHAREABLE); ++ if (ret) ++ return ret; ++ } ++ ++ arg->handle = fence->base.hash.key; ++ ++ drm_fence_fill_arg(fence, arg); ++ drm_fence_usage_deref_unlocked(&fence); ++ ++ return ret; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/drm_fops.c git-nokia/drivers/gpu/drm-tungsten/drm_fops.c +--- git/drivers/gpu/drm-tungsten/drm_fops.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_fops.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,532 @@ ++/** ++ * \file drm_fops.c ++ * File operations for DRM ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Daryll Strauss ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm_sarea.h" ++#include ++ ++static int drm_open_helper(struct inode *inode, struct file *filp, ++ struct drm_device * dev); ++ ++static int drm_setup(struct drm_device * dev) ++{ ++ drm_local_map_t *map; ++ int i; ++ int ret; ++ int sareapage; ++ ++ if (dev->driver->firstopen) { ++ ret = dev->driver->firstopen(dev); ++ if (ret != 0) ++ return ret; ++ } ++ ++ dev->magicfree.next = NULL; ++ ++ /* prebuild the SAREA */ ++ sareapage = max(SAREA_MAX, PAGE_SIZE); ++ i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map); ++ if (i != 0) ++ return i; ++ ++ atomic_set(&dev->ioctl_count, 0); ++ atomic_set(&dev->vma_count, 0); ++ dev->buf_use = 0; ++ atomic_set(&dev->buf_alloc, 0); ++ ++ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) { ++ i = drm_dma_setup(dev); ++ if (i < 0) ++ return i; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(dev->counts); i++) ++ atomic_set(&dev->counts[i], 0); ++ ++ drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER); ++ INIT_LIST_HEAD(&dev->magicfree); ++ ++ dev->sigdata.lock = NULL; ++ init_waitqueue_head(&dev->lock.lock_queue); ++ dev->queue_count = 0; ++ dev->queue_reserved = 0; ++ dev->queue_slots = 0; ++ dev->queuelist = NULL; ++ dev->context_flag = 0; ++ dev->interrupt_flag = 0; ++ dev->dma_flag = 0; ++ dev->last_context = 0; ++ dev->last_switch = 0; ++ dev->last_checked = 0; ++ init_waitqueue_head(&dev->context_wait); ++ dev->if_version = 0; ++ ++ dev->ctx_start = 0; ++ dev->lck_start = 0; ++ ++ dev->buf_async = NULL; ++ init_waitqueue_head(&dev->buf_readers); ++ init_waitqueue_head(&dev->buf_writers); ++ ++ DRM_DEBUG("\n"); ++ ++ /* ++ * The kernel's context could be created here, but is now created ++ * in drm_dma_enqueue. This is more resource-efficient for ++ * hardware that does not do DMA, but may mean that ++ * drm_select_queue fails between the time the interrupt is ++ * initialized and the time the queues are initialized. ++ */ ++ ++ return 0; ++} ++ ++/** ++ * Open file. ++ * ++ * \param inode device inode ++ * \param filp file pointer. ++ * \return zero on success or a negative number on failure. ++ * ++ * Searches the DRM device with the same minor number, calls open_helper(), and ++ * increments the device open count. If the open count was previous at zero, ++ * i.e., it's the first that the device is open, then calls setup(). 
++ */
++int drm_open(struct inode *inode, struct file *filp)
++{
++	struct drm_device *dev = NULL;
++	int minor_id = iminor(inode);
++	struct drm_minor *minor;
++	int retcode = 0;
++
++	minor = idr_find(&drm_minors_idr, minor_id);
++	if (!minor)
++		return -ENODEV;
++
++	if (!(dev = minor->dev))
++		return -ENODEV;
++
++	retcode = drm_open_helper(inode, filp, dev);
++	if (!retcode) {
++		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
++		spin_lock(&dev->count_lock);
++		if (!dev->open_count++) {
++			spin_unlock(&dev->count_lock);
++			retcode = drm_setup(dev);
++			goto out;
++		}
++		spin_unlock(&dev->count_lock);
++	}
++
++out:
++	mutex_lock(&dev->struct_mutex);
++	BUG_ON((dev->dev_mapping != NULL) &&
++	       (dev->dev_mapping != inode->i_mapping));
++	if (dev->dev_mapping == NULL)
++		dev->dev_mapping = inode->i_mapping;
++	mutex_unlock(&dev->struct_mutex);
++
++	return retcode;
++}
++EXPORT_SYMBOL(drm_open);
++
++/**
++ * File \c open operation.
++ *
++ * \param inode device inode.
++ * \param filp file pointer.
++ *
++ * Puts the dev->fops corresponding to the device minor number into
++ * \p filp, calls the \c open method, and restores the file operations.
++ */
++int drm_stub_open(struct inode *inode, struct file *filp)
++{
++	struct drm_device *dev = NULL;
++	struct drm_minor *minor;
++	int minor_id = iminor(inode);
++	int err = -ENODEV;
++	const struct file_operations *old_fops;
++
++	DRM_DEBUG("\n");
++
++	minor = idr_find(&drm_minors_idr, minor_id);
++	if (!minor)
++		return -ENODEV;
++
++	if (!(dev = minor->dev))
++		return -ENODEV;
++
++	old_fops = filp->f_op;
++	filp->f_op = fops_get(&dev->driver->fops);
++	if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
++		fops_put(filp->f_op);
++		filp->f_op = fops_get(old_fops);
++	}
++	fops_put(old_fops);
++
++	return err;
++}
++
++/**
++ * Check whether DRI will run on this CPU.
++ *
++ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
++ */
++static int drm_cpu_valid(void)
++{
++#if defined(__i386__)
++	if (boot_cpu_data.x86 == 3)
++		return 0;	/* No cmpxchg on a 386 */
++#endif
++#if defined(__sparc__) && !defined(__sparc_v9__)
++	return 0;	/* No cmpxchg before v9 sparc. */
++#endif
++	return 1;
++}
++
++/**
++ * Called whenever a process opens /dev/drm.
++ *
++ * \param inode device inode.
++ * \param filp file pointer.
++ * \param dev device.
++ * \return zero on success or a negative number on failure.
++ *
++ * Creates and initializes a drm_file structure for the file private data in \p
++ * filp and adds it into the doubly linked list in \p dev.
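++ *
++ * The first client to open an otherwise unused device becomes the
++ * master (see the list_empty() check on dev->filelist below).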
++ */ ++static int drm_open_helper(struct inode *inode, struct file *filp, ++ struct drm_device * dev) ++{ ++ int minor_id = iminor(inode); ++ struct drm_file *priv; ++ int ret; ++ int i, j; ++ ++ if (filp->f_flags & O_EXCL) ++ return -EBUSY; /* No exclusive opens */ ++ if (!drm_cpu_valid()) ++ return -EINVAL; ++ ++ DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor_id); ++ ++ priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES); ++ if (!priv) ++ return -ENOMEM; ++ ++ memset(priv, 0, sizeof(*priv)); ++ filp->private_data = priv; ++ priv->filp = filp; ++ priv->uid = current->euid; ++ priv->pid = current->pid; ++ priv->minor = idr_find(&drm_minors_idr, minor_id); ++ priv->ioctl_count = 0; ++ /* for compatibility root is always authenticated */ ++ priv->authenticated = capable(CAP_SYS_ADMIN); ++ priv->lock_count = 0; ++ ++ INIT_LIST_HEAD(&priv->lhead); ++ INIT_LIST_HEAD(&priv->refd_objects); ++ ++ for (i = 0; i < _DRM_NO_REF_TYPES; ++i) { ++ ret = drm_ht_create(&priv->refd_object_hash[i], ++ DRM_FILE_HASH_ORDER); ++ if (ret) ++ break; ++ } ++ ++ if (ret) { ++ for (j = 0; j < i; ++j) ++ drm_ht_remove(&priv->refd_object_hash[j]); ++ goto out_free; ++ } ++ ++ if (dev->driver->driver_features & DRIVER_GEM) ++ drm_gem_open(dev, priv); ++ ++ if (dev->driver->open) { ++ ret = dev->driver->open(dev, priv); ++ if (ret < 0) ++ goto out_free; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ if (list_empty(&dev->filelist)) ++ priv->master = 1; ++ ++ list_add(&priv->lhead, &dev->filelist); ++ mutex_unlock(&dev->struct_mutex); ++ ++#ifdef __alpha__ ++ /* ++ * Default the hose ++ */ ++ if (!dev->hose) { ++ struct pci_dev *pci_dev; ++ pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); ++ if (pci_dev) { ++ dev->hose = pci_dev->sysdata; ++ pci_dev_put(pci_dev); ++ } ++ if (!dev->hose) { ++ struct pci_bus *b = pci_bus_b(pci_root_buses.next); ++ if (b) ++ dev->hose = b->sysdata; ++ } ++ } ++#endif ++ ++ return 0; ++ out_free: ++ drm_free(priv, sizeof(*priv), DRM_MEM_FILES); ++ filp->private_data = NULL; ++ return ret; ++} ++ ++/** No-op. */ ++int drm_fasync(int fd, struct file *filp, int on) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ int retcode; ++ ++ DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, ++ (long)old_encode_dev(priv->minor->device)); ++ retcode = fasync_helper(fd, filp, on, &dev->buf_async); ++ if (retcode < 0) ++ return retcode; ++ return 0; ++} ++EXPORT_SYMBOL(drm_fasync); ++ ++static void drm_object_release(struct file *filp) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct list_head *head; ++ struct drm_ref_object *ref_object; ++ int i; ++ ++ /* ++ * Free leftover ref objects created by me. Note that we cannot use ++ * list_for_each() here, as the struct_mutex may be temporarily ++ * released by the remove_() functions, and thus the lists may be ++ * altered. ++ * Also, a drm_remove_ref_object() will not remove it ++ * from the list unless its refcount is 1. ++ */ ++ ++ head = &priv->refd_objects; ++ while (head->next != head) { ++ ref_object = list_entry(head->next, struct drm_ref_object, list); ++ drm_remove_ref_object(priv, ref_object); ++ head = &priv->refd_objects; ++ } ++ ++ for (i = 0; i < _DRM_NO_REF_TYPES; ++i) ++ drm_ht_remove(&priv->refd_object_hash[i]); ++} ++ ++/** ++ * Release file. ++ * ++ * \param inode device inode ++ * \param file_priv DRM file private. ++ * \return zero on success or a negative number on failure. 
++ * ++ * If the hardware lock is held then free it, and take it again for the kernel ++ * context since it's necessary to reclaim buffers. Unlink the file private ++ * data from its list and free it. Decreases the open count and if it reaches ++ * zero calls drm_lastclose(). ++ */ ++int drm_release(struct inode *inode, struct file *filp) ++{ ++ struct drm_file *file_priv = filp->private_data; ++ struct drm_device *dev = file_priv->minor->dev; ++ int retcode = 0; ++ ++ lock_kernel(); ++ ++ DRM_DEBUG("open_count = %d\n", dev->open_count); ++ ++ if (dev->driver->preclose) ++ dev->driver->preclose(dev, file_priv); ++ ++ /* ======================================================== ++ * Begin inline drm_release ++ */ ++ ++ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", ++ current->pid, (long)old_encode_dev(file_priv->minor->device), ++ dev->open_count); ++ ++ if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { ++ if (drm_i_have_hw_lock(dev, file_priv)) { ++ dev->driver->reclaim_buffers_locked(dev, file_priv); ++ } else { ++ unsigned long _end=jiffies + 3*DRM_HZ; ++ int locked = 0; ++ ++ drm_idlelock_take(&dev->lock); ++ ++ /* ++ * Wait for a while. ++ */ ++ ++ do{ ++ spin_lock_bh(&dev->lock.spinlock); ++ locked = dev->lock.idle_has_lock; ++ spin_unlock_bh(&dev->lock.spinlock); ++ if (locked) ++ break; ++ schedule(); ++ } while (!time_after_eq(jiffies, _end)); ++ ++ if (!locked) { ++ DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n" ++ "\tdriver to use reclaim_buffers_idlelocked() instead.\n" ++ "\tI will go on reclaiming the buffers anyway.\n"); ++ } ++ ++ dev->driver->reclaim_buffers_locked(dev, file_priv); ++ drm_idlelock_release(&dev->lock); ++ } ++ } ++ ++ if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) { ++ ++ drm_idlelock_take(&dev->lock); ++ dev->driver->reclaim_buffers_idlelocked(dev, file_priv); ++ drm_idlelock_release(&dev->lock); ++ ++ } ++ ++ if (drm_i_have_hw_lock(dev, file_priv)) { ++ DRM_DEBUG("File %p released, freeing lock for context %d\n", ++ filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); ++ ++ drm_lock_free(&dev->lock, ++ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); ++ } ++ ++ ++ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && ++ !dev->driver->reclaim_buffers_locked) { ++ dev->driver->reclaim_buffers(dev, file_priv); ++ } ++ ++ if (dev->driver->driver_features & DRIVER_GEM) ++ drm_gem_release(dev, file_priv); ++ ++ drm_fasync(-1, filp, 0); ++ ++ mutex_lock(&dev->ctxlist_mutex); ++ ++ if (!list_empty(&dev->ctxlist)) { ++ struct drm_ctx_list *pos, *n; ++ ++ list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { ++ if (pos->tag == file_priv && ++ pos->handle != DRM_KERNEL_CONTEXT) { ++ if (dev->driver->context_dtor) ++ dev->driver->context_dtor(dev, ++ pos->handle); ++ ++ drm_ctxbitmap_free(dev, pos->handle); ++ ++ list_del(&pos->head); ++ drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); ++ --dev->ctx_count; ++ } ++ } ++ } ++ mutex_unlock(&dev->ctxlist_mutex); ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_object_release(filp); ++ if (file_priv->remove_auth_on_close == 1) { ++ struct drm_file *temp; ++ ++ list_for_each_entry(temp, &dev->filelist, lhead) ++ temp->authenticated = 0; ++ } ++ list_del(&file_priv->lhead); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (dev->driver->postclose) ++ dev->driver->postclose(dev, file_priv); ++ drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES); ++ ++ /* ======================================================== ++ * End inline drm_release ++ */ ++ ++ 
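++	/*
++	 * Drop the open count. The last closer runs drm_lastclose(), unless
++	 * ioctls are still in flight or the device is blocked, in which case
++	 * -EBUSY is returned instead.
++	 */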
++	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
++	spin_lock(&dev->count_lock);
++	if (!--dev->open_count) {
++		if (atomic_read(&dev->ioctl_count) || dev->blocked) {
++			DRM_ERROR("Device busy: %d %d\n",
++				  atomic_read(&dev->ioctl_count), dev->blocked);
++			spin_unlock(&dev->count_lock);
++			unlock_kernel();
++			return -EBUSY;
++		}
++		spin_unlock(&dev->count_lock);
++		unlock_kernel();
++		return drm_lastclose(dev);
++	}
++	spin_unlock(&dev->count_lock);
++
++	unlock_kernel();
++
++	return retcode;
++}
++EXPORT_SYMBOL(drm_release);
++
++/** No-op. */
++/* This is to deal with older X servers that believe 0 means data is
++ * available which is not the correct return for a poll function.
++ * This cannot be fixed until the Xserver is fixed. Xserver will need
++ * to set a newer interface version to avoid breaking older Xservers.
++ * Without fixing the Xserver you get: "WaitForSomething(): select: errno=22"
++ * http://freedesktop.org/bugzilla/show_bug.cgi?id=1505 if you try
++ * to return the correct response.
++ */
++unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
++{
++	/* return (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM); */
++	return 0;
++}
++EXPORT_SYMBOL(drm_poll);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_gem.c git-nokia/drivers/gpu/drm-tungsten/drm_gem.c
+--- git/drivers/gpu/drm-tungsten/drm_gem.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_gem.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,444 @@
++/*
++ * Copyright © 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Eric Anholt
++ *
++ */
++
++#include <linux/version.h>
++
++#include "drmP.h"
++
++#if OS_HAS_GEM
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/uaccess.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/module.h>
++#include <linux/mman.h>
++#include <linux/pagemap.h>
++
++/** @file drm_gem.c
++ *
++ * This file provides some of the base ioctls and library routines for
++ * the graphics memory manager implemented by each device driver.
++ *
++ * Because various devices have different requirements in terms of
++ * synchronization and migration strategies, implementing that is left up to
++ * the driver, and all that the general API provides should be generic --
++ * allocating objects, reading/writing data with the cpu, freeing objects.
++ * Even there, platform-dependent optimizations for reading/writing data with
++ * the CPU mean we'll likely hook those out to driver-specific calls.
However, ++ * the DRI2 implementation wants to have at least allocate/mmap be generic. ++ * ++ * The goal was to have swap-backed object allocation managed through ++ * struct file. However, file descriptors as handles to a struct file have ++ * two major failings: ++ * - Process limits prevent more than 1024 or so being used at a time by ++ * default. ++ * - Inability to allocate high fds will aggravate the X Server's select() ++ * handling, and likely that of many GL client applications as well. ++ * ++ * This led to a plan of using our own integer IDs (called handles, following ++ * DRM terminology) to mimic fds, and implement the fd syscalls we need as ++ * ioctls. The objects themselves will still include the struct file so ++ * that we can transition to fds if the required kernel infrastructure shows ++ * up at a later date, and as our interface with shmfs for memory allocation. ++ */ ++ ++/** ++ * Initialize the GEM device fields ++ */ ++ ++int ++drm_gem_init(struct drm_device *dev) ++{ ++ spin_lock_init(&dev->object_name_lock); ++ idr_init(&dev->object_name_idr); ++ atomic_set(&dev->object_count, 0); ++ atomic_set(&dev->object_memory, 0); ++ atomic_set(&dev->pin_count, 0); ++ atomic_set(&dev->pin_memory, 0); ++ atomic_set(&dev->gtt_count, 0); ++ atomic_set(&dev->gtt_memory, 0); ++ return 0; ++} ++ ++/** ++ * Allocate a GEM object of the specified size with shmfs backing store ++ */ ++struct drm_gem_object * ++drm_gem_object_alloc(struct drm_device *dev, size_t size) ++{ ++ struct drm_gem_object *obj; ++ ++ BUG_ON((size & (PAGE_SIZE - 1)) != 0); ++ ++ obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); ++ ++ obj->dev = dev; ++ obj->filp = shmem_file_setup("drm mm object", size, 0); ++ if (IS_ERR(obj->filp)) { ++ kfree(obj); ++ return NULL; ++ } ++ ++ kref_init(&obj->refcount); ++ kref_init(&obj->handlecount); ++ obj->size = size; ++ if (dev->driver->gem_init_object != NULL && ++ dev->driver->gem_init_object(obj) != 0) { ++ fput(obj->filp); ++ kfree(obj); ++ return NULL; ++ } ++ atomic_inc(&dev->object_count); ++ atomic_add(obj->size, &dev->object_memory); ++ return obj; ++} ++EXPORT_SYMBOL(drm_gem_object_alloc); ++ ++/** ++ * Removes the mapping from handle to filp for this object. ++ */ ++static int ++drm_gem_handle_delete(struct drm_file *filp, int handle) ++{ ++ struct drm_device *dev; ++ struct drm_gem_object *obj; ++ ++ /* This is gross. The idr system doesn't let us try a delete and ++ * return an error code. It just spews if you fail at deleting. ++ * So, we have to grab a lock around finding the object and then ++ * doing the delete on it and dropping the refcount, or the user ++ * could race us to double-decrement the refcount and cause a ++ * use-after-free later. Given the frequency of our handle lookups, ++ * we may want to use ida for number allocation and a hash table ++ * for the pointers, anyway. ++ */ ++ spin_lock(&filp->table_lock); ++ ++ /* Check if we currently have a reference on the object */ ++ obj = idr_find(&filp->object_idr, handle); ++ if (obj == NULL) { ++ spin_unlock(&filp->table_lock); ++ return -EINVAL; ++ } ++ dev = obj->dev; ++ ++ /* Release reference and decrement refcount. */ ++ idr_remove(&filp->object_idr, handle); ++ spin_unlock(&filp->table_lock); ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_handle_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/** ++ * Create a handle for this object. This adds a handle reference ++ * to the object, which includes a regular reference count. 
Callers ++ * will likely want to dereference the object afterwards. ++ */ ++int ++drm_gem_handle_create(struct drm_file *file_priv, ++ struct drm_gem_object *obj, ++ int *handlep) ++{ ++ int ret; ++ ++ /* ++ * Get the user-visible handle using idr. ++ */ ++again: ++ /* ensure there is space available to allocate a handle */ ++ if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) ++ return -ENOMEM; ++ ++ /* do the allocation under our spinlock */ ++ spin_lock(&file_priv->table_lock); ++ ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep); ++ spin_unlock(&file_priv->table_lock); ++ if (ret == -EAGAIN) ++ goto again; ++ ++ if (ret != 0) ++ return ret; ++ ++ drm_gem_object_handle_reference(obj); ++ return 0; ++} ++EXPORT_SYMBOL(drm_gem_handle_create); ++ ++/** Returns a reference to the object named by the handle. */ ++struct drm_gem_object * ++drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, ++ int handle) ++{ ++ struct drm_gem_object *obj; ++ ++ spin_lock(&filp->table_lock); ++ ++ /* Check if we currently have a reference on the object */ ++ obj = idr_find(&filp->object_idr, handle); ++ if (obj == NULL) { ++ spin_unlock(&filp->table_lock); ++ return NULL; ++ } ++ ++ drm_gem_object_reference(obj); ++ ++ spin_unlock(&filp->table_lock); ++ ++ return obj; ++} ++EXPORT_SYMBOL(drm_gem_object_lookup); ++ ++/** ++ * Releases the handle to an mm object. ++ */ ++int ++drm_gem_close_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_gem_close *args = data; ++ int ret; ++ ++ if (!(dev->driver->driver_features & DRIVER_GEM)) ++ return -ENODEV; ++ ++ ret = drm_gem_handle_delete(file_priv, args->handle); ++ ++ return ret; ++} ++ ++/** ++ * Create a global name for an object, returning the name. ++ * ++ * Note that the name does not hold a reference; when the object ++ * is freed, the name goes away. ++ */ ++int ++drm_gem_flink_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_gem_flink *args = data; ++ struct drm_gem_object *obj; ++ int ret; ++ ++ if (!(dev->driver->driver_features & DRIVER_GEM)) ++ return -ENODEV; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EINVAL; ++ ++again: ++ if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) ++ return -ENOMEM; ++ ++ spin_lock(&dev->object_name_lock); ++ if (obj->name) { ++ spin_unlock(&dev->object_name_lock); ++ return -EEXIST; ++ } ++ ret = idr_get_new_above(&dev->object_name_idr, obj, 1, ++ &obj->name); ++ spin_unlock(&dev->object_name_lock); ++ if (ret == -EAGAIN) ++ goto again; ++ ++ if (ret != 0) { ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ ++ /* ++ * Leave the reference from the lookup around as the ++ * name table now holds one ++ */ ++ args->name = (uint64_t) obj->name; ++ ++ return 0; ++} ++ ++/** ++ * Open an object using the global name, returning a handle and the size. ++ * ++ * This handle (of course) holds a reference to the object, so the object ++ * will not go away until the handle is deleted. 
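++ *
++ * A typical sharing sequence (an illustrative sketch, not something this
++ * interface enforces): the exporting process calls DRM_IOCTL_GEM_FLINK on
++ * its handle and passes the returned name to a second process, which then
++ * calls DRM_IOCTL_GEM_OPEN with that name to get back its own handle and
++ * the object's size.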
++ */ ++int ++drm_gem_open_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_gem_open *args = data; ++ struct drm_gem_object *obj; ++ int ret; ++ int handle; ++ ++ if (!(dev->driver->driver_features & DRIVER_GEM)) ++ return -ENODEV; ++ ++ spin_lock(&dev->object_name_lock); ++ obj = idr_find(&dev->object_name_idr, (int) args->name); ++ if (obj) ++ drm_gem_object_reference(obj); ++ spin_unlock(&dev->object_name_lock); ++ if (!obj) ++ return -ENOENT; ++ ++ ret = drm_gem_handle_create(file_priv, obj, &handle); ++ mutex_lock(&dev->struct_mutex); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ if (ret) ++ return ret; ++ ++ args->handle = handle; ++ args->size = obj->size; ++ ++ return 0; ++} ++ ++/** ++ * Called at device open time, sets up the structure for handling refcounting ++ * of mm objects. ++ */ ++void ++drm_gem_open(struct drm_device *dev, struct drm_file *file_private) ++{ ++ idr_init(&file_private->object_idr); ++ spin_lock_init(&file_private->table_lock); ++} ++ ++/** ++ * Called at device close to release the file's ++ * handle references on objects. ++ */ ++static int ++drm_gem_object_release_handle(int id, void *ptr, void *data) ++{ ++ struct drm_gem_object *obj = ptr; ++ ++ drm_gem_object_handle_unreference(obj); ++ ++ return 0; ++} ++ ++/** ++ * Called at close time when the filp is going away. ++ * ++ * Releases any remaining references on objects by this filp. ++ */ ++void ++drm_gem_release(struct drm_device *dev, struct drm_file *file_private) ++{ ++ mutex_lock(&dev->struct_mutex); ++ idr_for_each(&file_private->object_idr, ++ &drm_gem_object_release_handle, NULL); ++ ++ idr_destroy(&file_private->object_idr); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/** ++ * Called after the last reference to the object has been lost. ++ * ++ * Frees the object ++ */ ++void ++drm_gem_object_free(struct kref *kref) ++{ ++ struct drm_gem_object *obj = (struct drm_gem_object *) kref; ++ struct drm_device *dev = obj->dev; ++ ++ BUG_ON(!mutex_is_locked(&dev->struct_mutex)); ++ ++ if (dev->driver->gem_free_object != NULL) ++ dev->driver->gem_free_object(obj); ++ ++ fput(obj->filp); ++ atomic_dec(&dev->object_count); ++ atomic_sub(obj->size, &dev->object_memory); ++ kfree(obj); ++} ++EXPORT_SYMBOL(drm_gem_object_free); ++ ++/** ++ * Called after the last handle to the object has been closed ++ * ++ * Removes any name for the object. Note that this must be ++ * called before drm_gem_object_free or we'll be touching ++ * freed memory ++ */ ++void ++drm_gem_object_handle_free(struct kref *kref) ++{ ++ struct drm_gem_object *obj = container_of(kref, ++ struct drm_gem_object, ++ handlecount); ++ struct drm_device *dev = obj->dev; ++ ++ /* Remove any name for this object */ ++ spin_lock(&dev->object_name_lock); ++ if (obj->name) { ++ idr_remove(&dev->object_name_idr, obj->name); ++ spin_unlock(&dev->object_name_lock); ++ /* ++ * The object name held a reference to this object, drop ++ * that now. 
++ */ ++ drm_gem_object_unreference(obj); ++ } else ++ spin_unlock(&dev->object_name_lock); ++ ++} ++EXPORT_SYMBOL(drm_gem_object_handle_free); ++ ++#else ++ ++int drm_gem_init(struct drm_device *dev) ++{ ++ return 0; ++} ++ ++void drm_gem_open(struct drm_device *dev, struct drm_file *file_private) ++{ ++ ++} ++ ++void ++drm_gem_release(struct drm_device *dev, struct drm_file *file_private) ++{ ++ ++} ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/drm.h git-nokia/drivers/gpu/drm-tungsten/drm.h +--- git/drivers/gpu/drm-tungsten/drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1145 @@ ++/** ++ * \file drm.h ++ * Header for the Direct Rendering Manager ++ * ++ * \author Rickard E. (Rik) Faith ++ * ++ * \par Acknowledgments: ++ * Dec 1999, Richard Henderson , move to generic \c cmpxchg. ++ */ ++ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++/** ++ * \mainpage ++ * ++ * The Direct Rendering Manager (DRM) is a device-independent kernel-level ++ * device driver that provides support for the XFree86 Direct Rendering ++ * Infrastructure (DRI). ++ * ++ * The DRM supports the Direct Rendering Infrastructure (DRI) in four major ++ * ways: ++ * -# The DRM provides synchronized access to the graphics hardware via ++ * the use of an optimized two-tiered lock. ++ * -# The DRM enforces the DRI security policy for access to the graphics ++ * hardware by only allowing authenticated X11 clients access to ++ * restricted regions of memory. ++ * -# The DRM provides a generic DMA engine, complete with multiple ++ * queues and the ability to detect the need for an OpenGL context ++ * switch. ++ * -# The DRM is extensible via the use of small device-specific modules ++ * that rely extensively on the API exported by the DRM module. 
++ *
++ */
++
++#ifndef _DRM_H_
++#define _DRM_H_
++
++#ifndef __user
++#define __user
++#endif
++#ifndef __iomem
++#define __iomem
++#endif
++
++#ifdef __GNUC__
++# define DEPRECATED __attribute__ ((deprecated))
++#else
++# define DEPRECATED
++#endif
++
++#if defined(__linux__)
++#include <asm/ioctl.h>		/* For _IO* macros */
++#define DRM_IOCTL_NR(n)		_IOC_NR(n)
++#define DRM_IOC_VOID		_IOC_NONE
++#define DRM_IOC_READ		_IOC_READ
++#define DRM_IOC_WRITE		_IOC_WRITE
++#define DRM_IOC_READWRITE	_IOC_READ|_IOC_WRITE
++#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
++#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
++#include <sys/ioccom.h>
++#define DRM_IOCTL_NR(n)		((n) & 0xff)
++#define DRM_IOC_VOID		IOC_VOID
++#define DRM_IOC_READ		IOC_OUT
++#define DRM_IOC_WRITE		IOC_IN
++#define DRM_IOC_READWRITE	IOC_INOUT
++#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
++#endif
++
++#ifdef __OpenBSD__
++#define DRM_MAJOR 81
++#endif
++#if defined(__linux__) || defined(__NetBSD__)
++#define DRM_MAJOR 226
++#endif
++#define DRM_MAX_MINOR 15
++
++#define DRM_NAME	"drm"	/**< Name in kernel, /dev, and /proc */
++#define DRM_MIN_ORDER	5	/**< At least 2^5 bytes = 32 bytes */
++#define DRM_MAX_ORDER	22	/**< Up to 2^22 bytes = 4MB */
++#define DRM_RAM_PERCENT 10	/**< How much system ram can we lock? */
++
++#define _DRM_LOCK_HELD	0x80000000U /**< Hardware lock is held */
++#define _DRM_LOCK_CONT	0x40000000U /**< Hardware lock is contended */
++#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
++#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
++#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
++
++#if defined(__linux__)
++typedef unsigned int drm_handle_t;
++#else
++#include <sys/types.h>
++typedef unsigned long drm_handle_t;	/**< To mapped regions */
++#endif
++typedef unsigned int drm_context_t;	/**< GLXContext handle */
++typedef unsigned int drm_drawable_t;
++typedef unsigned int drm_magic_t;	/**< Magic for authentication */
++
++/**
++ * Cliprect.
++ *
++ * \warning If you change this structure, make sure you change
++ * XF86DRIClipRectRec in the server as well
++ *
++ * \note KW: Actually it's illegal to change either for
++ * backwards-compatibility reasons.
++ */
++struct drm_clip_rect {
++	unsigned short x1;
++	unsigned short y1;
++	unsigned short x2;
++	unsigned short y2;
++};
++
++/**
++ * Texture region.
++ */
++struct drm_tex_region {
++	unsigned char next;
++	unsigned char prev;
++	unsigned char in_use;
++	unsigned char padding;
++	unsigned int age;
++};
++
++/**
++ * Hardware lock.
++ *
++ * The lock structure is a simple cache-line aligned integer. To avoid
++ * processor bus contention on a multiprocessor system, there should not be any
++ * other data stored in the same cache line.
++ */
++struct drm_hw_lock {
++	__volatile__ unsigned int lock;	/**< lock variable */
++	char padding[60];		/**< Pad to cache line */
++};
++
++/* This is beyond ugly, and only works on GCC. However, it allows me to use
++ * drm.h in places (i.e., in the X-server) where I can't use size_t. The real
++ * fix is to use uint32_t instead of size_t, but that fix will break existing
++ * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems. That *will*
++ * eventually happen, though. I chose 'unsigned long' to be the fallback type
++ * because that works on all the platforms I know about. Hopefully, the
++ * real fix will happen before that bites us.
++ */ ++ ++#ifdef __SIZE_TYPE__ ++# define DRM_SIZE_T __SIZE_TYPE__ ++#else ++# warning "__SIZE_TYPE__ not defined. Assuming sizeof(size_t) == sizeof(unsigned long)!" ++# define DRM_SIZE_T unsigned long ++#endif ++ ++/** ++ * DRM_IOCTL_VERSION ioctl argument type. ++ * ++ * \sa drmGetVersion(). ++ */ ++struct drm_version { ++ int version_major; /**< Major version */ ++ int version_minor; /**< Minor version */ ++ int version_patchlevel; /**< Patch level */ ++ DRM_SIZE_T name_len; /**< Length of name buffer */ ++ char __user *name; /**< Name of driver */ ++ DRM_SIZE_T date_len; /**< Length of date buffer */ ++ char __user *date; /**< User-space buffer to hold date */ ++ DRM_SIZE_T desc_len; /**< Length of desc buffer */ ++ char __user *desc; /**< User-space buffer to hold desc */ ++}; ++ ++/** ++ * DRM_IOCTL_GET_UNIQUE ioctl argument type. ++ * ++ * \sa drmGetBusid() and drmSetBusId(). ++ */ ++struct drm_unique { ++ DRM_SIZE_T unique_len; /**< Length of unique */ ++ char __user *unique; /**< Unique name for driver instantiation */ ++}; ++ ++#undef DRM_SIZE_T ++ ++struct drm_list { ++ int count; /**< Length of user-space structures */ ++ struct drm_version __user *version; ++}; ++ ++struct drm_block { ++ int unused; ++}; ++ ++/** ++ * DRM_IOCTL_CONTROL ioctl argument type. ++ * ++ * \sa drmCtlInstHandler() and drmCtlUninstHandler(). ++ */ ++struct drm_control { ++ enum { ++ DRM_ADD_COMMAND, ++ DRM_RM_COMMAND, ++ DRM_INST_HANDLER, ++ DRM_UNINST_HANDLER ++ } func; ++ int irq; ++}; ++ ++/** ++ * Type of memory to map. ++ */ ++enum drm_map_type { ++ _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */ ++ _DRM_REGISTERS = 1, /**< no caching, no core dump */ ++ _DRM_SHM = 2, /**< shared, cached */ ++ _DRM_AGP = 3, /**< AGP/GART */ ++ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ ++ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ ++ _DRM_TTM = 6 ++}; ++ ++/** ++ * Memory mapping flags. ++ */ ++enum drm_map_flags { ++ _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */ ++ _DRM_READ_ONLY = 0x02, ++ _DRM_LOCKED = 0x04, /**< shared, cached, locked */ ++ _DRM_KERNEL = 0x08, /**< kernel requires access */ ++ _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */ ++ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */ ++ _DRM_REMOVABLE = 0x40, /**< Removable mapping */ ++ _DRM_DRIVER = 0x80 /**< Managed by driver */ ++}; ++ ++struct drm_ctx_priv_map { ++ unsigned int ctx_id; /**< Context requesting private mapping */ ++ void *handle; /**< Handle of map */ ++}; ++ ++/** ++ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls ++ * argument type. ++ * ++ * \sa drmAddMap(). ++ */ ++struct drm_map { ++ unsigned long offset; /**< Requested physical address (0 for SAREA)*/ ++ unsigned long size; /**< Requested physical size (bytes) */ ++ enum drm_map_type type; /**< Type of memory to map */ ++ enum drm_map_flags flags; /**< Flags */ ++ void *handle; /**< User-space: "Handle" to pass to mmap() */ ++ /**< Kernel-space: kernel-virtual address */ ++ int mtrr; /**< MTRR slot used */ ++ /* Private data */ ++}; ++ ++/** ++ * DRM_IOCTL_GET_CLIENT ioctl argument type. ++ */ ++struct drm_client { ++ int idx; /**< Which client desired? */ ++ int auth; /**< Is client authenticated? 
*/ ++ unsigned long pid; /**< Process ID */ ++ unsigned long uid; /**< User ID */ ++ unsigned long magic; /**< Magic */ ++ unsigned long iocs; /**< Ioctl count */ ++}; ++ ++enum drm_stat_type { ++ _DRM_STAT_LOCK, ++ _DRM_STAT_OPENS, ++ _DRM_STAT_CLOSES, ++ _DRM_STAT_IOCTLS, ++ _DRM_STAT_LOCKS, ++ _DRM_STAT_UNLOCKS, ++ _DRM_STAT_VALUE, /**< Generic value */ ++ _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */ ++ _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */ ++ ++ _DRM_STAT_IRQ, /**< IRQ */ ++ _DRM_STAT_PRIMARY, /**< Primary DMA bytes */ ++ _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */ ++ _DRM_STAT_DMA, /**< DMA */ ++ _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */ ++ _DRM_STAT_MISSED /**< Missed DMA opportunity */ ++ /* Add to the *END* of the list */ ++}; ++ ++/** ++ * DRM_IOCTL_GET_STATS ioctl argument type. ++ */ ++struct drm_stats { ++ unsigned long count; ++ struct { ++ unsigned long value; ++ enum drm_stat_type type; ++ } data[15]; ++}; ++ ++/** ++ * Hardware locking flags. ++ */ ++enum drm_lock_flags { ++ _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */ ++ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */ ++ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */ ++ _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */ ++ /* These *HALT* flags aren't supported yet ++ -- they will be used to support the ++ full-screen DGA-like mode. */ ++ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */ ++ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */ ++}; ++ ++/** ++ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type. ++ * ++ * \sa drmGetLock() and drmUnlock(). ++ */ ++struct drm_lock { ++ int context; ++ enum drm_lock_flags flags; ++}; ++ ++/** ++ * DMA flags ++ * ++ * \warning ++ * These values \e must match xf86drm.h. ++ * ++ * \sa drm_dma. ++ */ ++enum drm_dma_flags { ++ /* Flags for DMA buffer dispatch */ ++ _DRM_DMA_BLOCK = 0x01, /**< ++ * Block until buffer dispatched. ++ * ++ * \note The buffer may not yet have ++ * been processed by the hardware -- ++ * getting a hardware lock with the ++ * hardware quiescent will ensure ++ * that the buffer has been ++ * processed. ++ */ ++ _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */ ++ _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */ ++ ++ /* Flags for DMA buffer request */ ++ _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */ ++ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */ ++ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */ ++}; ++ ++/** ++ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type. ++ * ++ * \sa drmAddBufs(). ++ */ ++struct drm_buf_desc { ++ int count; /**< Number of buffers of this size */ ++ int size; /**< Size in bytes */ ++ int low_mark; /**< Low water mark */ ++ int high_mark; /**< High water mark */ ++ enum { ++ _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ ++ _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ ++ _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */ ++ _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */ ++ _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */ ++ } flags; ++ unsigned long agp_start; /**< ++ * Start address of where the AGP buffers are ++ * in the AGP aperture ++ */ ++}; ++ ++/** ++ * DRM_IOCTL_INFO_BUFS ioctl argument type. 
++ */ ++struct drm_buf_info { ++ int count; /**< Number of buffers described in list */ ++ struct drm_buf_desc __user *list; /**< List of buffer descriptions */ ++}; ++ ++/** ++ * DRM_IOCTL_FREE_BUFS ioctl argument type. ++ */ ++struct drm_buf_free { ++ int count; ++ int __user *list; ++}; ++ ++/** ++ * Buffer information ++ * ++ * \sa drm_buf_map. ++ */ ++struct drm_buf_pub { ++ int idx; /**< Index into the master buffer list */ ++ int total; /**< Buffer size */ ++ int used; /**< Amount of buffer in use (for DMA) */ ++ void __user *address; /**< Address of buffer */ ++}; ++ ++/** ++ * DRM_IOCTL_MAP_BUFS ioctl argument type. ++ */ ++struct drm_buf_map { ++ int count; /**< Length of the buffer list */ ++#if defined(__cplusplus) ++ void __user *c_virtual; ++#else ++ void __user *virtual; /**< Mmap'd area in user-virtual */ ++#endif ++ struct drm_buf_pub __user *list; /**< Buffer information */ ++}; ++ ++/** ++ * DRM_IOCTL_DMA ioctl argument type. ++ * ++ * Indices here refer to the offset into the buffer list in drm_buf_get. ++ * ++ * \sa drmDMA(). ++ */ ++struct drm_dma { ++ int context; /**< Context handle */ ++ int send_count; /**< Number of buffers to send */ ++ int __user *send_indices; /**< List of handles to buffers */ ++ int __user *send_sizes; /**< Lengths of data to send */ ++ enum drm_dma_flags flags; /**< Flags */ ++ int request_count; /**< Number of buffers requested */ ++ int request_size; /**< Desired size for buffers */ ++ int __user *request_indices; /**< Buffer information */ ++ int __user *request_sizes; ++ int granted_count; /**< Number of buffers granted */ ++}; ++ ++enum drm_ctx_flags { ++ _DRM_CONTEXT_PRESERVED = 0x01, ++ _DRM_CONTEXT_2DONLY = 0x02 ++}; ++ ++/** ++ * DRM_IOCTL_ADD_CTX ioctl argument type. ++ * ++ * \sa drmCreateContext() and drmDestroyContext(). ++ */ ++struct drm_ctx { ++ drm_context_t handle; ++ enum drm_ctx_flags flags; ++}; ++ ++/** ++ * DRM_IOCTL_RES_CTX ioctl argument type. ++ */ ++struct drm_ctx_res { ++ int count; ++ struct drm_ctx __user *contexts; ++}; ++ ++/** ++ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type. ++ */ ++struct drm_draw { ++ drm_drawable_t handle; ++}; ++ ++/** ++ * DRM_IOCTL_UPDATE_DRAW ioctl argument type. ++ */ ++typedef enum { ++ DRM_DRAWABLE_CLIPRECTS, ++} drm_drawable_info_type_t; ++ ++struct drm_update_draw { ++ drm_drawable_t handle; ++ unsigned int type; ++ unsigned int num; ++ unsigned long long data; ++}; ++ ++/** ++ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. ++ */ ++struct drm_auth { ++ drm_magic_t magic; ++}; ++ ++/** ++ * DRM_IOCTL_IRQ_BUSID ioctl argument type. ++ * ++ * \sa drmGetInterruptFromBusID(). 
++ */ ++struct drm_irq_busid { ++ int irq; /**< IRQ number */ ++ int busnum; /**< bus number */ ++ int devnum; /**< device number */ ++ int funcnum; /**< function number */ ++}; ++ ++enum drm_vblank_seq_type { ++ _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ ++ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ ++ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ ++ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ ++ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ ++ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */ ++}; ++ ++#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) ++#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \ ++ _DRM_VBLANK_NEXTONMISS) ++ ++struct drm_wait_vblank_request { ++ enum drm_vblank_seq_type type; ++ unsigned int sequence; ++ unsigned long signal; ++}; ++ ++struct drm_wait_vblank_reply { ++ enum drm_vblank_seq_type type; ++ unsigned int sequence; ++ long tval_sec; ++ long tval_usec; ++}; ++ ++/** ++ * DRM_IOCTL_WAIT_VBLANK ioctl argument type. ++ * ++ * \sa drmWaitVBlank(). ++ */ ++union drm_wait_vblank { ++ struct drm_wait_vblank_request request; ++ struct drm_wait_vblank_reply reply; ++}; ++ ++ ++#define _DRM_PRE_MODESET 1 ++#define _DRM_POST_MODESET 2 ++ ++/** ++ * DRM_IOCTL_MODESET_CTL ioctl argument type ++ * ++ * \sa drmModesetCtl(). ++ */ ++struct drm_modeset_ctl { ++ uint32_t crtc; ++ uint32_t cmd; ++}; ++ ++/** ++ * DRM_IOCTL_AGP_ENABLE ioctl argument type. ++ * ++ * \sa drmAgpEnable(). ++ */ ++struct drm_agp_mode { ++ unsigned long mode; /**< AGP mode */ ++}; ++ ++/** ++ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type. ++ * ++ * \sa drmAgpAlloc() and drmAgpFree(). ++ */ ++struct drm_agp_buffer { ++ unsigned long size; /**< In bytes -- will round to page boundary */ ++ unsigned long handle; /**< Used for binding / unbinding */ ++ unsigned long type; /**< Type of memory to allocate */ ++ unsigned long physical; /**< Physical used by i810 */ ++}; ++ ++/** ++ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type. ++ * ++ * \sa drmAgpBind() and drmAgpUnbind(). ++ */ ++struct drm_agp_binding { ++ unsigned long handle; /**< From drm_agp_buffer */ ++ unsigned long offset; /**< In bytes -- will round to page boundary */ ++}; ++ ++/** ++ * DRM_IOCTL_AGP_INFO ioctl argument type. ++ * ++ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(), ++ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(), ++ * drmAgpVendorId() and drmAgpDeviceId(). ++ */ ++struct drm_agp_info { ++ int agp_version_major; ++ int agp_version_minor; ++ unsigned long mode; ++ unsigned long aperture_base; /**< physical address */ ++ unsigned long aperture_size; /**< bytes */ ++ unsigned long memory_allowed; /**< bytes */ ++ unsigned long memory_used; ++ ++ /** \name PCI information */ ++ /*@{ */ ++ unsigned short id_vendor; ++ unsigned short id_device; ++ /*@} */ ++}; ++ ++/** ++ * DRM_IOCTL_SG_ALLOC ioctl argument type. ++ */ ++struct drm_scatter_gather { ++ unsigned long size; /**< In bytes -- will round to page boundary */ ++ unsigned long handle; /**< Used for mapping / unmapping */ ++}; ++ ++/** ++ * DRM_IOCTL_SET_VERSION ioctl argument type. 
++ */
++struct drm_set_version {
++	int drm_di_major;
++	int drm_di_minor;
++	int drm_dd_major;
++	int drm_dd_minor;
++};
++
++
++#define DRM_FENCE_FLAG_EMIT       0x00000001
++#define DRM_FENCE_FLAG_SHAREABLE  0x00000002
++/**
++ * On hardware with no interrupt events for operation completion,
++ * indicates that the kernel should sleep while waiting for any blocking
++ * operation to complete rather than spinning.
++ *
++ * Has no effect otherwise.
++ */
++#define DRM_FENCE_FLAG_WAIT_LAZY  0x00000004
++#define DRM_FENCE_FLAG_NO_USER    0x00000010
++
++/* Reserved for driver use */
++#define DRM_FENCE_MASK_DRIVER     0xFF000000
++
++#define DRM_FENCE_TYPE_EXE        0x00000001
++
++struct drm_fence_arg {
++	unsigned int handle;
++	unsigned int fence_class;
++	unsigned int type;
++	unsigned int flags;
++	unsigned int signaled;
++	unsigned int error;
++	unsigned int sequence;
++	unsigned int pad64;
++	uint64_t expand_pad[2]; /* Future expansion */
++};
++
++/* Buffer permissions, referring to how the GPU uses the buffers.
++ * These translate to fence types used for the buffers.
++ * Typically a texture buffer is read, a destination buffer is write and
++ * a command (batch-) buffer is exe. Can be or-ed together.
++ */
++
++#define DRM_BO_FLAG_READ        (1ULL << 0)
++#define DRM_BO_FLAG_WRITE       (1ULL << 1)
++#define DRM_BO_FLAG_EXE         (1ULL << 2)
++
++/*
++ * All of the bits related to access mode
++ */
++#define DRM_BO_MASK_ACCESS	(DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
++/*
++ * Status flags. Can be read to determine the actual state of a buffer.
++ * Can also be set in the buffer mask before validation.
++ */
++
++/*
++ * Mask: Never evict this buffer. Not even with force. This type of buffer is only
++ * available to root and must be manually removed before buffer manager shutdown
++ * or lock.
++ * Flags: Acknowledge.
++ */
++#define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)
++
++/*
++ * Mask: Require that the buffer is placed in mappable memory when validated.
++ * If not set the buffer may or may not be in mappable memory when validated.
++ * Flags: If set, the buffer is in mappable memory.
++ */
++#define DRM_BO_FLAG_MAPPABLE    (1ULL << 5)
++
++/* Mask: The buffer should be shareable with other processes.
++ * Flags: The buffer is shareable with other processes.
++ */
++#define DRM_BO_FLAG_SHAREABLE   (1ULL << 6)
++
++/* Mask: If set, place the buffer in cache-coherent memory if available.
++ * If clear, never place the buffer in cache coherent memory if validated.
++ * Flags: The buffer is currently in cache-coherent memory.
++ */
++#define DRM_BO_FLAG_CACHED      (1ULL << 7)
++
++/* Mask: Make sure that every time this buffer is validated,
++ * it ends up on the same location provided that the memory mask is the same.
++ * The buffer will also not be evicted when claiming space for
++ * other buffers. Basically a pinned buffer but it may be thrown out as
++ * part of buffer manager shutdown or locking.
++ * Flags: Acknowledge.
++ */
++#define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)
++
++/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
++ * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
++ * with unsnooped PTEs instead of snooped, by using chipset-specific cache
++ * flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
++ * as the eviction to local memory (TTM unbind) on map is just a side effect
++ * to prevent aggressive cache prefetch from the GPU disturbing the cache
++ * management that the DRM is doing.
++ *
++ * Flags: Acknowledge.
++ * Buffers allocated with this flag should not be used for suballocators.
++ * This type may have issues on CPUs with over-aggressive caching:
++ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
++ */
++#define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)
++
++
++/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
++ * Flags: Acknowledge.
++ */
++#define DRM_BO_FLAG_FORCE_CACHING  (1ULL << 13)
++
++/*
++ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
++ * Flags: Acknowledge.
++ */
++#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
++#define DRM_BO_FLAG_TILE           (1ULL << 15)
++
++/*
++ * Memory type flags that can be or'ed together in the mask, but only
++ * one appears in flags.
++ */
++
++/* System memory */
++#define DRM_BO_FLAG_MEM_LOCAL  (1ULL << 24)
++/* Translation table memory */
++#define DRM_BO_FLAG_MEM_TT     (1ULL << 25)
++/* Vram memory */
++#define DRM_BO_FLAG_MEM_VRAM   (1ULL << 26)
++/* Up to the driver to define. */
++#define DRM_BO_FLAG_MEM_PRIV0  (1ULL << 27)
++#define DRM_BO_FLAG_MEM_PRIV1  (1ULL << 28)
++#define DRM_BO_FLAG_MEM_PRIV2  (1ULL << 29)
++#define DRM_BO_FLAG_MEM_PRIV3  (1ULL << 30)
++#define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
++/* We can add more of these now with a 64-bit flag type */
++
++/*
++ * This is a mask covering all of the memory type flags; easier to just
++ * use a single constant than a bunch of | values. It covers
++ * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
++ */
++#define DRM_BO_MASK_MEM         0x00000000FF000000ULL
++/*
++ * This adds all of the CPU-mapping options in with the memory
++ * type to label all bits which change how the page gets mapped
++ */
++#define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
++				 DRM_BO_FLAG_CACHED_MAPPED | \
++				 DRM_BO_FLAG_CACHED | \
++				 DRM_BO_FLAG_MAPPABLE)
++
++/* Driver-private flags */
++#define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL
++
++/*
++ * Don't block on validate and map. Instead, return EBUSY.
++ */
++#define DRM_BO_HINT_DONT_BLOCK  0x00000002
++/*
++ * Don't place this buffer on the unfenced list. This means
++ * that the buffer will not end up having a fence associated
++ * with it as a result of this operation
++ */
++#define DRM_BO_HINT_DONT_FENCE  0x00000004
++/**
++ * On hardware with no interrupt events for operation completion,
++ * indicates that the kernel should sleep while waiting for any blocking
++ * operation to complete rather than spinning.
++ *
++ * Has no effect otherwise.
++ */
++#define DRM_BO_HINT_WAIT_LAZY   0x00000008
++/*
++ * The client has computed relocations referring to this buffer using the
++ * offset in the presumed_offset field.
If that offset ends up matching ++ * where this buffer lands, the kernel is free to skip executing those ++ * relocations ++ */ ++#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010 ++ ++#define DRM_BO_INIT_MAGIC 0xfe769812 ++#define DRM_BO_INIT_MAJOR 1 ++#define DRM_BO_INIT_MINOR 0 ++#define DRM_BO_INIT_PATCH 0 ++ ++ ++struct drm_bo_info_req { ++ uint64_t mask; ++ uint64_t flags; ++ unsigned int handle; ++ unsigned int hint; ++ unsigned int fence_class; ++ unsigned int desired_tile_stride; ++ unsigned int tile_info; ++ unsigned int pad64; ++ uint64_t presumed_offset; ++}; ++ ++struct drm_bo_create_req { ++ uint64_t flags; ++ uint64_t size; ++ uint64_t buffer_start; ++ unsigned int hint; ++ unsigned int page_alignment; ++}; ++ ++ ++/* ++ * Reply flags ++ */ ++ ++#define DRM_BO_REP_BUSY 0x00000001 ++ ++struct drm_bo_info_rep { ++ uint64_t flags; ++ uint64_t proposed_flags; ++ uint64_t size; ++ uint64_t offset; ++ uint64_t arg_handle; ++ uint64_t buffer_start; ++ unsigned int handle; ++ unsigned int fence_flags; ++ unsigned int rep_flags; ++ unsigned int page_alignment; ++ unsigned int desired_tile_stride; ++ unsigned int hw_tile_stride; ++ unsigned int tile_info; ++ unsigned int pad64; ++ uint64_t expand_pad[4]; /*Future expansion */ ++}; ++ ++struct drm_bo_arg_rep { ++ struct drm_bo_info_rep bo_info; ++ int ret; ++ unsigned int pad64; ++}; ++ ++struct drm_bo_create_arg { ++ union { ++ struct drm_bo_create_req req; ++ struct drm_bo_info_rep rep; ++ } d; ++}; ++ ++struct drm_bo_handle_arg { ++ unsigned int handle; ++}; ++ ++struct drm_bo_reference_info_arg { ++ union { ++ struct drm_bo_handle_arg req; ++ struct drm_bo_info_rep rep; ++ } d; ++}; ++ ++struct drm_bo_map_wait_idle_arg { ++ union { ++ struct drm_bo_info_req req; ++ struct drm_bo_info_rep rep; ++ } d; ++}; ++ ++struct drm_bo_op_req { ++ enum { ++ drm_bo_validate, ++ drm_bo_fence, ++ drm_bo_ref_fence, ++ } op; ++ unsigned int arg_handle; ++ struct drm_bo_info_req bo_req; ++}; ++ ++ ++struct drm_bo_op_arg { ++ uint64_t next; ++ union { ++ struct drm_bo_op_req req; ++ struct drm_bo_arg_rep rep; ++ } d; ++ int handled; ++ unsigned int pad64; ++}; ++ ++ ++#define DRM_BO_MEM_LOCAL 0 ++#define DRM_BO_MEM_TT 1 ++#define DRM_BO_MEM_VRAM 2 ++#define DRM_BO_MEM_PRIV0 3 ++#define DRM_BO_MEM_PRIV1 4 ++#define DRM_BO_MEM_PRIV2 5 ++#define DRM_BO_MEM_PRIV3 6 ++#define DRM_BO_MEM_PRIV4 7 ++ ++#define DRM_BO_MEM_TYPES 8 /* For now. */ ++ ++#define DRM_BO_LOCK_UNLOCK_BM (1 << 0) ++#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1) ++ ++struct drm_bo_version_arg { ++ uint32_t major; ++ uint32_t minor; ++ uint32_t patchlevel; ++}; ++ ++struct drm_mm_type_arg { ++ unsigned int mem_type; ++ unsigned int lock_flags; ++}; ++ ++struct drm_mm_init_arg { ++ unsigned int magic; ++ unsigned int major; ++ unsigned int minor; ++ unsigned int mem_type; ++ uint64_t p_offset; ++ uint64_t p_size; ++}; ++ ++struct drm_mm_info_arg { ++ unsigned int mem_type; ++ uint64_t p_size; ++}; ++ ++struct drm_gem_close { ++ /** Handle of the object to be closed. 
*/ ++ uint32_t handle; ++ uint32_t pad; ++}; ++ ++struct drm_gem_flink { ++ /** Handle for the object being named */ ++ uint32_t handle; ++ ++ /** Returned global name */ ++ uint32_t name; ++}; ++ ++struct drm_gem_open { ++ /** Name of object being opened */ ++ uint32_t name; ++ ++ /** Returned handle for the object */ ++ uint32_t handle; ++ ++ /** Returned size of the object */ ++ uint64_t size; ++}; ++ ++/** ++ * \name Ioctls Definitions ++ */ ++/*@{*/ ++ ++#define DRM_IOCTL_BASE 'd' ++#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) ++#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) ++#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) ++#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) ++ ++#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) ++#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique) ++#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth) ++#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid) ++#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map) ++#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) ++#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) ++#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) ++#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) ++ ++#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) ++#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) ++#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) ++ ++#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) ++#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) ++#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block) ++#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block) ++#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control) ++#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map) ++#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc) ++#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc) ++#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info) ++#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map) ++#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free) ++ ++#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map) ++ ++#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) ++#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) ++ ++#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) ++#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) ++#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) ++#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx) ++#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx) ++#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx) ++#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res) ++#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw) ++#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw) ++#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma) ++#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock) ++#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock) ++#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock) ++ ++#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) ++#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) ++#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode) ++#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info) ++#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer) ++#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer) ++#define 
DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding) ++#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding) ++ ++#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather) ++#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather) ++ ++#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) ++ ++#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) ++ ++#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg) ++#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg) ++#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg) ++#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg) ++ ++#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg) ++#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg) ++ ++#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg) ++#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg) ++#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg) ++#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg) ++#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg) ++#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg) ++#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg) ++#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg) ++#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg) ++#define DRM_IOCTL_MM_INFO DRM_IOWR(0xd7, struct drm_mm_info_arg) ++ ++/*@}*/ ++ ++/** ++ * Device specific ioctls should only be in their respective headers ++ * The device specific ioctl range is from 0x40 to 0x99. ++ * Generic IOCTLS restart at 0xA0. ++ * ++ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and ++ * drmCommandReadWrite(). 
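++ *
++ * For example, a driver header might define its ioctls as follows (an
++ * illustrative sketch; the DRM_MYDRV_* names are hypothetical):
++ *
++ *   #define DRM_MYDRV_INIT        0x00
++ *   #define DRM_IOCTL_MYDRV_INIT \
++ *           DRM_IOW(DRM_COMMAND_BASE + DRM_MYDRV_INIT, struct drm_mydrv_init)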
++ */ ++#define DRM_COMMAND_BASE 0x40 ++#define DRM_COMMAND_END 0xA0 ++ ++/* typedef area */ ++#ifndef __KERNEL__ ++typedef struct drm_clip_rect drm_clip_rect_t; ++typedef struct drm_tex_region drm_tex_region_t; ++typedef struct drm_hw_lock drm_hw_lock_t; ++typedef struct drm_version drm_version_t; ++typedef struct drm_unique drm_unique_t; ++typedef struct drm_list drm_list_t; ++typedef struct drm_block drm_block_t; ++typedef struct drm_control drm_control_t; ++typedef enum drm_map_type drm_map_type_t; ++typedef enum drm_map_flags drm_map_flags_t; ++typedef struct drm_ctx_priv_map drm_ctx_priv_map_t; ++typedef struct drm_map drm_map_t; ++typedef struct drm_client drm_client_t; ++typedef enum drm_stat_type drm_stat_type_t; ++typedef struct drm_stats drm_stats_t; ++typedef enum drm_lock_flags drm_lock_flags_t; ++typedef struct drm_lock drm_lock_t; ++typedef enum drm_dma_flags drm_dma_flags_t; ++typedef struct drm_buf_desc drm_buf_desc_t; ++typedef struct drm_buf_info drm_buf_info_t; ++typedef struct drm_buf_free drm_buf_free_t; ++typedef struct drm_buf_pub drm_buf_pub_t; ++typedef struct drm_buf_map drm_buf_map_t; ++typedef struct drm_dma drm_dma_t; ++typedef union drm_wait_vblank drm_wait_vblank_t; ++typedef struct drm_agp_mode drm_agp_mode_t; ++typedef enum drm_ctx_flags drm_ctx_flags_t; ++typedef struct drm_ctx drm_ctx_t; ++typedef struct drm_ctx_res drm_ctx_res_t; ++typedef struct drm_draw drm_draw_t; ++typedef struct drm_update_draw drm_update_draw_t; ++typedef struct drm_auth drm_auth_t; ++typedef struct drm_irq_busid drm_irq_busid_t; ++typedef enum drm_vblank_seq_type drm_vblank_seq_type_t; ++typedef struct drm_agp_buffer drm_agp_buffer_t; ++typedef struct drm_agp_binding drm_agp_binding_t; ++typedef struct drm_agp_info drm_agp_info_t; ++typedef struct drm_scatter_gather drm_scatter_gather_t; ++typedef struct drm_set_version drm_set_version_t; ++ ++typedef struct drm_fence_arg drm_fence_arg_t; ++typedef struct drm_mm_type_arg drm_mm_type_arg_t; ++typedef struct drm_mm_init_arg drm_mm_init_arg_t; ++typedef enum drm_bo_type drm_bo_type_t; ++#endif ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/drm_hashtab.c git-nokia/drivers/gpu/drm-tungsten/drm_hashtab.c +--- git/drivers/gpu/drm-tungsten/drm_hashtab.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_hashtab.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,207 @@ ++/************************************************************************** ++ * ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++/*
++ * Simple open hash tab implementation.
++ *
++ * Authors:
++ * Thomas Hellström
++ */
++
++#include "drmP.h"
++#include "drm_hashtab.h"
++#include <linux/hash.h>
++
++int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
++{
++	unsigned int i;
++
++	ht->size = 1 << order;
++	ht->order = order;
++	ht->fill = 0;
++	ht->table = NULL;
++	ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
++	if (!ht->use_vmalloc) {
++		ht->table = drm_calloc(ht->size, sizeof(*ht->table),
++				       DRM_MEM_HASHTAB);
++	}
++	if (!ht->table) {
++		ht->use_vmalloc = 1;
++		ht->table = vmalloc(ht->size * sizeof(*ht->table));
++	}
++	if (!ht->table) {
++		DRM_ERROR("Out of memory for hash table\n");
++		return -ENOMEM;
++	}
++	for (i = 0; i < ht->size; ++i) {
++		INIT_HLIST_HEAD(&ht->table[i]);
++	}
++	return 0;
++}
++EXPORT_SYMBOL(drm_ht_create);
++
++void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
++{
++	struct drm_hash_item *entry;
++	struct hlist_head *h_list;
++	struct hlist_node *list;
++	unsigned int hashed_key;
++	int count = 0;
++
++	hashed_key = hash_long(key, ht->order);
++	DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
++	h_list = &ht->table[hashed_key];
++	hlist_for_each(list, h_list) {
++		entry = hlist_entry(list, struct drm_hash_item, head);
++		DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
++	}
++}
++
++static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
++					  unsigned long key)
++{
++	struct drm_hash_item *entry;
++	struct hlist_head *h_list;
++	struct hlist_node *list;
++	unsigned int hashed_key;
++
++	hashed_key = hash_long(key, ht->order);
++	h_list = &ht->table[hashed_key];
++	hlist_for_each(list, h_list) {
++		entry = hlist_entry(list, struct drm_hash_item, head);
++		if (entry->key == key)
++			return list;
++		if (entry->key > key)
++			break;
++	}
++	return NULL;
++}
++
++int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
++{
++	struct drm_hash_item *entry;
++	struct hlist_head *h_list;
++	struct hlist_node *list, *parent;
++	unsigned int hashed_key;
++	unsigned long key = item->key;
++
++	hashed_key = hash_long(key, ht->order);
++	h_list = &ht->table[hashed_key];
++	parent = NULL;
++	hlist_for_each(list, h_list) {
++		entry = hlist_entry(list, struct drm_hash_item, head);
++		if (entry->key == key)
++			return -EINVAL;
++		if (entry->key > key)
++			break;
++		parent = list;
++	}
++	if (parent) {
++		hlist_add_after(parent, &item->head);
++	} else {
++		hlist_add_head(&item->head, h_list);
++	}
++	return 0;
++}
++EXPORT_SYMBOL(drm_ht_insert_item);
++
++/*
++ * Just insert an item and return any "bits" bit key that hasn't been
++ * used before.
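++ *
++ * A worked example (this particular parameter split is hypothetical):
++ * with bits = 24, shift = 8 and add carrying a type tag, the item key
++ * becomes (hash_long(seed, 24) << 8) + add; on collision the hashed
++ * part is advanced by one modulo 2^24 until an unused key is found,
++ * and -EINVAL is returned once the whole key space has been probed.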
++ */
++int drm_ht_just_insert_please(struct drm_open_hash *ht,
++			      struct drm_hash_item *item,
++			      unsigned long seed, int bits, int shift,
++			      unsigned long add)
++{
++	int ret;
++	unsigned long mask = (1 << bits) - 1;
++	unsigned long first, unshifted_key;
++
++	unshifted_key = hash_long(seed, bits);
++	first = unshifted_key;
++	do {
++		item->key = (unshifted_key << shift) + add;
++		ret = drm_ht_insert_item(ht, item);
++		if (ret)
++			unshifted_key = (unshifted_key + 1) & mask;
++	} while (ret && (unshifted_key != first));
++
++	if (ret) {
++		DRM_ERROR("Available key bit space exhausted\n");
++		return -EINVAL;
++	}
++	return 0;
++}
++
++int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
++		     struct drm_hash_item **item)
++{
++	struct hlist_node *list;
++
++	list = drm_ht_find_key(ht, key);
++	if (!list)
++		return -EINVAL;
++
++	*item = hlist_entry(list, struct drm_hash_item, head);
++	return 0;
++}
++EXPORT_SYMBOL(drm_ht_find_item);
++
++int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
++{
++	struct hlist_node *list;
++
++	list = drm_ht_find_key(ht, key);
++	if (list) {
++		hlist_del_init(list);
++		ht->fill--;
++		return 0;
++	}
++	return -EINVAL;
++}
++
++int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
++{
++	hlist_del_init(&item->head);
++	ht->fill--;
++	return 0;
++}
++EXPORT_SYMBOL(drm_ht_remove_item);
++
++void drm_ht_remove(struct drm_open_hash *ht)
++{
++	if (ht->table) {
++		if (ht->use_vmalloc)
++			vfree(ht->table);
++		else
++			drm_free(ht->table, ht->size * sizeof(*ht->table),
++				 DRM_MEM_HASHTAB);
++		ht->table = NULL;
++	}
++}
++EXPORT_SYMBOL(drm_ht_remove);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_hashtab.h git-nokia/drivers/gpu/drm-tungsten/drm_hashtab.h
+--- git/drivers/gpu/drm-tungsten/drm_hashtab.h	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_hashtab.h	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,67 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++/*
++ * Simple open hash tab implementation.
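++ *
++ * A minimal usage sketch (struct my_obj and its hash member are
++ * hypothetical; order 8 gives 256 buckets):
++ *
++ *	struct drm_open_hash ht;
++ *	struct drm_hash_item *found;
++ *
++ *	drm_ht_create(&ht, 8);
++ *	obj->hash.key = key;
++ *	drm_ht_insert_item(&ht, &obj->hash);
++ *	if (drm_ht_find_item(&ht, key, &found) == 0)
++ *		obj = drm_hash_entry(found, struct my_obj, hash);
++ *	drm_ht_remove_item(&ht, &obj->hash);
++ *	drm_ht_remove(&ht);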
++ * ++ * Authors: ++ * Thomas Hellström ++ */ ++ ++#ifndef DRM_HASHTAB_H ++#define DRM_HASHTAB_H ++ ++#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) ++ ++struct drm_hash_item { ++ struct hlist_node head; ++ unsigned long key; ++}; ++ ++struct drm_open_hash { ++ unsigned int size; ++ unsigned int order; ++ unsigned int fill; ++ struct hlist_head *table; ++ int use_vmalloc; ++}; ++ ++ ++extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order); ++extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); ++extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, ++ unsigned long seed, int bits, int shift, ++ unsigned long add); ++extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); ++ ++extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); ++extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); ++extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); ++extern void drm_ht_remove(struct drm_open_hash *ht); ++ ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/drm_internal.h git-nokia/drivers/gpu/drm-tungsten/drm_internal.h +--- git/drivers/gpu/drm-tungsten/drm_internal.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_internal.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,40 @@ ++/* ++ * Copyright 2007 Red Hat, Inc ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++/* This header file holds function prototypes and data types that are ++ * internal to the drm (not exported to user space) but shared across ++ * drivers and platforms */ ++ ++#ifndef __DRM_INTERNAL_H__ ++#define __DRM_INTERNAL_H__ ++ ++/** ++ * Drawable information. ++ */ ++struct drm_drawable_info { ++ unsigned int num_rects; ++ struct drm_clip_rect *rects; ++}; ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/drm_ioc32.c git-nokia/drivers/gpu/drm-tungsten/drm_ioc32.c +--- git/drivers/gpu/drm-tungsten/drm_ioc32.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_ioc32.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1073 @@ ++/** ++ * \file drm_ioc32.c ++ * ++ * 32-bit ioctl compatibility routines for the DRM. ++ * ++ * \author Paul Mackerras ++ * ++ * Copyright (C) Paul Mackerras 2005. ++ * All Rights Reserved. 
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm_core.h"
++
++#define DRM_IOCTL_VERSION32		DRM_IOWR(0x00, drm_version32_t)
++#define DRM_IOCTL_GET_UNIQUE32		DRM_IOWR(0x01, drm_unique32_t)
++#define DRM_IOCTL_GET_MAP32		DRM_IOWR(0x04, drm_map32_t)
++#define DRM_IOCTL_GET_CLIENT32		DRM_IOWR(0x05, drm_client32_t)
++#define DRM_IOCTL_GET_STATS32		DRM_IOR( 0x06, drm_stats32_t)
++
++#define DRM_IOCTL_SET_UNIQUE32		DRM_IOW( 0x10, drm_unique32_t)
++#define DRM_IOCTL_ADD_MAP32		DRM_IOWR(0x15, drm_map32_t)
++#define DRM_IOCTL_ADD_BUFS32		DRM_IOWR(0x16, drm_buf_desc32_t)
++#define DRM_IOCTL_MARK_BUFS32		DRM_IOW( 0x17, drm_buf_desc32_t)
++#define DRM_IOCTL_INFO_BUFS32		DRM_IOWR(0x18, drm_buf_info32_t)
++#define DRM_IOCTL_MAP_BUFS32		DRM_IOWR(0x19, drm_buf_map32_t)
++#define DRM_IOCTL_FREE_BUFS32		DRM_IOW( 0x1a, drm_buf_free32_t)
++
++#define DRM_IOCTL_RM_MAP32		DRM_IOW( 0x1b, drm_map32_t)
++
++#define DRM_IOCTL_SET_SAREA_CTX32	DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
++#define DRM_IOCTL_GET_SAREA_CTX32	DRM_IOWR(0x1d, drm_ctx_priv_map32_t)
++
++#define DRM_IOCTL_RES_CTX32		DRM_IOWR(0x26, drm_ctx_res32_t)
++#define DRM_IOCTL_DMA32			DRM_IOWR(0x29, drm_dma32_t)
++
++#define DRM_IOCTL_AGP_ENABLE32		DRM_IOW( 0x32, drm_agp_mode32_t)
++#define DRM_IOCTL_AGP_INFO32		DRM_IOR( 0x33, drm_agp_info32_t)
++#define DRM_IOCTL_AGP_ALLOC32		DRM_IOWR(0x34, drm_agp_buffer32_t)
++#define DRM_IOCTL_AGP_FREE32		DRM_IOW( 0x35, drm_agp_buffer32_t)
++#define DRM_IOCTL_AGP_BIND32		DRM_IOW( 0x36, drm_agp_binding32_t)
++#define DRM_IOCTL_AGP_UNBIND32		DRM_IOW( 0x37, drm_agp_binding32_t)
++
++#define DRM_IOCTL_SG_ALLOC32		DRM_IOW( 0x38, drm_scatter_gather32_t)
++#define DRM_IOCTL_SG_FREE32		DRM_IOW( 0x39, drm_scatter_gather32_t)
++
++#define DRM_IOCTL_WAIT_VBLANK32		DRM_IOWR(0x3a, drm_wait_vblank32_t)
++
++typedef struct drm_version_32 {
++	int version_major;	/**< Major version */
++	int version_minor;	/**< Minor version */
++	int version_patchlevel;	/**< Patch level */
++	u32 name_len;		/**< Length of name buffer */
++	u32 name;		/**< Name of driver */
++	u32 date_len;		/**< Length of date buffer */
++	u32 date;		/**< User-space buffer to hold date */
++	u32 desc_len;		/**< Length of desc buffer */
++	u32 desc;		/**< User-space buffer to hold desc */
++} drm_version32_t;
++
++static int compat_drm_version(struct file *file, unsigned int cmd,
++			      unsigned long arg)
++{
++	drm_version32_t v32;
++	struct drm_version __user *version;
++	int err;
++
++	if
(copy_from_user(&v32, (void __user *)arg, sizeof(v32))) ++ return -EFAULT; ++ ++ version = compat_alloc_user_space(sizeof(*version)); ++ if (!access_ok(VERIFY_WRITE, version, sizeof(*version))) ++ return -EFAULT; ++ if (__put_user(v32.name_len, &version->name_len) ++ || __put_user((void __user *)(unsigned long)v32.name, ++ &version->name) ++ || __put_user(v32.date_len, &version->date_len) ++ || __put_user((void __user *)(unsigned long)v32.date, ++ &version->date) ++ || __put_user(v32.desc_len, &version->desc_len) ++ || __put_user((void __user *)(unsigned long)v32.desc, ++ &version->desc)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_VERSION, (unsigned long)version); ++ if (err) ++ return err; ++ ++ if (__get_user(v32.version_major, &version->version_major) ++ || __get_user(v32.version_minor, &version->version_minor) ++ || __get_user(v32.version_patchlevel, &version->version_patchlevel) ++ || __get_user(v32.name_len, &version->name_len) ++ || __get_user(v32.date_len, &version->date_len) ++ || __get_user(v32.desc_len, &version->desc_len)) ++ return -EFAULT; ++ ++ if (copy_to_user((void __user *)arg, &v32, sizeof(v32))) ++ return -EFAULT; ++ return 0; ++} ++ ++typedef struct drm_unique32 { ++ u32 unique_len; /**< Length of unique */ ++ u32 unique; /**< Unique name for driver instantiation */ ++} drm_unique32_t; ++ ++static int compat_drm_getunique(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_unique32_t uq32; ++ struct drm_unique __user *u; ++ int err; ++ ++ if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) ++ return -EFAULT; ++ ++ u = compat_alloc_user_space(sizeof(*u)); ++ if (!access_ok(VERIFY_WRITE, u, sizeof(*u))) ++ return -EFAULT; ++ if (__put_user(uq32.unique_len, &u->unique_len) ++ || __put_user((void __user *)(unsigned long)uq32.unique, ++ &u->unique)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_GET_UNIQUE, (unsigned long)u); ++ if (err) ++ return err; ++ ++ if (__get_user(uq32.unique_len, &u->unique_len)) ++ return -EFAULT; ++ if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int compat_drm_setunique(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_unique32_t uq32; ++ struct drm_unique __user *u; ++ ++ if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) ++ return -EFAULT; ++ ++ u = compat_alloc_user_space(sizeof(*u)); ++ if (!access_ok(VERIFY_WRITE, u, sizeof(*u))) ++ return -EFAULT; ++ if (__put_user(uq32.unique_len, &u->unique_len) ++ || __put_user((void __user *)(unsigned long)uq32.unique, ++ &u->unique)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_SET_UNIQUE, (unsigned long)u); ++} ++ ++typedef struct drm_map32 { ++ u32 offset; /**< Requested physical address (0 for SAREA)*/ ++ u32 size; /**< Requested physical size (bytes) */ ++ enum drm_map_type type; /**< Type of memory to map */ ++ enum drm_map_flags flags; /**< Flags */ ++ u32 handle; /**< User-space: "Handle" to pass to mmap() */ ++ int mtrr; /**< MTRR slot used */ ++} drm_map32_t; ++ ++static int compat_drm_getmap(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_map32_t __user *argp = (void __user *)arg; ++ drm_map32_t m32; ++ struct drm_map __user *map; ++ int idx, err; ++ void *handle; ++ ++ if (get_user(idx, &argp->offset)) ++ return -EFAULT; ++ ++ map = compat_alloc_user_space(sizeof(*map)); ++ if (!access_ok(VERIFY_WRITE, map, sizeof(*map))) ++ return 
-EFAULT; ++ if (__put_user(idx, &map->offset)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_GET_MAP, (unsigned long)map); ++ if (err) ++ return err; ++ ++ if (__get_user(m32.offset, &map->offset) ++ || __get_user(m32.size, &map->size) ++ || __get_user(m32.type, &map->type) ++ || __get_user(m32.flags, &map->flags) ++ || __get_user(handle, &map->handle) ++ || __get_user(m32.mtrr, &map->mtrr)) ++ return -EFAULT; ++ ++ m32.handle = (unsigned long)handle; ++ if (copy_to_user(argp, &m32, sizeof(m32))) ++ return -EFAULT; ++ return 0; ++ ++} ++ ++static int compat_drm_addmap(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_map32_t __user *argp = (void __user *)arg; ++ drm_map32_t m32; ++ struct drm_map __user *map; ++ int err; ++ void *handle; ++ ++ if (copy_from_user(&m32, argp, sizeof(m32))) ++ return -EFAULT; ++ ++ map = compat_alloc_user_space(sizeof(*map)); ++ if (!access_ok(VERIFY_WRITE, map, sizeof(*map))) ++ return -EFAULT; ++ if (__put_user(m32.offset, &map->offset) ++ || __put_user(m32.size, &map->size) ++ || __put_user(m32.type, &map->type) ++ || __put_user(m32.flags, &map->flags)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_ADD_MAP, (unsigned long)map); ++ if (err) ++ return err; ++ ++ if (__get_user(m32.offset, &map->offset) ++ || __get_user(m32.mtrr, &map->mtrr) ++ || __get_user(handle, &map->handle)) ++ return -EFAULT; ++ ++ m32.handle = (unsigned long)handle; ++ if (m32.handle != (unsigned long)handle && printk_ratelimit()) ++ printk(KERN_ERR "compat_drm_addmap truncated handle" ++ " %p for type %d offset %x\n", ++ handle, m32.type, m32.offset); ++ ++ if (copy_to_user(argp, &m32, sizeof(m32))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int compat_drm_rmmap(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_map32_t __user *argp = (void __user *)arg; ++ struct drm_map __user *map; ++ u32 handle; ++ ++ if (get_user(handle, &argp->handle)) ++ return -EFAULT; ++ ++ map = compat_alloc_user_space(sizeof(*map)); ++ if (!access_ok(VERIFY_WRITE, map, sizeof(*map))) ++ return -EFAULT; ++ if (__put_user((void *)(unsigned long)handle, &map->handle)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RM_MAP, (unsigned long)map); ++} ++ ++typedef struct drm_client32 { ++ int idx; /**< Which client desired? */ ++ int auth; /**< Is client authenticated? 
*/ ++ u32 pid; /**< Process ID */ ++ u32 uid; /**< User ID */ ++ u32 magic; /**< Magic */ ++ u32 iocs; /**< Ioctl count */ ++} drm_client32_t; ++ ++static int compat_drm_getclient(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_client32_t c32; ++ drm_client32_t __user *argp = (void __user *)arg; ++ struct drm_client __user *client; ++ int idx, err; ++ ++ if (get_user(idx, &argp->idx)) ++ return -EFAULT; ++ ++ client = compat_alloc_user_space(sizeof(*client)); ++ if (!access_ok(VERIFY_WRITE, client, sizeof(*client))) ++ return -EFAULT; ++ if (__put_user(idx, &client->idx)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_GET_CLIENT, (unsigned long)client); ++ if (err) ++ return err; ++ ++ if (__get_user(c32.auth, &client->auth) ++ || __get_user(c32.pid, &client->pid) ++ || __get_user(c32.uid, &client->uid) ++ || __get_user(c32.magic, &client->magic) ++ || __get_user(c32.iocs, &client->iocs)) ++ return -EFAULT; ++ ++ if (copy_to_user(argp, &c32, sizeof(c32))) ++ return -EFAULT; ++ return 0; ++} ++ ++typedef struct drm_stats32 { ++ u32 count; ++ struct { ++ u32 value; ++ enum drm_stat_type type; ++ } data[15]; ++} drm_stats32_t; ++ ++static int compat_drm_getstats(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_stats32_t s32; ++ drm_stats32_t __user *argp = (void __user *)arg; ++ struct drm_stats __user *stats; ++ int i, err; ++ ++ stats = compat_alloc_user_space(sizeof(*stats)); ++ if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats))) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_GET_STATS, (unsigned long)stats); ++ if (err) ++ return err; ++ ++ if (__get_user(s32.count, &stats->count)) ++ return -EFAULT; ++ for (i = 0; i < 15; ++i) ++ if (__get_user(s32.data[i].value, &stats->data[i].value) ++ || __get_user(s32.data[i].type, &stats->data[i].type)) ++ return -EFAULT; ++ ++ if (copy_to_user(argp, &s32, sizeof(s32))) ++ return -EFAULT; ++ return 0; ++} ++ ++typedef struct drm_buf_desc32 { ++ int count; /**< Number of buffers of this size */ ++ int size; /**< Size in bytes */ ++ int low_mark; /**< Low water mark */ ++ int high_mark; /**< High water mark */ ++ int flags; ++ u32 agp_start; /**< Start address in the AGP aperture */ ++} drm_buf_desc32_t; ++ ++static int compat_drm_addbufs(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_buf_desc32_t __user *argp = (void __user *)arg; ++ struct drm_buf_desc __user *buf; ++ int err; ++ unsigned long agp_start; ++ ++ buf = compat_alloc_user_space(sizeof(*buf)); ++ if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)) ++ || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))) ++ return -EFAULT; ++ ++ if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start)) ++ || __get_user(agp_start, &argp->agp_start) ++ || __put_user(agp_start, &buf->agp_start)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_ADD_BUFS, (unsigned long)buf); ++ if (err) ++ return err; ++ ++ if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start)) ++ || __get_user(agp_start, &buf->agp_start) ++ || __put_user(agp_start, &argp->agp_start)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int compat_drm_markbufs(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_buf_desc32_t b32; ++ drm_buf_desc32_t __user *argp = (void __user *)arg; ++ struct drm_buf_desc __user *buf; ++ ++ if (copy_from_user(&b32, argp, sizeof(b32))) ++ return -EFAULT; ++ ++ buf = 
compat_alloc_user_space(sizeof(*buf)); ++ if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))) ++ return -EFAULT; ++ ++ if (__put_user(b32.size, &buf->size) ++ || __put_user(b32.low_mark, &buf->low_mark) ++ || __put_user(b32.high_mark, &buf->high_mark)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_MARK_BUFS, (unsigned long)buf); ++} ++ ++typedef struct drm_buf_info32 { ++ int count; /**< Entries in list */ ++ u32 list; ++} drm_buf_info32_t; ++ ++static int compat_drm_infobufs(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_buf_info32_t req32; ++ drm_buf_info32_t __user *argp = (void __user *)arg; ++ drm_buf_desc32_t __user *to; ++ struct drm_buf_info __user *request; ++ struct drm_buf_desc __user *list; ++ size_t nbytes; ++ int i, err; ++ int count, actual; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ ++ count = req32.count; ++ to = (drm_buf_desc32_t __user *)(unsigned long)req32.list; ++ if (count < 0) ++ count = 0; ++ if (count > 0 ++ && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t))) ++ return -EFAULT; ++ ++ nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc); ++ request = compat_alloc_user_space(nbytes); ++ if (!access_ok(VERIFY_WRITE, request, nbytes)) ++ return -EFAULT; ++ list = (struct drm_buf_desc *) (request + 1); ++ ++ if (__put_user(count, &request->count) ++ || __put_user(list, &request->list)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_INFO_BUFS, (unsigned long)request); ++ if (err) ++ return err; ++ ++ if (__get_user(actual, &request->count)) ++ return -EFAULT; ++ if (count >= actual) ++ for (i = 0; i < actual; ++i) ++ if (__copy_in_user(&to[i], &list[i], ++ offsetof(struct drm_buf_desc, flags))) ++ return -EFAULT; ++ ++ if (__put_user(actual, &argp->count)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++typedef struct drm_buf_pub32 { ++ int idx; /**< Index into the master buffer list */ ++ int total; /**< Buffer size */ ++ int used; /**< Amount of buffer in use (for DMA) */ ++ u32 address; /**< Address of buffer */ ++} drm_buf_pub32_t; ++ ++typedef struct drm_buf_map32 { ++ int count; /**< Length of the buffer list */ ++ u32 virtual; /**< Mmap'd area in user-virtual */ ++ u32 list; /**< Buffer information */ ++} drm_buf_map32_t; ++ ++static int compat_drm_mapbufs(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_buf_map32_t __user *argp = (void __user *)arg; ++ drm_buf_map32_t req32; ++ drm_buf_pub32_t __user *list32; ++ struct drm_buf_map __user *request; ++ struct drm_buf_pub __user *list; ++ int i, err; ++ int count, actual; ++ size_t nbytes; ++ void __user *addr; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ count = req32.count; ++ list32 = (void __user *)(unsigned long)req32.list; ++ ++ if (count < 0) ++ return -EINVAL; ++ nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub); ++ request = compat_alloc_user_space(nbytes); ++ if (!access_ok(VERIFY_WRITE, request, nbytes)) ++ return -EFAULT; ++ list = (struct drm_buf_pub *) (request + 1); ++ ++ if (__put_user(count, &request->count) ++ || __put_user(list, &request->list)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_MAP_BUFS, (unsigned long)request); ++ if (err) ++ return err; ++ ++ if (__get_user(actual, &request->count)) ++ return -EFAULT; ++ if (count >= actual) ++ for (i = 0; i < actual; ++i) ++ if (__copy_in_user(&list32[i], &list[i], ++ offsetof(struct 
drm_buf_pub, address)) ++ || __get_user(addr, &list[i].address) ++ || __put_user((unsigned long)addr, ++ &list32[i].address)) ++ return -EFAULT; ++ ++ if (__put_user(actual, &argp->count) ++ || __get_user(addr, &request->virtual) ++ || __put_user((unsigned long)addr, &argp->virtual)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++typedef struct drm_buf_free32 { ++ int count; ++ u32 list; ++} drm_buf_free32_t; ++ ++static int compat_drm_freebufs(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_buf_free32_t req32; ++ struct drm_buf_free __user *request; ++ drm_buf_free32_t __user *argp = (void __user *)arg; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))) ++ return -EFAULT; ++ if (__put_user(req32.count, &request->count) ++ || __put_user((int __user *)(unsigned long)req32.list, ++ &request->list)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_FREE_BUFS, (unsigned long)request); ++} ++ ++typedef struct drm_ctx_priv_map32 { ++ unsigned int ctx_id; /**< Context requesting private mapping */ ++ u32 handle; /**< Handle of map */ ++} drm_ctx_priv_map32_t; ++ ++static int compat_drm_setsareactx(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_ctx_priv_map32_t req32; ++ struct drm_ctx_priv_map __user *request; ++ drm_ctx_priv_map32_t __user *argp = (void __user *)arg; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))) ++ return -EFAULT; ++ if (__put_user(req32.ctx_id, &request->ctx_id) ++ || __put_user((void *)(unsigned long)req32.handle, ++ &request->handle)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request); ++} ++ ++static int compat_drm_getsareactx(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ struct drm_ctx_priv_map __user *request; ++ drm_ctx_priv_map32_t __user *argp = (void __user *)arg; ++ int err; ++ unsigned int ctx_id; ++ void *handle; ++ ++ if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp)) ++ || __get_user(ctx_id, &argp->ctx_id)) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))) ++ return -EFAULT; ++ if (__put_user(ctx_id, &request->ctx_id)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request); ++ if (err) ++ return err; ++ ++ if (__get_user(handle, &request->handle) ++ || __put_user((unsigned long)handle, &argp->handle)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++typedef struct drm_ctx_res32 { ++ int count; ++ u32 contexts; ++} drm_ctx_res32_t; ++ ++static int compat_drm_resctx(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_ctx_res32_t __user *argp = (void __user *)arg; ++ drm_ctx_res32_t res32; ++ struct drm_ctx_res __user *res; ++ int err; ++ ++ if (copy_from_user(&res32, argp, sizeof(res32))) ++ return -EFAULT; ++ ++ res = compat_alloc_user_space(sizeof(*res)); ++ if (!access_ok(VERIFY_WRITE, res, sizeof(*res))) ++ return -EFAULT; ++ if (__put_user(res32.count, &res->count) ++ || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts, ++ &res->contexts)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ 
DRM_IOCTL_RES_CTX, (unsigned long)res); ++ if (err) ++ return err; ++ ++ if (__get_user(res32.count, &res->count) ++ || __put_user(res32.count, &argp->count)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++typedef struct drm_dma32 { ++ int context; /**< Context handle */ ++ int send_count; /**< Number of buffers to send */ ++ u32 send_indices; /**< List of handles to buffers */ ++ u32 send_sizes; /**< Lengths of data to send */ ++ enum drm_dma_flags flags; /**< Flags */ ++ int request_count; /**< Number of buffers requested */ ++ int request_size; /**< Desired size for buffers */ ++ u32 request_indices; /**< Buffer information */ ++ u32 request_sizes; ++ int granted_count; /**< Number of buffers granted */ ++} drm_dma32_t; ++ ++static int compat_drm_dma(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_dma32_t d32; ++ drm_dma32_t __user *argp = (void __user *)arg; ++ struct drm_dma __user *d; ++ int err; ++ ++ if (copy_from_user(&d32, argp, sizeof(d32))) ++ return -EFAULT; ++ ++ d = compat_alloc_user_space(sizeof(*d)); ++ if (!access_ok(VERIFY_WRITE, d, sizeof(*d))) ++ return -EFAULT; ++ ++ if (__put_user(d32.context, &d->context) ++ || __put_user(d32.send_count, &d->send_count) ++ || __put_user((int __user *)(unsigned long)d32.send_indices, ++ &d->send_indices) ++ || __put_user((int __user *)(unsigned long)d32.send_sizes, ++ &d->send_sizes) ++ || __put_user(d32.flags, &d->flags) ++ || __put_user(d32.request_count, &d->request_count) ++ || __put_user((int __user *)(unsigned long)d32.request_indices, ++ &d->request_indices) ++ || __put_user((int __user *)(unsigned long)d32.request_sizes, ++ &d->request_sizes)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_DMA, (unsigned long)d); ++ if (err) ++ return err; ++ ++ if (__get_user(d32.request_size, &d->request_size) ++ || __get_user(d32.granted_count, &d->granted_count) ++ || __put_user(d32.request_size, &argp->request_size) ++ || __put_user(d32.granted_count, &argp->granted_count)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++#if __OS_HAS_AGP ++typedef struct drm_agp_mode32 { ++ u32 mode; /**< AGP mode */ ++} drm_agp_mode32_t; ++ ++static int compat_drm_agp_enable(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_agp_mode32_t __user *argp = (void __user *)arg; ++ drm_agp_mode32_t m32; ++ struct drm_agp_mode __user *mode; ++ ++ if (get_user(m32.mode, &argp->mode)) ++ return -EFAULT; ++ ++ mode = compat_alloc_user_space(sizeof(*mode)); ++ if (put_user(m32.mode, &mode->mode)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_ENABLE, (unsigned long)mode); ++} ++ ++typedef struct drm_agp_info32 { ++ int agp_version_major; ++ int agp_version_minor; ++ u32 mode; ++ u32 aperture_base; /* physical address */ ++ u32 aperture_size; /* bytes */ ++ u32 memory_allowed; /* bytes */ ++ u32 memory_used; ++ ++ /* PCI information */ ++ unsigned short id_vendor; ++ unsigned short id_device; ++} drm_agp_info32_t; ++ ++static int compat_drm_agp_info(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_agp_info32_t __user *argp = (void __user *)arg; ++ drm_agp_info32_t i32; ++ struct drm_agp_info __user *info; ++ int err; ++ ++ info = compat_alloc_user_space(sizeof(*info)); ++ if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_INFO, (unsigned long)info); ++ if (err) ++ return err; ++ ++ if (__get_user(i32.agp_version_major, &info->agp_version_major) 
++ || __get_user(i32.agp_version_minor, &info->agp_version_minor) ++ || __get_user(i32.mode, &info->mode) ++ || __get_user(i32.aperture_base, &info->aperture_base) ++ || __get_user(i32.aperture_size, &info->aperture_size) ++ || __get_user(i32.memory_allowed, &info->memory_allowed) ++ || __get_user(i32.memory_used, &info->memory_used) ++ || __get_user(i32.id_vendor, &info->id_vendor) ++ || __get_user(i32.id_device, &info->id_device)) ++ return -EFAULT; ++ ++ if (copy_to_user(argp, &i32, sizeof(i32))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++typedef struct drm_agp_buffer32 { ++ u32 size; /**< In bytes -- will round to page boundary */ ++ u32 handle; /**< Used for binding / unbinding */ ++ u32 type; /**< Type of memory to allocate */ ++ u32 physical; /**< Physical used by i810 */ ++} drm_agp_buffer32_t; ++ ++static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_agp_buffer32_t __user *argp = (void __user *)arg; ++ drm_agp_buffer32_t req32; ++ struct drm_agp_buffer __user *request; ++ int err; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.size, &request->size) ++ || __put_user(req32.type, &request->type)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_ALLOC, (unsigned long)request); ++ if (err) ++ return err; ++ ++ if (__get_user(req32.handle, &request->handle) ++ || __get_user(req32.physical, &request->physical) ++ || copy_to_user(argp, &req32, sizeof(req32))) { ++ drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_FREE, (unsigned long)request); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++static int compat_drm_agp_free(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_agp_buffer32_t __user *argp = (void __user *)arg; ++ struct drm_agp_buffer __user *request; ++ u32 handle; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || get_user(handle, &argp->handle) ++ || __put_user(handle, &request->handle)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_FREE, (unsigned long)request); ++} ++ ++typedef struct drm_agp_binding32 { ++ u32 handle; /**< From drm_agp_buffer */ ++ u32 offset; /**< In bytes -- will round to page boundary */ ++} drm_agp_binding32_t; ++ ++static int compat_drm_agp_bind(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_agp_binding32_t __user *argp = (void __user *)arg; ++ drm_agp_binding32_t req32; ++ struct drm_agp_binding __user *request; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.handle, &request->handle) ++ || __put_user(req32.offset, &request->offset)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_BIND, (unsigned long)request); ++} ++ ++static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_agp_binding32_t __user *argp = (void __user *)arg; ++ struct drm_agp_binding __user *request; ++ u32 handle; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || get_user(handle, &argp->handle) ++ || __put_user(handle, &request->handle)) ++ 
return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_AGP_UNBIND, (unsigned long)request); ++} ++#endif /* __OS_HAS_AGP */ ++ ++typedef struct drm_scatter_gather32 { ++ u32 size; /**< In bytes -- will round to page boundary */ ++ u32 handle; /**< Used for mapping / unmapping */ ++} drm_scatter_gather32_t; ++ ++static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_scatter_gather32_t __user *argp = (void __user *)arg; ++ struct drm_scatter_gather __user *request; ++ int err; ++ unsigned long x; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)) ++ || __get_user(x, &argp->size) ++ || __put_user(x, &request->size)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_SG_ALLOC, (unsigned long)request); ++ if (err) ++ return err; ++ ++ /* XXX not sure about the handle conversion here... */ ++ if (__get_user(x, &request->handle) ++ || __put_user(x >> PAGE_SHIFT, &argp->handle)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int compat_drm_sg_free(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_scatter_gather32_t __user *argp = (void __user *)arg; ++ struct drm_scatter_gather __user *request; ++ unsigned long x; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)) ++ || __get_user(x, &argp->handle) ++ || __put_user(x << PAGE_SHIFT, &request->handle)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_SG_FREE, (unsigned long)request); ++} ++ ++struct drm_wait_vblank_request32 { ++ enum drm_vblank_seq_type type; ++ unsigned int sequence; ++ u32 signal; ++}; ++ ++struct drm_wait_vblank_reply32 { ++ enum drm_vblank_seq_type type; ++ unsigned int sequence; ++ s32 tval_sec; ++ s32 tval_usec; ++}; ++ ++typedef union drm_wait_vblank32 { ++ struct drm_wait_vblank_request32 request; ++ struct drm_wait_vblank_reply32 reply; ++} drm_wait_vblank32_t; ++ ++static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_wait_vblank32_t __user *argp = (void __user *)arg; ++ drm_wait_vblank32_t req32; ++ union drm_wait_vblank __user *request; ++ int err; ++ ++ if (copy_from_user(&req32, argp, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.request.type, &request->request.type) ++ || __put_user(req32.request.sequence, &request->request.sequence) ++ || __put_user(req32.request.signal, &request->request.signal)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_WAIT_VBLANK, (unsigned long)request); ++ if (err) ++ return err; ++ ++ if (__get_user(req32.reply.type, &request->reply.type) ++ || __get_user(req32.reply.sequence, &request->reply.sequence) ++ || __get_user(req32.reply.tval_sec, &request->reply.tval_sec) ++ || __get_user(req32.reply.tval_usec, &request->reply.tval_usec)) ++ return -EFAULT; ++ ++ if (copy_to_user(argp, &req32, sizeof(req32))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++drm_ioctl_compat_t *drm_compat_ioctls[] = { ++ [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version, ++ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique, ++ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap, 
++ [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient, ++ [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats, ++ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique, ++ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap, ++ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs, ++ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs, ++ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs, ++ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs, ++ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs, ++ [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap, ++ [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx, ++ [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx, ++ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx, ++ [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma, ++#if __OS_HAS_AGP ++ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable, ++ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info, ++ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc, ++ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free, ++ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind, ++ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind, ++#endif ++ [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc, ++ [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free, ++ [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank, ++}; ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/drm. ++ * ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. ++ */ ++long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ drm_ioctl_compat_t *fn; ++ int ret; ++ ++ ++ /* Assume that ioctls without an explicit compat routine will "just ++ * work". This may not always be a good assumption, but it's better ++ * than always failing. ++ */ ++ if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls)) ++ return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ ++ fn = drm_compat_ioctls[nr]; ++ ++ lock_kernel(); /* XXX for now */ ++ if (fn != NULL) ++ ret = (*fn)(filp, cmd, arg); ++ else ++ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ unlock_kernel(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_compat_ioctl); +diff -Nurd git/drivers/gpu/drm-tungsten/drm_ioctl.c git-nokia/drivers/gpu/drm-tungsten/drm_ioctl.c +--- git/drivers/gpu/drm-tungsten/drm_ioctl.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_ioctl.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,351 @@ ++/** ++ * \file drm_ioctl.c ++ * IOCTL processing for DRM ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm_core.h" ++ ++#include "linux/pci.h" ++ ++/** ++ * Get the bus id. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_unique structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Copies the bus id from drm_device::unique into user space. ++ */ ++int drm_getunique(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_unique *u = data; ++ ++ if (u->unique_len >= dev->unique_len) { ++ if (copy_to_user(u->unique, dev->unique, dev->unique_len)) ++ return -EFAULT; ++ } ++ u->unique_len = dev->unique_len; ++ ++ return 0; ++} ++ ++/** ++ * Set the bus id. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_unique structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Copies the bus id from userspace into drm_device::unique, and verifies that ++ * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated ++ * in interface version 1.1 and will return EBUSY when setversion has requested ++ * version 1.1 or greater. ++ */ ++int drm_setunique(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_unique *u = data; ++ int domain, bus, slot, func, ret; ++ ++ if (dev->unique_len || dev->unique) ++ return -EBUSY; ++ ++ if (!u->unique_len || u->unique_len > 1024) ++ return -EINVAL; ++ ++ dev->unique_len = u->unique_len; ++ dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER); ++ if (!dev->unique) ++ return -ENOMEM; ++ if (copy_from_user(dev->unique, u->unique, dev->unique_len)) ++ return -EFAULT; ++ ++ dev->unique[dev->unique_len] = '\0'; ++ ++ dev->devname = ++ drm_alloc(strlen(dev->driver->pci_driver.name) + ++ strlen(dev->unique) + 2, DRM_MEM_DRIVER); ++ if (!dev->devname) ++ return -ENOMEM; ++ ++ sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, ++ dev->unique); ++ ++ /* Return error if the busid submitted doesn't match the device's actual ++ * busid. 
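++ *
++ * For example (the values are illustrative only), a unique string of
++ * "PCI:1:2:0" parses to bus = 1, slot = 2, func = 0; the PCI domain
++ * travels in the upper bits of the bus number, so domain = bus >> 8
++ * (0 here) and the bus proper is bus & 0xff.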
++ */ ++ ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); ++ if (ret != 3) ++ return -EINVAL; ++ domain = bus >> 8; ++ bus &= 0xff; ++ ++ if ((domain != drm_get_pci_domain(dev)) || ++ (bus != dev->pdev->bus->number) || ++ (slot != PCI_SLOT(dev->pdev->devfn)) || ++ (func != PCI_FUNC(dev->pdev->devfn))) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static int drm_set_busid(struct drm_device * dev) ++{ ++ int len; ++ if (dev->unique != NULL) ++ return -EBUSY; ++ ++ dev->unique_len = 40; ++ dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER); ++ if (dev->unique == NULL) ++ return -ENOMEM; ++ ++ len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d", ++ drm_get_pci_domain(dev), ++ dev->pdev->bus->number, ++ PCI_SLOT(dev->pdev->devfn), ++ PCI_FUNC(dev->pdev->devfn)); ++ if (len > dev->unique_len) ++ DRM_ERROR("buffer overflow"); ++ ++ dev->devname = ++ drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + ++ 2, DRM_MEM_DRIVER); ++ if (dev->devname == NULL) ++ return -ENOMEM; ++ ++ sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, ++ dev->unique); ++ ++ return 0; ++} ++ ++/** ++ * Get a mapping information. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_map structure. ++ * ++ * \return zero on success or a negative number on failure. ++ * ++ * Searches for the mapping with the specified offset and copies its information ++ * into userspace ++ */ ++int drm_getmap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_map *map = data; ++ struct drm_map_list *r_list = NULL; ++ struct list_head *list; ++ int idx; ++ int i; ++ ++ idx = map->offset; ++ ++ mutex_lock(&dev->struct_mutex); ++ if (idx < 0) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ i = 0; ++ list_for_each(list, &dev->maplist) { ++ if (i == idx) { ++ r_list = list_entry(list, struct drm_map_list, head); ++ break; ++ } ++ i++; ++ } ++ if (!r_list || !r_list->map) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ map->offset = r_list->map->offset; ++ map->size = r_list->map->size; ++ map->type = r_list->map->type; ++ map->flags = r_list->map->flags; ++ map->handle = (void *)(unsigned long) r_list->user_token; ++ map->mtrr = r_list->map->mtrr; ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/** ++ * Get client information. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_client structure. ++ * ++ * \return zero on success or a negative number on failure. ++ * ++ * Searches for the client with the specified index and copies its information ++ * into userspace ++ */ ++int drm_getclient(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_client *client = data; ++ struct drm_file *pt; ++ int idx; ++ int i; ++ ++ idx = client->idx; ++ mutex_lock(&dev->struct_mutex); ++ ++ i = 0; ++ list_for_each_entry(pt, &dev->filelist, lhead) { ++ if (i++ >= idx) { ++ client->auth = pt->authenticated; ++ client->pid = pt->pid; ++ client->uid = pt->uid; ++ client->magic = pt->magic; ++ client->iocs = pt->ioctl_count; ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++ } ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++ return -EINVAL; ++} ++ ++/** ++ * Get statistics information. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. 
++ * \param arg user argument, pointing to a drm_stats structure. ++ * ++ * \return zero on success or a negative number on failure. ++ */ ++int drm_getstats(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_stats *stats = data; ++ int i; ++ ++ memset(stats, 0, sizeof(*stats)); ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ for (i = 0; i < dev->counters; i++) { ++ if (dev->types[i] == _DRM_STAT_LOCK) ++ stats->data[i].value = ++ (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); ++ else ++ stats->data[i].value = atomic_read(&dev->counts[i]); ++ stats->data[i].type = dev->types[i]; ++ } ++ ++ stats->count = dev->counters; ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/** ++ * Setversion ioctl. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_lock structure. ++ * \return zero on success or negative number on failure. ++ * ++ * Sets the requested interface version ++ */ ++int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_set_version *sv = data; ++ int if_version, retcode = 0; ++ ++ if (sv->drm_di_major != -1) { ++ if (sv->drm_di_major != DRM_IF_MAJOR || ++ sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) { ++ retcode = -EINVAL; ++ goto done; ++ } ++ if_version = DRM_IF_VERSION(sv->drm_di_major, ++ sv->drm_di_minor); ++ dev->if_version = max(if_version, dev->if_version); ++ if (sv->drm_di_minor >= 1) { ++ /* ++ * Version 1.1 includes tying of DRM to specific device ++ */ ++ drm_set_busid(dev); ++ } ++ } ++ ++ if (sv->drm_dd_major != -1) { ++ if (sv->drm_dd_major != dev->driver->major || ++ sv->drm_dd_minor < 0 || sv->drm_dd_minor > ++ dev->driver->minor) { ++ retcode = -EINVAL; ++ goto done; ++ } ++ ++ if (dev->driver->set_version) ++ dev->driver->set_version(dev, sv); ++ } ++ ++done: ++ sv->drm_di_major = DRM_IF_MAJOR; ++ sv->drm_di_minor = DRM_IF_MINOR; ++ sv->drm_dd_major = dev->driver->major; ++ sv->drm_dd_minor = dev->driver->minor; ++ ++ return retcode; ++} ++ ++/** No-op ioctl. */ ++int drm_noop(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ DRM_DEBUG("\n"); ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/drm_irq.c git-nokia/drivers/gpu/drm-tungsten/drm_irq.c +--- git/drivers/gpu/drm-tungsten/drm_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_irq.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,771 @@ ++/** ++ * \file drm_irq.c ++ * IRQ support ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com ++ * ++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++#include <linux/interrupt.h>	/* For task queue support */
++
++/**
++ * Get interrupt from bus id.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_irq_busid structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Finds the PCI device with the specified bus id and gets its IRQ number.
++ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
++ * to that of the device that this DRM instance attached to.
++ */
++int drm_irq_by_busid(struct drm_device *dev, void *data,
++		     struct drm_file *file_priv)
++{
++	struct drm_irq_busid *p = data;
++
++	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++		return -EINVAL;
++
++	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
++	    (p->busnum & 0xff) != dev->pdev->bus->number ||
++	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
++		return -EINVAL;
++
++	p->irq = dev->pdev->irq;
++
++	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
++		  p->irq);
++
++	return 0;
++}
++
++static void vblank_disable_fn(unsigned long arg)
++{
++	struct drm_device *dev = (struct drm_device *)arg;
++	unsigned long irqflags;
++	int i;
++
++	if (!dev->vblank_disable_allowed)
++		return;
++
++	for (i = 0; i < dev->num_crtcs; i++) {
++		spin_lock_irqsave(&dev->vbl_lock, irqflags);
++		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
++		    dev->vblank_enabled[i]) {
++			DRM_DEBUG("disabling vblank on crtc %d\n", i);
++			dev->last_vblank[i] =
++				dev->driver->get_vblank_counter(dev, i);
++			dev->driver->disable_vblank(dev, i);
++			dev->vblank_enabled[i] = 0;
++		}
++		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++	}
++}
++
++static void drm_vblank_cleanup(struct drm_device *dev)
++{
++	/* Bail if the driver didn't call drm_vblank_init() */
++	if (dev->num_crtcs == 0)
++		return;
++
++	del_timer(&dev->vblank_disable_timer);
++
++	vblank_disable_fn((unsigned long)dev);
++
++	drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
++		 DRM_MEM_DRIVER);
++	drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
++		 DRM_MEM_DRIVER);
++	drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
++		 dev->num_crtcs, DRM_MEM_DRIVER);
++	drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
++		 dev->num_crtcs, DRM_MEM_DRIVER);
++	drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
++		 dev->num_crtcs, DRM_MEM_DRIVER);
++	drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
++		 DRM_MEM_DRIVER);
++	drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
++		 dev->num_crtcs, DRM_MEM_DRIVER);
++
++	dev->num_crtcs = 0;
++}
++
++int drm_vblank_init(struct drm_device *dev, int num_crtcs)
++{
++	int i, ret = -ENOMEM;
++
++	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
++		    (unsigned long)dev);
++	init_timer_deferrable(&dev->vblank_disable_timer);
++	spin_lock_init(&dev->vbl_lock);
++	atomic_set(&dev->vbl_signal_pending, 0);
++	
dev->num_crtcs = num_crtcs; ++ ++ dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, ++ DRM_MEM_DRIVER); ++ if (!dev->vbl_queue) ++ goto err; ++ ++ dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs, ++ DRM_MEM_DRIVER); ++ if (!dev->vbl_sigs) ++ goto err; ++ ++ dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, ++ DRM_MEM_DRIVER); ++ if (!dev->_vblank_count) ++ goto err; ++ ++ dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs, ++ DRM_MEM_DRIVER); ++ if (!dev->vblank_refcount) ++ goto err; ++ ++ dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int), ++ DRM_MEM_DRIVER); ++ if (!dev->vblank_enabled) ++ goto err; ++ ++ dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER); ++ if (!dev->last_vblank) ++ goto err; ++ ++ dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int), ++ DRM_MEM_DRIVER); ++ if (!dev->vblank_inmodeset) ++ goto err; ++ ++ /* Zero per-crtc vblank stuff */ ++ for (i = 0; i < num_crtcs; i++) { ++ init_waitqueue_head(&dev->vbl_queue[i]); ++ INIT_LIST_HEAD(&dev->vbl_sigs[i]); ++ atomic_set(&dev->_vblank_count[i], 0); ++ atomic_set(&dev->vblank_refcount[i], 0); ++ } ++ ++ dev->vblank_disable_allowed = 0; ++ ++ return 0; ++ ++err: ++ drm_vblank_cleanup(dev); ++ return ret; ++} ++EXPORT_SYMBOL(drm_vblank_init); ++ ++/** ++ * Install IRQ handler. ++ * ++ * \param dev DRM device. ++ * ++ * Initializes the IRQ related data. Installs the handler, calling the driver ++ * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions ++ * before and after the installation. ++ */ ++int drm_irq_install(struct drm_device * dev) ++{ ++ int ret = 0; ++ unsigned long sh_flags = 0; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) ++ return -EINVAL; ++ ++ if (dev->pdev->irq == 0) ++ return -EINVAL; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ /* Driver must have been initialized */ ++ if (!dev->dev_private) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ if (dev->irq_enabled) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EBUSY; ++ } ++ dev->irq_enabled = 1; ++ mutex_unlock(&dev->struct_mutex); ++ ++ DRM_DEBUG("irq=%d\n", dev->pdev->irq); ++ ++ /* Before installing handler */ ++ dev->driver->irq_preinstall(dev); ++ ++ /* Install handler */ ++ if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) ++ sh_flags = IRQF_SHARED; ++ ++ ret = request_irq(dev->pdev->irq, dev->driver->irq_handler, ++ sh_flags, dev->devname, dev); ++ if (ret < 0) { ++ mutex_lock(&dev->struct_mutex); ++ dev->irq_enabled = 0; ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ /* Expose the device irq to device drivers that want to export it for ++ * whatever reason. ++ */ ++ dev->irq = dev->pdev->irq; ++ ++ /* After installing handler */ ++ ret = dev->driver->irq_postinstall(dev); ++ if (ret < 0) { ++ mutex_lock(&dev->struct_mutex); ++ dev->irq_enabled = 0; ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_irq_install); ++ ++/** ++ * Uninstall the IRQ handler. ++ * ++ * \param dev DRM device. ++ * ++ * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq. 
++ */ ++int drm_irq_uninstall(struct drm_device * dev) ++{ ++ int irq_enabled; ++ ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) ++ return -EINVAL; ++ ++ mutex_lock(&dev->struct_mutex); ++ irq_enabled = dev->irq_enabled; ++ dev->irq_enabled = 0; ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!irq_enabled) ++ return -EINVAL; ++ ++ DRM_DEBUG("irq=%d\n", dev->pdev->irq); ++ ++ dev->driver->irq_uninstall(dev); ++ ++ free_irq(dev->pdev->irq, dev); ++ ++ drm_vblank_cleanup(dev); ++ ++ dev->locked_tasklet_func = NULL; ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_irq_uninstall); ++ ++/** ++ * IRQ control ioctl. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_control structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Calls irq_install() or irq_uninstall() according to \p arg. ++ */ ++int drm_control(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_control *ctl = data; ++ ++ /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ ++ ++ ++ switch (ctl->func) { ++ case DRM_INST_HANDLER: ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) ++ return 0; ++ if (dev->if_version < DRM_IF_VERSION(1, 2) && ++ ctl->irq != dev->pdev->irq) ++ return -EINVAL; ++ return drm_irq_install(dev); ++ case DRM_UNINST_HANDLER: ++ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) ++ return 0; ++ return drm_irq_uninstall(dev); ++ default: ++ return -EINVAL; ++ } ++} ++ ++/** ++ * drm_vblank_count - retrieve "cooked" vblank counter value ++ * @dev: DRM device ++ * @crtc: which counter to retrieve ++ * ++ * Fetches the "cooked" vblank count value that represents the number of ++ * vblank events since the system was booted, including lost events due to ++ * modesetting activity. ++ */ ++u32 drm_vblank_count(struct drm_device *dev, int crtc) ++{ ++ return atomic_read(&dev->_vblank_count[crtc]); ++} ++EXPORT_SYMBOL(drm_vblank_count); ++ ++/** ++ * drm_update_vblank_count - update the master vblank counter ++ * @dev: DRM device ++ * @crtc: counter to update ++ * ++ * Call back into the driver to update the appropriate vblank counter ++ * (specified by @crtc). Deal with wraparound, if it occurred, and ++ * update the last read value so we can deal with wraparound on the next ++ * call if necessary. ++ * ++ * Only necessary when going from off->on, to account for frames we ++ * didn't get an interrupt for. ++ * ++ * Note: caller must hold dev->vbl_lock since this reads & writes ++ * device vblank fields. ++ */ ++static void drm_update_vblank_count(struct drm_device *dev, int crtc) ++{ ++ u32 cur_vblank, diff; ++ ++ /* ++ * Interrupts were disabled prior to this call, so deal with counter ++ * wrap if needed. ++ * NOTE! It's possible we lost a full dev->max_vblank_count events ++ * here if the register is small or we had vblank interrupts off for ++ * a long time. 
++ */ ++ cur_vblank = dev->driver->get_vblank_counter(dev, crtc); ++ diff = cur_vblank - dev->last_vblank[crtc]; ++ if (cur_vblank < dev->last_vblank[crtc]) { ++ diff += dev->max_vblank_count; ++ ++ DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", ++ crtc, dev->last_vblank[crtc], cur_vblank, diff); ++ } ++ ++ DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", ++ crtc, diff); ++ ++ atomic_add(diff, &dev->_vblank_count[crtc]); ++} ++ ++/** ++ * drm_vblank_get - get a reference count on vblank events ++ * @dev: DRM device ++ * @crtc: which CRTC to own ++ * ++ * Acquire a reference count on vblank events to avoid having them disabled ++ * while in use. ++ * ++ * RETURNS ++ * Zero on success, nonzero on failure. ++ */ ++int drm_vblank_get(struct drm_device *dev, int crtc) ++{ ++ unsigned long irqflags; ++ int ret = 0; ++ ++ spin_lock_irqsave(&dev->vbl_lock, irqflags); ++ /* Going from 0->1 means we have to enable interrupts again */ ++ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 && ++ !dev->vblank_enabled[crtc]) { ++ ret = dev->driver->enable_vblank(dev, crtc); ++ if (ret) ++ atomic_dec(&dev->vblank_refcount[crtc]); ++ else { ++ dev->vblank_enabled[crtc] = 1; ++ drm_update_vblank_count(dev, crtc); ++ } ++ } ++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_vblank_get); ++ ++/** ++ * drm_vblank_put - give up ownership of vblank events ++ * @dev: DRM device ++ * @crtc: which counter to give up ++ * ++ * Release ownership of a given vblank counter, turning off interrupts ++ * if possible. ++ */ ++void drm_vblank_put(struct drm_device *dev, int crtc) ++{ ++ /* Last user schedules interrupt disable */ ++ if (atomic_dec_and_test(&dev->vblank_refcount[crtc])) ++ mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ); ++} ++EXPORT_SYMBOL(drm_vblank_put); ++ ++/** ++ * drm_modeset_ctl - handle vblank event counter changes across mode switch ++ * @DRM_IOCTL_ARGS: standard ioctl arguments ++ * ++ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET ++ * ioctls around modesetting so that any lost vblank events are accounted for. ++ * ++ * Generally the counter will reset across mode sets. If interrupts are ++ * enabled around this call, we don't have to do anything since the counter ++ * will have already been incremented. ++ */ ++int drm_modeset_ctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_modeset_ctl *modeset = data; ++ unsigned long irqflags; ++ int crtc, ret = 0; ++ ++ /* If drm_vblank_init() hasn't been called yet, just no-op */ ++ if (!dev->num_crtcs) ++ goto out; ++ ++ crtc = modeset->crtc; ++ if (crtc >= dev->num_crtcs) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* ++ * To avoid all the problems that might happen if interrupts ++ * were enabled/disabled around or between these calls, we just ++ * have the kernel take a reference on the CRTC (just once though ++ * to avoid corrupting the count if multiple, mismatch calls occur), ++ * so that interrupts remain enabled in the interim. 
++ */ ++ switch (modeset->cmd) { ++ case _DRM_PRE_MODESET: ++ if (!dev->vblank_inmodeset[crtc]) { ++ dev->vblank_inmodeset[crtc] = 1; ++ drm_vblank_get(dev, crtc); ++ } ++ break; ++ case _DRM_POST_MODESET: ++ if (dev->vblank_inmodeset[crtc]) { ++ spin_lock_irqsave(&dev->vbl_lock, irqflags); ++ dev->vblank_disable_allowed = 1; ++ dev->vblank_inmodeset[crtc] = 0; ++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags); ++ drm_vblank_put(dev, crtc); ++ } ++ break; ++ default: ++ ret = -EINVAL; ++ break; ++ } ++ ++out: ++ return ret; ++} ++ ++/** ++ * Wait for VBLANK. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param data user argument, pointing to a drm_wait_vblank structure. ++ * \return zero on success or a negative number on failure. ++ * ++ * Verifies the IRQ is installed. ++ * ++ * If a signal is requested checks if this task has already scheduled the same signal ++ * for the same vblank sequence number - nothing to be done in ++ * that case. If the number of tasks waiting for the interrupt exceeds 100 the ++ * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this ++ * task. ++ * ++ * If a signal is not requested, then calls vblank_wait(). ++ */ ++int drm_wait_vblank(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ union drm_wait_vblank *vblwait = data; ++ int ret = 0; ++ unsigned int flags, seq, crtc; ++ ++ if ((!dev->pdev->irq) || (!dev->irq_enabled)) ++ return -EINVAL; ++ ++ if (vblwait->request.type & ++ ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { ++ DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", ++ vblwait->request.type, ++ (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); ++ return -EINVAL; ++ } ++ ++ flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; ++ crtc = flags & _DRM_VBLANK_SECONDARY ? 
1 : 0; ++ ++ if (crtc >= dev->num_crtcs) ++ return -EINVAL; ++ ++ ret = drm_vblank_get(dev, crtc); ++ if (ret) ++ return ret; ++ seq = drm_vblank_count(dev, crtc); ++ ++ switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { ++ case _DRM_VBLANK_RELATIVE: ++ vblwait->request.sequence += seq; ++ vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; ++ case _DRM_VBLANK_ABSOLUTE: ++ break; ++ default: ++ ret = -EINVAL; ++ goto done; ++ } ++ ++ if ((flags & _DRM_VBLANK_NEXTONMISS) && ++ (seq - vblwait->request.sequence) <= (1<<23)) { ++ vblwait->request.sequence = seq + 1; ++ } ++ ++ if (flags & _DRM_VBLANK_SIGNAL) { ++ unsigned long irqflags; ++ struct list_head *vbl_sigs = &dev->vbl_sigs[crtc]; ++ struct drm_vbl_sig *vbl_sig; ++ ++ spin_lock_irqsave(&dev->vbl_lock, irqflags); ++ ++ /* Check if this task has already scheduled the same signal ++ * for the same vblank sequence number; nothing to be done in ++ * that case ++ */ ++ list_for_each_entry(vbl_sig, vbl_sigs, head) { ++ if (vbl_sig->sequence == vblwait->request.sequence ++ && vbl_sig->info.si_signo == ++ vblwait->request.signal ++ && vbl_sig->task == current) { ++ spin_unlock_irqrestore(&dev->vbl_lock, ++ irqflags); ++ vblwait->reply.sequence = seq; ++ goto done; ++ } ++ } ++ ++ if (atomic_read(&dev->vbl_signal_pending) >= 100) { ++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags); ++ ret = -EBUSY; ++ goto done; ++ } ++ ++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags); ++ ++ vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig), ++ DRM_MEM_DRIVER); ++ if (!vbl_sig) { ++ ret = -ENOMEM; ++ goto done; ++ } ++ ++ ret = drm_vblank_get(dev, crtc); ++ if (ret) { ++ drm_free(vbl_sig, sizeof(struct drm_vbl_sig), ++ DRM_MEM_DRIVER); ++ return ret; ++ } ++ ++ atomic_inc(&dev->vbl_signal_pending); ++ ++ vbl_sig->sequence = vblwait->request.sequence; ++ vbl_sig->info.si_signo = vblwait->request.signal; ++ vbl_sig->task = current; ++ ++ spin_lock_irqsave(&dev->vbl_lock, irqflags); ++ ++ list_add_tail(&vbl_sig->head, vbl_sigs); ++ ++ spin_unlock_irqrestore(&dev->vbl_lock, irqflags); ++ ++ vblwait->reply.sequence = seq; ++ } else { ++ DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, ++ ((drm_vblank_count(dev, crtc) ++ - vblwait->request.sequence) <= (1 << 23))); ++ ++ if (ret != -EINTR) { ++ struct timeval now; ++ ++ do_gettimeofday(&now); ++ ++ vblwait->reply.tval_sec = now.tv_sec; ++ vblwait->reply.tval_usec = now.tv_usec; ++ vblwait->reply.sequence = drm_vblank_count(dev, crtc); ++ } ++ } ++ ++done: ++ drm_vblank_put(dev, crtc); ++ return ret; ++} ++ ++/** ++ * Send the VBLANK signals. ++ * ++ * \param dev DRM device. ++ * \param crtc CRTC where the vblank event occurred ++ * ++ * Sends a signal for each task in drm_device::vbl_sigs and empties the list. ++ * ++ * If a signal is not requested, then calls vblank_wait(). 
++ */ ++static void drm_vbl_send_signals(struct drm_device * dev, int crtc) ++{ ++ struct drm_vbl_sig *vbl_sig, *tmp; ++ struct list_head *vbl_sigs; ++ unsigned int vbl_seq; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dev->vbl_lock, flags); ++ ++ vbl_sigs = &dev->vbl_sigs[crtc]; ++ vbl_seq = drm_vblank_count(dev, crtc); ++ ++ list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) { ++ if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { ++ vbl_sig->info.si_code = vbl_seq; ++ send_sig_info(vbl_sig->info.si_signo, ++ &vbl_sig->info, vbl_sig->task); ++ ++ list_del(&vbl_sig->head); ++ ++ drm_free(vbl_sig, sizeof(*vbl_sig), ++ DRM_MEM_DRIVER); ++ atomic_dec(&dev->vbl_signal_pending); ++ drm_vblank_put(dev, crtc); ++ } ++ } ++ ++ spin_unlock_irqrestore(&dev->vbl_lock, flags); ++} ++ ++/** ++ * drm_handle_vblank - handle a vblank event ++ * @dev: DRM device ++ * @crtc: where this event occurred ++ * ++ * Drivers should call this routine in their vblank interrupt handlers to ++ * update the vblank counter and send any signals that may be pending. ++ */ ++void drm_handle_vblank(struct drm_device *dev, int crtc) ++{ ++ atomic_inc(&dev->_vblank_count[crtc]); ++ DRM_WAKEUP(&dev->vbl_queue[crtc]); ++ drm_vbl_send_signals(dev, crtc); ++} ++EXPORT_SYMBOL(drm_handle_vblank); ++ ++/** ++ * Tasklet wrapper function. ++ * ++ * \param data DRM device in disguise. ++ * ++ * Attempts to grab the HW lock and calls the driver callback on success. On ++ * failure, leave the lock marked as contended so the callback can be called ++ * from drm_unlock(). ++ */ ++static void drm_locked_tasklet_func(unsigned long data) ++{ ++ struct drm_device *dev = (struct drm_device *)data; ++ unsigned long irqflags; ++ void (*tasklet_func)(struct drm_device *); ++ ++ spin_lock_irqsave(&dev->tasklet_lock, irqflags); ++ tasklet_func = dev->locked_tasklet_func; ++ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); ++ ++ if (!tasklet_func || ++ !drm_lock_take(&dev->lock, ++ DRM_KERNEL_CONTEXT)) { ++ return; ++ } ++ ++ dev->lock.lock_time = jiffies; ++ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); ++ ++ spin_lock_irqsave(&dev->tasklet_lock, irqflags); ++ tasklet_func = dev->locked_tasklet_func; ++ dev->locked_tasklet_func = NULL; ++ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); ++ ++ if (tasklet_func != NULL) ++ tasklet_func(dev); ++ ++ drm_lock_free(&dev->lock, ++ DRM_KERNEL_CONTEXT); ++} ++ ++/** ++ * Schedule a tasklet to call back a driver hook with the HW lock held. ++ * ++ * \param dev DRM device. ++ * \param func Driver callback. ++ * ++ * This is intended for triggering actions that require the HW lock from an ++ * interrupt handler. The lock will be grabbed ASAP after the interrupt handler ++ * completes. Note that the callback may be called from interrupt or process ++ * context, it must not make any assumptions about this. Also, the HW lock will ++ * be held with the kernel context or any client context. 
++ */
++void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
++{
++	unsigned long irqflags;
++	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
++
++	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
++	    test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
++		return;
++
++	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
++
++	if (dev->locked_tasklet_func) {
++		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
++		return;
++	}
++
++	dev->locked_tasklet_func = func;
++
++	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
++
++	drm_tasklet.data = (unsigned long)dev;
++
++	tasklet_hi_schedule(&drm_tasklet);
++}
++EXPORT_SYMBOL(drm_locked_tasklet);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_lock.c git-nokia/drivers/gpu/drm-tungsten/drm_lock.c
+--- git/drivers/gpu/drm-tungsten/drm_lock.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_lock.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,389 @@
++/**
++ * \file drm_lock.c
++ * IOCTLs for locking
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++static int drm_notifier(void *priv);
++
++/**
++ * Lock ioctl.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_lock structure.
++ * \return zero on success or negative number on failure.
++ *
++ * Add the current task to the lock wait queue, and attempt to take the lock.
++ */ ++int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ DECLARE_WAITQUEUE(entry, current); ++ struct drm_lock *lock = data; ++ int ret = 0; ++ ++ ++file_priv->lock_count; ++ ++ if (lock->context == DRM_KERNEL_CONTEXT) { ++ DRM_ERROR("Process %d using kernel context %d\n", ++ current->pid, lock->context); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", ++ lock->context, current->pid, ++ dev->lock.hw_lock->lock, lock->flags); ++ ++ if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) ++ if (lock->context < 0) ++ return -EINVAL; ++ ++ add_wait_queue(&dev->lock.lock_queue, &entry); ++ spin_lock_bh(&dev->lock.spinlock); ++ dev->lock.user_waiters++; ++ spin_unlock_bh(&dev->lock.spinlock); ++ for (;;) { ++ __set_current_state(TASK_INTERRUPTIBLE); ++ if (!dev->lock.hw_lock) { ++ /* Device has been unregistered */ ++ ret = -EINTR; ++ break; ++ } ++ if (drm_lock_take(&dev->lock, lock->context)) { ++ dev->lock.file_priv = file_priv; ++ dev->lock.lock_time = jiffies; ++ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); ++ break; /* Got lock */ ++ } ++ ++ /* Contention */ ++ schedule(); ++ if (signal_pending(current)) { ++ ret = -ERESTARTSYS; ++ break; ++ } ++ } ++ spin_lock_bh(&dev->lock.spinlock); ++ dev->lock.user_waiters--; ++ spin_unlock_bh(&dev->lock.spinlock); ++ __set_current_state(TASK_RUNNING); ++ remove_wait_queue(&dev->lock.lock_queue, &entry); ++ ++ DRM_DEBUG("%d %s\n", lock->context, ++ ret ? "interrupted" : "has lock"); ++ if (ret) return ret; ++ ++ /* don't set the block all signals on the master process for now ++ * really probably not the correct answer but lets us debug xkb ++ * xserver for now */ ++ if (!file_priv->master) { ++ sigemptyset(&dev->sigmask); ++ sigaddset(&dev->sigmask, SIGSTOP); ++ sigaddset(&dev->sigmask, SIGTSTP); ++ sigaddset(&dev->sigmask, SIGTTIN); ++ sigaddset(&dev->sigmask, SIGTTOU); ++ dev->sigdata.context = lock->context; ++ dev->sigdata.lock = dev->lock.hw_lock; ++ block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); ++ } ++ ++ if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY)) ++ dev->driver->dma_ready(dev); ++ ++ if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) ++ { ++ if (dev->driver->dma_quiescent(dev)) { ++ DRM_DEBUG("%d waiting for DMA quiescent\n", ++ lock->context); ++ return -EBUSY; ++ } ++ } ++ ++ if (dev->driver->kernel_context_switch && ++ dev->last_context != lock->context) { ++ dev->driver->kernel_context_switch(dev, dev->last_context, ++ lock->context); ++ } ++ ++ return 0; ++} ++ ++/** ++ * Unlock ioctl. ++ * ++ * \param inode device inode. ++ * \param file_priv DRM file private. ++ * \param cmd command. ++ * \param arg user argument, pointing to a drm_lock structure. ++ * \return zero on success or negative number on failure. ++ * ++ * Transfer and free the lock. 
++ */ ++int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_lock *lock = data; ++ unsigned long irqflags; ++ void (*tasklet_func)(struct drm_device *); ++ ++ if (lock->context == DRM_KERNEL_CONTEXT) { ++ DRM_ERROR("Process %d using kernel context %d\n", ++ current->pid, lock->context); ++ return -EINVAL; ++ } ++ ++ spin_lock_irqsave(&dev->tasklet_lock, irqflags); ++ tasklet_func = dev->locked_tasklet_func; ++ dev->locked_tasklet_func = NULL; ++ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); ++ if (tasklet_func != NULL) ++ tasklet_func(dev); ++ ++ atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); ++ ++ /* kernel_context_switch isn't used by any of the x86 drm ++ * modules but is required by the Sparc driver. ++ */ ++ if (dev->driver->kernel_context_switch_unlock) ++ dev->driver->kernel_context_switch_unlock(dev); ++ else { ++ if (drm_lock_free(&dev->lock,lock->context)) { ++ /* FIXME: Should really bail out here. */ ++ } ++ } ++ ++ unblock_all_signals(); ++ return 0; ++} ++ ++/** ++ * Take the heavyweight lock. ++ * ++ * \param lock lock pointer. ++ * \param context locking context. ++ * \return one if the lock is held, or zero otherwise. ++ * ++ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. ++ */ ++int drm_lock_take(struct drm_lock_data *lock_data, ++ unsigned int context) ++{ ++ unsigned int old, new, prev; ++ volatile unsigned int *lock = &lock_data->hw_lock->lock; ++ ++ spin_lock_bh(&lock_data->spinlock); ++ do { ++ old = *lock; ++ if (old & _DRM_LOCK_HELD) ++ new = old | _DRM_LOCK_CONT; ++ else { ++ new = context | _DRM_LOCK_HELD | ++ ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? ++ _DRM_LOCK_CONT : 0); ++ } ++ prev = cmpxchg(lock, old, new); ++ } while (prev != old); ++ spin_unlock_bh(&lock_data->spinlock); ++ ++ /* Warn on recursive locking of user contexts. */ ++ if (_DRM_LOCKING_CONTEXT(old) == context && _DRM_LOCK_IS_HELD(old)) { ++ if (context != DRM_KERNEL_CONTEXT) { ++ DRM_ERROR("%d holds heavyweight lock\n", ++ context); ++ } ++ return 0; ++ } ++ ++ return !_DRM_LOCK_IS_HELD(old); ++} ++ ++/** ++ * This takes a lock forcibly and hands it to context. Should ONLY be used ++ * inside *_unlock to give lock to kernel before calling *_dma_schedule. ++ * ++ * \param dev DRM device. ++ * \param lock lock pointer. ++ * \param context locking context. ++ * \return always one. ++ * ++ * Resets the lock file pointer. ++ * Marks the lock as held by the given context, via the \p cmpxchg instruction. ++ */ ++static int drm_lock_transfer(struct drm_lock_data *lock_data, ++ unsigned int context) ++{ ++ unsigned int old, new, prev; ++ volatile unsigned int *lock = &lock_data->hw_lock->lock; ++ ++ lock_data->file_priv = NULL; ++ do { ++ old = *lock; ++ new = context | _DRM_LOCK_HELD; ++ prev = cmpxchg(lock, old, new); ++ } while (prev != old); ++ return 1; ++} ++ ++/** ++ * Free lock. ++ * ++ * \param dev DRM device. ++ * \param lock lock. ++ * \param context context. ++ * ++ * Resets the lock file pointer. ++ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task ++ * waiting on the lock queue. 
++ */ ++int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context) ++{ ++ unsigned int old, new, prev; ++ volatile unsigned int *lock = &lock_data->hw_lock->lock; ++ ++ spin_lock_bh(&lock_data->spinlock); ++ if (lock_data->kernel_waiters != 0) { ++ drm_lock_transfer(lock_data, 0); ++ lock_data->idle_has_lock = 1; ++ spin_unlock_bh(&lock_data->spinlock); ++ return 1; ++ } ++ spin_unlock_bh(&lock_data->spinlock); ++ ++ do { ++ old = *lock; ++ new = _DRM_LOCKING_CONTEXT(old); ++ prev = cmpxchg(lock, old, new); ++ } while (prev != old); ++ ++ if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { ++ DRM_ERROR("%d freed heavyweight lock held by %d\n", ++ context, _DRM_LOCKING_CONTEXT(old)); ++ return 1; ++ } ++ wake_up_interruptible(&lock_data->lock_queue); ++ return 0; ++} ++ ++/** ++ * If we get here, it means that the process has called DRM_IOCTL_LOCK ++ * without calling DRM_IOCTL_UNLOCK. ++ * ++ * If the lock is not held, then let the signal proceed as usual. If the lock ++ * is held, then set the contended flag and keep the signal blocked. ++ * ++ * \param priv pointer to a drm_sigdata structure. ++ * \return one if the signal should be delivered normally, or zero if the ++ * signal should be blocked. ++ */ ++static int drm_notifier(void *priv) ++{ ++ struct drm_sigdata *s = (struct drm_sigdata *) priv; ++ unsigned int old, new, prev; ++ ++ /* Allow signal delivery if lock isn't held */ ++ if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock) ++ || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) ++ return 1; ++ ++ /* Otherwise, set flag to force call to ++ drmUnlock */ ++ do { ++ old = s->lock->lock; ++ new = old | _DRM_LOCK_CONT; ++ prev = cmpxchg(&s->lock->lock, old, new); ++ } while (prev != old); ++ return 0; ++} ++ ++/** ++ * This function returns immediately and takes the hw lock ++ * with the kernel context if it is free, otherwise it gets the highest priority when and if ++ * it is eventually released. ++ * ++ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held ++ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause ++ * a deadlock, which is why the "idlelock" was invented). ++ * ++ * This should be sufficient to wait for GPU idle without ++ * having to worry about starvation. 
++ */
++
++void drm_idlelock_take(struct drm_lock_data *lock_data)
++{
++	int ret = 0;
++
++	spin_lock_bh(&lock_data->spinlock);
++	lock_data->kernel_waiters++;
++	if (!lock_data->idle_has_lock) {
++
++		spin_unlock_bh(&lock_data->spinlock);
++		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
++		spin_lock_bh(&lock_data->spinlock);
++
++		if (ret == 1)
++			lock_data->idle_has_lock = 1;
++	}
++	spin_unlock_bh(&lock_data->spinlock);
++}
++EXPORT_SYMBOL(drm_idlelock_take);
++
++void drm_idlelock_release(struct drm_lock_data *lock_data)
++{
++	unsigned int old, prev;
++	volatile unsigned int *lock = &lock_data->hw_lock->lock;
++
++	spin_lock_bh(&lock_data->spinlock);
++	if (--lock_data->kernel_waiters == 0) {
++		if (lock_data->idle_has_lock) {
++			do {
++				old = *lock;
++				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
++			} while (prev != old);
++			wake_up_interruptible(&lock_data->lock_queue);
++			lock_data->idle_has_lock = 0;
++		}
++	}
++	spin_unlock_bh(&lock_data->spinlock);
++}
++EXPORT_SYMBOL(drm_idlelock_release);
++
++int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
++{
++
++	return (file_priv->lock_count && dev->lock.hw_lock &&
++		_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
++		dev->lock.file_priv == file_priv);
++}
++
++EXPORT_SYMBOL(drm_i_have_hw_lock);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_memory.c git-nokia/drivers/gpu/drm-tungsten/drm_memory.c
+--- git/drivers/gpu/drm-tungsten/drm_memory.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_memory.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,374 @@
++/**
++ * \file drm_memory.c
++ * Memory management wrappers for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/highmem.h>
++#include <linux/vmalloc.h>
++#include "drmP.h"
++
++static struct {
++	spinlock_t lock;
++	uint64_t cur_used;
++	uint64_t emer_used;
++	uint64_t low_threshold;
++	uint64_t high_threshold;
++	uint64_t emer_threshold;
++} drm_memctl = {
++	.lock = SPIN_LOCK_UNLOCKED
++};
++
++static inline size_t drm_size_align(size_t size)
++{
++	size_t tmpSize = 4;
++	if (size > PAGE_SIZE)
++		return PAGE_ALIGN(size);
++
++	while (tmpSize < size)
++		tmpSize <<= 1;
++
++	return (size_t) tmpSize;
++}
++
++int drm_alloc_memctl(size_t size)
++{
++	int ret = 0;
++	unsigned long a_size = drm_size_align(size);
++	unsigned long new_used;
++
++	spin_lock(&drm_memctl.lock);
++	new_used = drm_memctl.cur_used + a_size;
++	if (likely(new_used < drm_memctl.high_threshold)) {
++		drm_memctl.cur_used = new_used;
++		goto out;
++	}
++
++	/*
++	 * Allow small allocations from root-only processes to
++	 * succeed until the emergency threshold is reached.
++	 */
++
++	new_used += drm_memctl.emer_used;
++	if (unlikely(!DRM_SUSER(DRM_CURPROC) ||
++		     (a_size > 16*PAGE_SIZE) ||
++		     (new_used > drm_memctl.emer_threshold))) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	drm_memctl.cur_used = drm_memctl.high_threshold;
++	drm_memctl.emer_used = new_used - drm_memctl.high_threshold;
++out:
++	spin_unlock(&drm_memctl.lock);
++	return ret;
++}
++EXPORT_SYMBOL(drm_alloc_memctl);
++
++
++void drm_free_memctl(size_t size)
++{
++	unsigned long a_size = drm_size_align(size);
++
++	spin_lock(&drm_memctl.lock);
++	if (likely(a_size >= drm_memctl.emer_used)) {
++		a_size -= drm_memctl.emer_used;
++		drm_memctl.emer_used = 0;
++	} else {
++		drm_memctl.emer_used -= a_size;
++		a_size = 0;
++	}
++	drm_memctl.cur_used -= a_size;
++	spin_unlock(&drm_memctl.lock);
++}
++EXPORT_SYMBOL(drm_free_memctl);
++
++void drm_query_memctl(uint64_t *cur_used,
++		      uint64_t *emer_used,
++		      uint64_t *low_threshold,
++		      uint64_t *high_threshold,
++		      uint64_t *emer_threshold)
++{
++	spin_lock(&drm_memctl.lock);
++	*cur_used = drm_memctl.cur_used;
++	*emer_used = drm_memctl.emer_used;
++	*low_threshold = drm_memctl.low_threshold;
++	*high_threshold = drm_memctl.high_threshold;
++	*emer_threshold = drm_memctl.emer_threshold;
++	spin_unlock(&drm_memctl.lock);
++}
++EXPORT_SYMBOL(drm_query_memctl);
++
++void drm_init_memctl(size_t p_low_threshold,
++		     size_t p_high_threshold,
++		     size_t unit_size)
++{
++	spin_lock(&drm_memctl.lock);
++	drm_memctl.emer_used = 0;
++	drm_memctl.cur_used = 0;
++	drm_memctl.low_threshold = p_low_threshold * unit_size;
++	drm_memctl.high_threshold = p_high_threshold * unit_size;
++	drm_memctl.emer_threshold = (drm_memctl.high_threshold >> 4) +
++		drm_memctl.high_threshold;
++	spin_unlock(&drm_memctl.lock);
++}
++
++
++#ifndef DEBUG_MEMORY
++
++/** No-op. */
++void drm_mem_init(void)
++{
++}
++
++/**
++ * Called when "/proc/dri/%dev%/mem" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param len requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ *
++ * No-op.
++ */ ++int drm_mem_info(char *buf, char **start, off_t offset, ++ int len, int *eof, void *data) ++{ ++ return 0; ++} ++ ++/** Wrapper around kmalloc() */ ++void *drm_calloc(size_t nmemb, size_t size, int area) ++{ ++ return kcalloc(nmemb, size, GFP_KERNEL); ++} ++EXPORT_SYMBOL(drm_calloc); ++ ++/** Wrapper around kmalloc() and kfree() */ ++void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area) ++{ ++ void *pt; ++ ++ if (!(pt = kmalloc(size, GFP_KERNEL))) ++ return NULL; ++ if (oldpt && oldsize) { ++ memcpy(pt, oldpt, DRM_MIN(oldsize,size)); ++ kfree(oldpt); ++ } ++ return pt; ++} ++ ++/** ++ * Allocate pages. ++ * ++ * \param order size order. ++ * \param area memory area. (Not used.) ++ * \return page address on success, or zero on failure. ++ * ++ * Allocate and reserve free pages. ++ */ ++unsigned long drm_alloc_pages(int order, int area) ++{ ++ unsigned long address; ++ unsigned long bytes = PAGE_SIZE << order; ++ unsigned long addr; ++ unsigned int sz; ++ ++ address = __get_free_pages(GFP_KERNEL, order); ++ if (!address) ++ return 0; ++ ++ /* Zero */ ++ memset((void *)address, 0, bytes); ++ ++ /* Reserve */ ++ for (addr = address, sz = bytes; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ SetPageReserved(virt_to_page(addr)); ++ } ++ ++ return address; ++} ++ ++/** ++ * Free pages. ++ * ++ * \param address address of the pages to free. ++ * \param order size order. ++ * \param area memory area. (Not used.) ++ * ++ * Unreserve and free pages allocated by alloc_pages(). ++ */ ++void drm_free_pages(unsigned long address, int order, int area) ++{ ++ unsigned long bytes = PAGE_SIZE << order; ++ unsigned long addr; ++ unsigned int sz; ++ ++ if (!address) ++ return; ++ ++ /* Unreserve */ ++ for (addr = address, sz = bytes; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ ClearPageReserved(virt_to_page(addr)); ++ } ++ ++ free_pages(address, order); ++} ++ ++#if __OS_HAS_AGP ++static void *agp_remap(unsigned long offset, unsigned long size, ++ struct drm_device * dev) ++{ ++ unsigned long *phys_addr_map, i, num_pages = ++ PAGE_ALIGN(size) / PAGE_SIZE; ++ struct drm_agp_mem *agpmem; ++ struct page **page_map; ++ void *addr; ++ ++ size = PAGE_ALIGN(size); ++ ++#ifdef __alpha__ ++ offset -= dev->hose->mem_space->start; ++#endif ++ ++ list_for_each_entry(agpmem, &dev->agp->memory, head) ++ if (agpmem->bound <= offset ++ && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= ++ (offset + size)) ++ break; ++ if (!agpmem) ++ return NULL; ++ ++ /* ++ * OK, we're mapping AGP space on a chipset/platform on which memory accesses by ++ * the CPU do not get remapped by the GART. We fix this by using the kernel's ++ * page-table instead (that's probably faster anyhow...). ++ */ ++ /* note: use vmalloc() because num_pages could be large... 
*/
++	page_map = vmalloc(num_pages * sizeof(struct page *));
++	if (!page_map)
++		return NULL;
++
++	phys_addr_map =
++		agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
++	for (i = 0; i < num_pages; ++i)
++		page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
++	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
++	vfree(page_map);
++
++	return addr;
++}
++
++/** Wrapper around agp_allocate_memory() */
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
++{
++	return drm_agp_allocate_memory(pages, type);
++}
++#else
++DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
++{
++	return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
++}
++#endif
++
++/** Wrapper around agp_free_memory() */
++int drm_free_agp(DRM_AGP_MEM * handle, int pages)
++{
++	return drm_agp_free_memory(handle) ? 0 : -EINVAL;
++}
++EXPORT_SYMBOL(drm_free_agp);
++
++/** Wrapper around agp_bind_memory() */
++int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
++{
++	return drm_agp_bind_memory(handle, start);
++}
++
++/** Wrapper around agp_unbind_memory() */
++int drm_unbind_agp(DRM_AGP_MEM * handle)
++{
++	return drm_agp_unbind_memory(handle);
++}
++EXPORT_SYMBOL(drm_unbind_agp);
++
++#else /* __OS_HAS_AGP*/
++static void *agp_remap(unsigned long offset, unsigned long size,
++		       struct drm_device * dev)
++{
++	return NULL;
++}
++#endif /* agp */
++#else
++static void *agp_remap(unsigned long offset, unsigned long size,
++		       struct drm_device * dev)
++{
++	return NULL;
++}
++#endif /* debug_memory */
++
++void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
++{
++	if (drm_core_has_AGP(dev) &&
++	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
++		map->handle = agp_remap(map->offset, map->size, dev);
++	else
++		map->handle = ioremap(map->offset, map->size);
++}
++EXPORT_SYMBOL_GPL(drm_core_ioremap);
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
++void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev)
++{
++	map->handle = ioremap_wc(map->offset, map->size);
++}
++EXPORT_SYMBOL_GPL(drm_core_ioremap_wc);
++#endif
++
++void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
++{
++	if (!map->handle || !map->size)
++		return;
++
++	if (drm_core_has_AGP(dev) &&
++	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
++		vunmap(map->handle);
++	else
++		iounmap(map->handle);
++}
++EXPORT_SYMBOL_GPL(drm_core_ioremapfree);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_memory_debug.c git-nokia/drivers/gpu/drm-tungsten/drm_memory_debug.c
+--- git/drivers/gpu/drm-tungsten/drm_memory_debug.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_memory_debug.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,403 @@
++/**
++ * \file drm_memory_debug.c
++ * Memory management wrappers for DRM.
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++#ifdef DEBUG_MEMORY ++ ++typedef struct drm_mem_stats { ++ const char *name; ++ int succeed_count; ++ int free_count; ++ int fail_count; ++ unsigned long bytes_allocated; ++ unsigned long bytes_freed; ++} drm_mem_stats_t; ++ ++static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED; ++static unsigned long drm_ram_available = 0; /* In pages */ ++static unsigned long drm_ram_used = 0; ++static drm_mem_stats_t drm_mem_stats[] = { ++ [DRM_MEM_DMA] = {"dmabufs"}, ++ [DRM_MEM_SAREA] = {"sareas"}, ++ [DRM_MEM_DRIVER] = {"driver"}, ++ [DRM_MEM_MAGIC] = {"magic"}, ++ [DRM_MEM_IOCTLS] = {"ioctltab"}, ++ [DRM_MEM_MAPS] = {"maplist"}, ++ [DRM_MEM_VMAS] = {"vmalist"}, ++ [DRM_MEM_BUFS] = {"buflist"}, ++ [DRM_MEM_SEGS] = {"seglist"}, ++ [DRM_MEM_PAGES] = {"pagelist"}, ++ [DRM_MEM_FILES] = {"files"}, ++ [DRM_MEM_QUEUES] = {"queues"}, ++ [DRM_MEM_CMDS] = {"commands"}, ++ [DRM_MEM_MAPPINGS] = {"mappings"}, ++ [DRM_MEM_BUFLISTS] = {"buflists"}, ++ [DRM_MEM_AGPLISTS] = {"agplist"}, ++ [DRM_MEM_SGLISTS] = {"sglist"}, ++ [DRM_MEM_TOTALAGP] = {"totalagp"}, ++ [DRM_MEM_BOUNDAGP] = {"boundagp"}, ++ [DRM_MEM_CTXBITMAP] = {"ctxbitmap"}, ++ [DRM_MEM_CTXLIST] = {"ctxlist"}, ++ [DRM_MEM_STUB] = {"stub"}, ++ {NULL, 0,} /* Last entry must be null */ ++}; ++ ++void drm_mem_init(void) ++{ ++ drm_mem_stats_t *mem; ++ struct sysinfo si; ++ ++ for (mem = drm_mem_stats; mem->name; ++mem) { ++ mem->succeed_count = 0; ++ mem->free_count = 0; ++ mem->fail_count = 0; ++ mem->bytes_allocated = 0; ++ mem->bytes_freed = 0; ++ } ++ ++ si_meminfo(&si); ++ drm_ram_available = si.totalram; ++ drm_ram_used = 0; ++} ++ ++/* drm_mem_info is called whenever a process reads /dev/drm/mem. 
*/ ++ ++static int drm__mem_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ drm_mem_stats_t *pt; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *eof = 0; ++ *start = &buf[offset]; ++ ++ DRM_PROC_PRINT(" total counts " ++ " | outstanding \n"); ++ DRM_PROC_PRINT("type alloc freed fail bytes freed" ++ " | allocs bytes\n\n"); ++ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", ++ "system", 0, 0, 0, ++ drm_ram_available << (PAGE_SHIFT - 10)); ++ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", ++ "locked", 0, 0, 0, drm_ram_used >> 10); ++ DRM_PROC_PRINT("\n"); ++ for (pt = drm_mem_stats; pt->name; pt++) { ++ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n", ++ pt->name, ++ pt->succeed_count, ++ pt->free_count, ++ pt->fail_count, ++ pt->bytes_allocated, ++ pt->bytes_freed, ++ pt->succeed_count - pt->free_count, ++ (long)pt->bytes_allocated ++ - (long)pt->bytes_freed); ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++int drm_mem_info(char *buf, char **start, off_t offset, ++ int len, int *eof, void *data) ++{ ++ int ret; ++ ++ spin_lock(&drm_mem_lock); ++ ret = drm__mem_info(buf, start, offset, len, eof, data); ++ spin_unlock(&drm_mem_lock); ++ return ret; ++} ++ ++void *drm_alloc(size_t size, int area) ++{ ++ void *pt; ++ ++ if (!size) { ++ DRM_MEM_ERROR(area, "Allocating 0 bytes\n"); ++ return NULL; ++ } ++ ++ if (!(pt = kmalloc(size, GFP_KERNEL))) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return NULL; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].succeed_count; ++ drm_mem_stats[area].bytes_allocated += size; ++ spin_unlock(&drm_mem_lock); ++ return pt; ++} ++EXPORT_SYMBOL(drm_alloc); ++ ++void *drm_calloc(size_t nmemb, size_t size, int area) ++{ ++ void *addr; ++ ++ addr = drm_alloc(nmemb * size, area); ++ if (addr != NULL) ++ memset((void *)addr, 0, size * nmemb); ++ ++ return addr; ++} ++EXPORT_SYMBOL(drm_calloc); ++ ++void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area) ++{ ++ void *pt; ++ ++ if (!(pt = drm_alloc(size, area))) ++ return NULL; ++ if (oldpt && oldsize) { ++ memcpy(pt, oldpt, oldsize); ++ drm_free(oldpt, oldsize, area); ++ } ++ return pt; ++} ++EXPORT_SYMBOL(drm_realloc); ++ ++void drm_free(void *pt, size_t size, int area) ++{ ++ int alloc_count; ++ int free_count; ++ ++ if (!pt) ++ DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n"); ++ else ++ kfree(pt); ++ spin_lock(&drm_mem_lock); ++ drm_mem_stats[area].bytes_freed += size; ++ free_count = ++drm_mem_stats[area].free_count; ++ alloc_count = drm_mem_stats[area].succeed_count; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++} ++EXPORT_SYMBOL(drm_free); ++ ++unsigned long drm_alloc_pages(int order, int area) ++{ ++ unsigned long address; ++ unsigned long bytes = PAGE_SIZE << order; ++ unsigned long addr; ++ unsigned int sz; ++ ++ spin_lock(&drm_mem_lock); ++ if ((drm_ram_used >> PAGE_SHIFT) ++ > (DRM_RAM_PERCENT * drm_ram_available) / 100) { ++ spin_unlock(&drm_mem_lock); ++ return 0; ++ } ++ spin_unlock(&drm_mem_lock); ++ ++ address = __get_free_pages(GFP_KERNEL, order); ++ if (!address) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return 0; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].succeed_count; 
++ drm_mem_stats[area].bytes_allocated += bytes; ++ drm_ram_used += bytes; ++ spin_unlock(&drm_mem_lock); ++ ++ /* Zero outside the lock */ ++ memset((void *)address, 0, bytes); ++ ++ /* Reserve */ ++ for (addr = address, sz = bytes; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ SetPageReserved(virt_to_page(addr)); ++ } ++ ++ return address; ++} ++ ++void drm_free_pages(unsigned long address, int order, int area) ++{ ++ unsigned long bytes = PAGE_SIZE << order; ++ int alloc_count; ++ int free_count; ++ unsigned long addr; ++ unsigned int sz; ++ ++ if (!address) { ++ DRM_MEM_ERROR(area, "Attempt to free address 0\n"); ++ } else { ++ /* Unreserve */ ++ for (addr = address, sz = bytes; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ ClearPageReserved(virt_to_page(addr)); ++ } ++ free_pages(address, order); ++ } ++ ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[area].free_count; ++ alloc_count = drm_mem_stats[area].succeed_count; ++ drm_mem_stats[area].bytes_freed += bytes; ++ drm_ram_used -= bytes; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(area, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++} ++ ++#if __OS_HAS_AGP ++ ++DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) ++{ ++ DRM_AGP_MEM *handle; ++ ++ if (!pages) { ++ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n"); ++ return NULL; ++ } ++ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++ if ((handle = drm_agp_allocate_memory(pages, type))) { ++#else ++ if ((handle = drm_agp_allocate_memory(dev->agp->bridge, pages, type))) { ++#endif ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated ++ += pages << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ return handle; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return NULL; ++} ++ ++int drm_free_agp(DRM_AGP_MEM * handle, int pages) ++{ ++ int alloc_count; ++ int free_count; ++ int retval = -EINVAL; ++ ++ if (!handle) { ++ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, ++ "Attempt to free NULL AGP handle\n"); ++ return retval; ++ } ++ ++ if (drm_agp_free_memory(handle)) { ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count; ++ alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed ++ += pages << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++ return 0; ++ } ++ return retval; ++} ++ ++int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) ++{ ++ int retcode = -EINVAL; ++ ++ if (!handle) { ++ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, ++ "Attempt to bind NULL AGP handle\n"); ++ return retcode; ++ } ++ ++ if (!(retcode = drm_agp_bind_memory(handle, start))) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated ++ += handle->page_count << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ return retcode; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return retcode; ++} ++ ++int drm_unbind_agp(DRM_AGP_MEM * handle) ++{ ++ int alloc_count; ++ int free_count; ++ int retcode = -EINVAL; ++ ++ if (!handle) { ++ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, ++ "Attempt to unbind NULL AGP 
handle\n"); ++ return retcode; ++ } ++ ++ if ((retcode = drm_agp_unbind_memory(handle))) ++ return retcode; ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count; ++ alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed ++ += handle->page_count << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++ return retcode; ++} ++ ++#endif ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/drm_memory_debug.h git-nokia/drivers/gpu/drm-tungsten/drm_memory_debug.h +--- git/drivers/gpu/drm-tungsten/drm_memory_debug.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_memory_debug.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,379 @@ ++/** ++ * \file drm_memory_debug.h ++ * Memory management wrappers for DRM. ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include "drmP.h" ++ ++typedef struct drm_mem_stats { ++ const char *name; ++ int succeed_count; ++ int free_count; ++ int fail_count; ++ unsigned long bytes_allocated; ++ unsigned long bytes_freed; ++} drm_mem_stats_t; ++ ++static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED; ++static unsigned long drm_ram_available = 0; /* In pages */ ++static unsigned long drm_ram_used = 0; ++static drm_mem_stats_t drm_mem_stats[] = ++{ ++ [DRM_MEM_DMA] = {"dmabufs"}, ++ [DRM_MEM_SAREA] = {"sareas"}, ++ [DRM_MEM_DRIVER] = {"driver"}, ++ [DRM_MEM_MAGIC] = {"magic"}, ++ [DRM_MEM_IOCTLS] = {"ioctltab"}, ++ [DRM_MEM_MAPS] = {"maplist"}, ++ [DRM_MEM_VMAS] = {"vmalist"}, ++ [DRM_MEM_BUFS] = {"buflist"}, ++ [DRM_MEM_SEGS] = {"seglist"}, ++ [DRM_MEM_PAGES] = {"pagelist"}, ++ [DRM_MEM_FILES] = {"files"}, ++ [DRM_MEM_QUEUES] = {"queues"}, ++ [DRM_MEM_CMDS] = {"commands"}, ++ [DRM_MEM_MAPPINGS] = {"mappings"}, ++ [DRM_MEM_BUFLISTS] = {"buflists"}, ++ [DRM_MEM_AGPLISTS] = {"agplist"}, ++ [DRM_MEM_SGLISTS] = {"sglist"}, ++ [DRM_MEM_TOTALAGP] = {"totalagp"}, ++ [DRM_MEM_BOUNDAGP] = {"boundagp"}, ++ [DRM_MEM_CTXBITMAP] = {"ctxbitmap"}, ++ [DRM_MEM_CTXLIST] = {"ctxlist"}, ++ [DRM_MEM_STUB] = {"stub"}, ++ {NULL, 0,} /* Last entry must be null */ ++}; ++ ++void drm_mem_init (void) { ++ drm_mem_stats_t *mem; ++ struct sysinfo si; ++ ++ for (mem = drm_mem_stats; mem->name; ++mem) { ++ mem->succeed_count = 0; ++ mem->free_count = 0; ++ mem->fail_count = 0; ++ mem->bytes_allocated = 0; ++ mem->bytes_freed = 0; ++ } ++ ++ si_meminfo(&si); ++ drm_ram_available = si.totalram; ++ drm_ram_used = 0; ++} ++ ++/* drm_mem_info is called whenever a process reads /dev/drm/mem. */ ++ ++static int drm__mem_info (char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) { ++ drm_mem_stats_t *pt; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *eof = 0; ++ *start = &buf[offset]; ++ ++ DRM_PROC_PRINT(" total counts " ++ " | outstanding \n"); ++ DRM_PROC_PRINT("type alloc freed fail bytes freed" ++ " | allocs bytes\n\n"); ++ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", ++ "system", 0, 0, 0, ++ drm_ram_available << (PAGE_SHIFT - 10)); ++ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", ++ "locked", 0, 0, 0, drm_ram_used >> 10); ++ DRM_PROC_PRINT("\n"); ++ for (pt = drm_mem_stats; pt->name; pt++) { ++ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n", ++ pt->name, ++ pt->succeed_count, ++ pt->free_count, ++ pt->fail_count, ++ pt->bytes_allocated, ++ pt->bytes_freed, ++ pt->succeed_count - pt->free_count, ++ (long)pt->bytes_allocated ++ - (long)pt->bytes_freed); ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++int drm_mem_info (char *buf, char **start, off_t offset, ++ int len, int *eof, void *data) { ++ int ret; ++ ++ spin_lock(&drm_mem_lock); ++ ret = drm__mem_info (buf, start, offset, len, eof, data); ++ spin_unlock(&drm_mem_lock); ++ return ret; ++} ++ ++void *drm_alloc (size_t size, int area) { ++ void *pt; ++ ++ if (!size) { ++ DRM_MEM_ERROR(area, "Allocating 0 bytes\n"); ++ return NULL; ++ } ++ ++ if (!(pt = kmalloc(size, GFP_KERNEL))) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return NULL; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].succeed_count; ++ drm_mem_stats[area].bytes_allocated += size; ++ spin_unlock(&drm_mem_lock); ++ return pt; ++} ++ ++void *drm_calloc (size_t nmemb, size_t size, int area) { ++ void *addr; ++ 
++ addr = drm_alloc (nmemb * size, area); ++ if (addr != NULL) ++ memset((void *)addr, 0, size * nmemb); ++ ++ return addr; ++} ++ ++void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) { ++ void *pt; ++ ++ if (!(pt = drm_alloc (size, area))) ++ return NULL; ++ if (oldpt && oldsize) { ++ memcpy(pt, oldpt, oldsize); ++ drm_free (oldpt, oldsize, area); ++ } ++ return pt; ++} ++ ++void drm_free (void *pt, size_t size, int area) { ++ int alloc_count; ++ int free_count; ++ ++ if (!pt) ++ DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n"); ++ else ++ kfree(pt); ++ spin_lock(&drm_mem_lock); ++ drm_mem_stats[area].bytes_freed += size; ++ free_count = ++drm_mem_stats[area].free_count; ++ alloc_count = drm_mem_stats[area].succeed_count; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++} ++ ++unsigned long drm_alloc_pages (int order, int area) { ++ unsigned long address; ++ unsigned long bytes = PAGE_SIZE << order; ++ unsigned long addr; ++ unsigned int sz; ++ ++ spin_lock(&drm_mem_lock); ++ if ((drm_ram_used >> PAGE_SHIFT) ++ > (DRM_RAM_PERCENT * drm_ram_available) / 100) { ++ spin_unlock(&drm_mem_lock); ++ return 0; ++ } ++ spin_unlock(&drm_mem_lock); ++ ++ address = __get_free_pages(GFP_KERNEL, order); ++ if (!address) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return 0; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[area].succeed_count; ++ drm_mem_stats[area].bytes_allocated += bytes; ++ drm_ram_used += bytes; ++ spin_unlock(&drm_mem_lock); ++ ++ /* Zero outside the lock */ ++ memset((void *)address, 0, bytes); ++ ++ /* Reserve */ ++ for (addr = address, sz = bytes; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ SetPageReserved(virt_to_page(addr)); ++ } ++ ++ return address; ++} ++ ++void drm_free_pages (unsigned long address, int order, int area) { ++ unsigned long bytes = PAGE_SIZE << order; ++ int alloc_count; ++ int free_count; ++ unsigned long addr; ++ unsigned int sz; ++ ++ if (!address) { ++ DRM_MEM_ERROR(area, "Attempt to free address 0\n"); ++ } else { ++ /* Unreserve */ ++ for (addr = address, sz = bytes; ++ sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ++ ClearPageReserved(virt_to_page(addr)); ++ } ++ free_pages(address, order); ++ } ++ ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[area].free_count; ++ alloc_count = drm_mem_stats[area].succeed_count; ++ drm_mem_stats[area].bytes_freed += bytes; ++ drm_ram_used -= bytes; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(area, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++} ++ ++#if __OS_HAS_AGP ++ ++DRM_AGP_MEM *drm_alloc_agp (struct drm_device *dev, int pages, u32 type) { ++ DRM_AGP_MEM *handle; ++ ++ if (!pages) { ++ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n"); ++ return NULL; ++ } ++ ++ if ((handle = drm_agp_allocate_memory (pages, type))) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated ++ += pages << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ return handle; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return NULL; ++} ++ ++int drm_free_agp (DRM_AGP_MEM * handle, int pages) { ++ int alloc_count; ++ int free_count; ++ int retval = -EINVAL; ++ ++ if (!handle) { ++ 
DRM_MEM_ERROR(DRM_MEM_TOTALAGP, ++ "Attempt to free NULL AGP handle\n"); ++ return retval; ++ } ++ ++ if (drm_agp_free_memory (handle)) { ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count; ++ alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed ++ += pages << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++ return 0; ++ } ++ return retval; ++} ++ ++int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) { ++ int retcode = -EINVAL; ++ ++ if (!handle) { ++ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, ++ "Attempt to bind NULL AGP handle\n"); ++ return retcode; ++ } ++ ++ if (!(retcode = drm_agp_bind_memory (handle, start))) { ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated ++ += handle->page_count << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ return retcode; ++ } ++ spin_lock(&drm_mem_lock); ++ ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count; ++ spin_unlock(&drm_mem_lock); ++ return retcode; ++} ++ ++int drm_unbind_agp (DRM_AGP_MEM * handle) { ++ int alloc_count; ++ int free_count; ++ int retcode = -EINVAL; ++ ++ if (!handle) { ++ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, ++ "Attempt to unbind NULL AGP handle\n"); ++ return retcode; ++ } ++ ++ if ((retcode = drm_agp_unbind_memory (handle))) ++ return retcode; ++ spin_lock(&drm_mem_lock); ++ free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count; ++ alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count; ++ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed ++ += handle->page_count << PAGE_SHIFT; ++ spin_unlock(&drm_mem_lock); ++ if (free_count > alloc_count) { ++ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, ++ "Excess frees: %d frees, %d allocs\n", ++ free_count, alloc_count); ++ } ++ return retcode; ++} ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/drm_memory.h git-nokia/drivers/gpu/drm-tungsten/drm_memory.h +--- git/drivers/gpu/drm-tungsten/drm_memory.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_memory.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,61 @@ ++/** ++ * \file drm_memory.h ++ * Memory management wrappers for DRM ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/highmem.h>
++#include <linux/vmalloc.h>
++#include "drmP.h"
++
++/**
++ * Cut down version of drm_memory_debug.h, which used to be called
++ * drm_memory.h.
++ */
++
++#if __OS_HAS_AGP
++
++#include <linux/vmalloc.h>
++
++#ifdef HAVE_PAGE_AGP
++#include <asm/agp.h>
++#else
++# ifdef __powerpc__
++#  define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
++# else
++#  define PAGE_AGP PAGE_KERNEL
++# endif
++#endif
++
++#else /* __OS_HAS_AGP */
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_mm.c git-nokia/drivers/gpu/drm-tungsten/drm_mm.c
+--- git/drivers/gpu/drm-tungsten/drm_mm.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_mm.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,298 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++
++/*
++ * Generic simple memory manager implementation. Intended to be used as a base
++ * class implementation for more advanced memory managers.
++ *
++ * Note that the algorithm used is quite simple and there might be substantial
++ * performance gains if a smarter free list is implemented. Currently it is
++ * just an unordered stack of free regions. This could easily be improved if
++ * an RB-tree is used instead, at least if we expect heavy fragmentation.
++ *
++ * Aligned allocations can also see improvement.
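++ *
++ * Typical use, as a sketch only (the variable names and sizes below are
++ * hypothetical, not part of this patch): a driver embeds a struct drm_mm,
++ * initializes it over a managed range, and carves nodes out of it:
++ *
++ *	struct drm_mm mm;
++ *	struct drm_mm_node *node;
++ *
++ *	drm_mm_init(&mm, 0, num_pages);
++ *	node = drm_mm_search_free(&mm, size, alignment, 1);
++ *	if (node)
++ *		node = drm_mm_get_block(node, size, alignment);
++ *	...
++ *	drm_mm_put_block(node);
++ *	drm_mm_takedown(&mm);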
++ *
++ * Authors:
++ * Thomas Hellström
++ */
++
++#include "drmP.h"
++#include <linux/slab.h>
++
++unsigned long drm_mm_tail_space(struct drm_mm *mm)
++{
++	struct list_head *tail_node;
++	struct drm_mm_node *entry;
++
++	tail_node = mm->ml_entry.prev;
++	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
++	if (!entry->free)
++		return 0;
++
++	return entry->size;
++}
++
++int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
++{
++	struct list_head *tail_node;
++	struct drm_mm_node *entry;
++
++	tail_node = mm->ml_entry.prev;
++	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
++	if (!entry->free)
++		return -ENOMEM;
++
++	if (entry->size <= size)
++		return -ENOMEM;
++
++	entry->size -= size;
++	return 0;
++}
++
++
++static int drm_mm_create_tail_node(struct drm_mm *mm,
++				   unsigned long start,
++				   unsigned long size)
++{
++	struct drm_mm_node *child;
++
++	child = (struct drm_mm_node *)
++	    drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
++	if (!child)
++		return -ENOMEM;
++
++	child->free = 1;
++	child->size = size;
++	child->start = start;
++	child->mm = mm;
++
++	list_add_tail(&child->ml_entry, &mm->ml_entry);
++	list_add_tail(&child->fl_entry, &mm->fl_entry);
++
++	return 0;
++}
++
++
++int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
++{
++	struct list_head *tail_node;
++	struct drm_mm_node *entry;
++
++	tail_node = mm->ml_entry.prev;
++	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
++	if (!entry->free) {
++		return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
++	}
++	entry->size += size;
++	return 0;
++}
++
++static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
++						 unsigned long size)
++{
++	struct drm_mm_node *child;
++
++	child = (struct drm_mm_node *)
++	    drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
++	if (!child)
++		return NULL;
++
++	INIT_LIST_HEAD(&child->fl_entry);
++
++	child->free = 0;
++	child->size = size;
++	child->start = parent->start;
++	child->mm = parent->mm;
++
++	list_add_tail(&child->ml_entry, &parent->ml_entry);
++
++	parent->size -= size;
++	parent->start += size;
++	return child;
++}
++
++struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
++				     unsigned long size, unsigned alignment)
++{
++
++	struct drm_mm_node *align_splitoff = NULL;
++	struct drm_mm_node *child;
++	unsigned tmp = 0;
++
++	if (alignment)
++		tmp = parent->start % alignment;
++
++	if (tmp) {
++		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
++		if (!align_splitoff)
++			return NULL;
++	}
++
++	if (parent->size == size) {
++		list_del_init(&parent->fl_entry);
++		parent->free = 0;
++		return parent;
++	} else {
++		child = drm_mm_split_at_start(parent, size);
++	}
++
++	if (align_splitoff)
++		drm_mm_put_block(align_splitoff);
++
++	return child;
++}
++EXPORT_SYMBOL(drm_mm_get_block);
++
++/*
++ * Put a block. Merge with the previous and / or next block if they are free.
++ * Otherwise add to the free stack.
++ */
++
++void drm_mm_put_block(struct drm_mm_node * cur)
++{
++
++	struct drm_mm *mm = cur->mm;
++	struct list_head *cur_head = &cur->ml_entry;
++	struct list_head *root_head = &mm->ml_entry;
++	struct drm_mm_node *prev_node = NULL;
++	struct drm_mm_node *next_node;
++
++	int merged = 0;
++
++	if (cur_head->prev != root_head) {
++		prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
++		if (prev_node->free) {
++			prev_node->size += cur->size;
++			merged = 1;
++		}
++	}
++	if (cur_head->next != root_head) {
++		next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
++		if (next_node->free) {
++			if (merged) {
++				prev_node->size += next_node->size;
++				list_del(&next_node->ml_entry);
++				list_del(&next_node->fl_entry);
++				drm_ctl_free(next_node, sizeof(*next_node),
++					     DRM_MEM_MM);
++			} else {
++				next_node->size += cur->size;
++				next_node->start = cur->start;
++				merged = 1;
++			}
++		}
++	}
++	if (!merged) {
++		cur->free = 1;
++		list_add(&cur->fl_entry, &mm->fl_entry);
++	} else {
++		list_del(&cur->ml_entry);
++		drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
++	}
++}
++EXPORT_SYMBOL(drm_mm_put_block);
++
++struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
++				       unsigned long size,
++				       unsigned alignment, int best_match)
++{
++	struct list_head *list;
++	const struct list_head *free_stack = &mm->fl_entry;
++	struct drm_mm_node *entry;
++	struct drm_mm_node *best;
++	unsigned long best_size;
++	unsigned wasted;
++
++	best = NULL;
++	best_size = ~0UL;
++
++	list_for_each(list, free_stack) {
++		entry = list_entry(list, struct drm_mm_node, fl_entry);
++		wasted = 0;
++
++		if (entry->size < size)
++			continue;
++
++		if (alignment) {
++			register unsigned tmp = entry->start % alignment;
++			if (tmp)
++				wasted += alignment - tmp;
++		}
++
++
++		if (entry->size >= size + wasted) {
++			if (!best_match)
++				return entry;
++			/* Track the smallest hole that still fits. */
++			if (entry->size < best_size) {
++				best = entry;
++				best_size = entry->size;
++			}
++		}
++	}
++
++	return best;
++}
++EXPORT_SYMBOL(drm_mm_search_free);
++
++int drm_mm_clean(struct drm_mm * mm)
++{
++	struct list_head *head = &mm->ml_entry;
++
++	return (head->next->next == head);
++}
++
++int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
++{
++	INIT_LIST_HEAD(&mm->ml_entry);
++	INIT_LIST_HEAD(&mm->fl_entry);
++
++	return drm_mm_create_tail_node(mm, start, size);
++}
++
++EXPORT_SYMBOL(drm_mm_init);
++
++void drm_mm_takedown(struct drm_mm * mm)
++{
++	struct list_head *bnode = mm->fl_entry.next;
++	struct drm_mm_node *entry;
++
++	entry = list_entry(bnode, struct drm_mm_node, fl_entry);
++
++	if (entry->ml_entry.next != &mm->ml_entry ||
++	    entry->fl_entry.next != &mm->fl_entry) {
++		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
++		return;
++	}
++
++	list_del(&entry->fl_entry);
++	list_del(&entry->ml_entry);
++	drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
++}
++
++EXPORT_SYMBOL(drm_mm_takedown);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_object.c git-nokia/drivers/gpu/drm-tungsten/drm_object.c
+--- git/drivers/gpu/drm-tungsten/drm_object.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_object.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,294 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item, ++ int shareable) ++{ ++ struct drm_device *dev = priv->minor->dev; ++ int ret; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ ++ /* The refcount will be bumped to 1 when we add the ref object below. */ ++ atomic_set(&item->refcount, 0); ++ item->shareable = shareable; ++ item->owner = priv; ++ ++ ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash, ++ (unsigned long)item, 31, 0, 0); ++ if (ret) ++ return ret; ++ ++ ret = drm_add_ref_object(priv, item, _DRM_REF_USE); ++ if (ret) ++ ret = drm_ht_remove_item(&dev->object_hash, &item->hash); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_add_user_object); ++ ++struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key) ++{ ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_hash_item *hash; ++ int ret; ++ struct drm_user_object *item; ++ ++ DRM_ASSERT_LOCKED(&dev->struct_mutex); ++ ++ ret = drm_ht_find_item(&dev->object_hash, key, &hash); ++ if (ret) ++ return NULL; ++ ++ item = drm_hash_entry(hash, struct drm_user_object, hash); ++ ++ if (priv != item->owner) { ++ struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE]; ++ ret = drm_ht_find_item(ht, (unsigned long)item, &hash); ++ if (ret) { ++ DRM_ERROR("Object not registered for usage\n"); ++ return NULL; ++ } ++ } ++ return item; ++} ++EXPORT_SYMBOL(drm_lookup_user_object); ++ ++static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item) ++{ ++ struct drm_device *dev = priv->minor->dev; ++ int ret; ++ ++ if (atomic_dec_and_test(&item->refcount)) { ++ ret = drm_ht_remove_item(&dev->object_hash, &item->hash); ++ BUG_ON(ret); ++ item->remove(priv, item); ++ } ++} ++ ++static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro, ++ enum drm_ref_type action) ++{ ++ int ret = 0; ++ ++ switch (action) { ++ case _DRM_REF_USE: ++ atomic_inc(&ro->refcount); ++ break; ++ default: ++ if (!ro->ref_struct_locked) { ++ break; ++ } else { ++ ro->ref_struct_locked(priv, ro, action); ++ } ++ } ++ return ret; ++} ++ ++int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object, ++ enum drm_ref_type 
ref_action) ++{ ++ int ret = 0; ++ struct drm_ref_object *item; ++ struct drm_open_hash *ht = &priv->refd_object_hash[ref_action]; ++ ++ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex); ++ if (!referenced_object->shareable && priv != referenced_object->owner) { ++ DRM_ERROR("Not allowed to reference this object\n"); ++ return -EINVAL; ++ } ++ ++ /* ++ * If this is not a usage reference, Check that usage has been registered ++ * first. Otherwise strange things may happen on destruction. ++ */ ++ ++ if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) { ++ item = ++ drm_lookup_ref_object(priv, referenced_object, ++ _DRM_REF_USE); ++ if (!item) { ++ DRM_ERROR ++ ("Object not registered for usage by this client\n"); ++ return -EINVAL; ++ } ++ } ++ ++ if (NULL != ++ (item = ++ drm_lookup_ref_object(priv, referenced_object, ref_action))) { ++ atomic_inc(&item->refcount); ++ return drm_object_ref_action(priv, referenced_object, ++ ref_action); ++ } ++ ++ item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS); ++ if (item == NULL) { ++ DRM_ERROR("Could not allocate reference object\n"); ++ return -ENOMEM; ++ } ++ ++ atomic_set(&item->refcount, 1); ++ item->hash.key = (unsigned long)referenced_object; ++ ret = drm_ht_insert_item(ht, &item->hash); ++ item->unref_action = ref_action; ++ ++ if (ret) ++ goto out; ++ ++ list_add(&item->list, &priv->refd_objects); ++ ret = drm_object_ref_action(priv, referenced_object, ref_action); ++out: ++ return ret; ++} ++ ++struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv, ++ struct drm_user_object *referenced_object, ++ enum drm_ref_type ref_action) ++{ ++ struct drm_hash_item *hash; ++ int ret; ++ ++ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex); ++ ret = drm_ht_find_item(&priv->refd_object_hash[ref_action], ++ (unsigned long)referenced_object, &hash); ++ if (ret) ++ return NULL; ++ ++ return drm_hash_entry(hash, struct drm_ref_object, hash); ++} ++EXPORT_SYMBOL(drm_lookup_ref_object); ++ ++static void drm_remove_other_references(struct drm_file *priv, ++ struct drm_user_object *ro) ++{ ++ int i; ++ struct drm_open_hash *ht; ++ struct drm_hash_item *hash; ++ struct drm_ref_object *item; ++ ++ for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) { ++ ht = &priv->refd_object_hash[i]; ++ while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) { ++ item = drm_hash_entry(hash, struct drm_ref_object, hash); ++ drm_remove_ref_object(priv, item); ++ } ++ } ++} ++ ++void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item) ++{ ++ int ret; ++ struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key; ++ struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action]; ++ enum drm_ref_type unref_action; ++ ++ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex); ++ unref_action = item->unref_action; ++ if (atomic_dec_and_test(&item->refcount)) { ++ ret = drm_ht_remove_item(ht, &item->hash); ++ BUG_ON(ret); ++ list_del_init(&item->list); ++ if (unref_action == _DRM_REF_USE) ++ drm_remove_other_references(priv, user_object); ++ drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS); ++ } ++ ++ switch (unref_action) { ++ case _DRM_REF_USE: ++ drm_deref_user_object(priv, user_object); ++ break; ++ default: ++ BUG_ON(!user_object->unref); ++ user_object->unref(priv, user_object, unref_action); ++ break; ++ } ++ ++} ++EXPORT_SYMBOL(drm_remove_ref_object); ++ ++int drm_user_object_ref(struct drm_file *priv, uint32_t user_token, ++ enum drm_object_type type, struct drm_user_object **object) ++{ 
++ struct drm_device *dev = priv->minor->dev; ++ struct drm_user_object *uo; ++ struct drm_hash_item *hash; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_ht_find_item(&dev->object_hash, user_token, &hash); ++ if (ret) { ++ DRM_ERROR("Could not find user object to reference.\n"); ++ goto out_err; ++ } ++ uo = drm_hash_entry(hash, struct drm_user_object, hash); ++ if (uo->type != type) { ++ ret = -EINVAL; ++ goto out_err; ++ } ++ ret = drm_add_ref_object(priv, uo, _DRM_REF_USE); ++ if (ret) ++ goto out_err; ++ mutex_unlock(&dev->struct_mutex); ++ *object = uo; ++ return 0; ++out_err: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++int drm_user_object_unref(struct drm_file *priv, uint32_t user_token, ++ enum drm_object_type type) ++{ ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_user_object *uo; ++ struct drm_ref_object *ro; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ uo = drm_lookup_user_object(priv, user_token); ++ if (!uo || (uo->type != type)) { ++ ret = -EINVAL; ++ goto out_err; ++ } ++ ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE); ++ if (!ro) { ++ ret = -EINVAL; ++ goto out_err; ++ } ++ drm_remove_ref_object(priv, ro); ++ mutex_unlock(&dev->struct_mutex); ++ return 0; ++out_err: ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/drm_objects.h git-nokia/drivers/gpu/drm-tungsten/drm_objects.h +--- git/drivers/gpu/drm-tungsten/drm_objects.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_objects.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,832 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#ifndef _DRM_OBJECTS_H ++#define _DRM_OBJECTS_H ++ ++struct drm_device; ++struct drm_bo_mem_reg; ++ ++/*************************************************** ++ * User space objects. (drm_object.c) ++ */ ++ ++#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) ++ ++enum drm_object_type { ++ drm_fence_type, ++ drm_buffer_type, ++ drm_lock_type, ++ /* ++ * Add other user space object types here. 
++ */
++	drm_driver_type0 = 256,
++	drm_driver_type1,
++	drm_driver_type2,
++	drm_driver_type3,
++	drm_driver_type4
++};
++
++/*
++ * A user object is a structure that helps the drm give out user handles
++ * to kernel internal objects and to keep track of these objects so that
++ * they can be destroyed, for example when the user space process exits.
++ * Designed to be accessible using a user space 32-bit handle.
++ */
++
++struct drm_user_object {
++	struct drm_hash_item hash;
++	struct list_head list;
++	enum drm_object_type type;
++	atomic_t refcount;
++	int shareable;
++	struct drm_file *owner;
++	void (*ref_struct_locked) (struct drm_file *priv,
++				   struct drm_user_object *obj,
++				   enum drm_ref_type ref_action);
++	void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
++		       enum drm_ref_type unref_action);
++	void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
++};
++
++/*
++ * A ref object is a structure which is used to keep track of references
++ * to user objects, so that those references can be destroyed, for example
++ * when the user space process exits. Designed to be accessible using a
++ * pointer to the _user_ object.
++ */
++
++struct drm_ref_object {
++	struct drm_hash_item hash;
++	struct list_head list;
++	atomic_t refcount;
++	enum drm_ref_type unref_action;
++};
++
++/**
++ * Must be called with the struct_mutex held.
++ */
++
++extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
++			       int shareable);
++/**
++ * Must be called with the struct_mutex held.
++ */
++
++extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
++						      uint32_t key);
++
++/*
++ * Must be called with the struct_mutex held. May temporarily release it.
++ */
++
++extern int drm_add_ref_object(struct drm_file *priv,
++			      struct drm_user_object *referenced_object,
++			      enum drm_ref_type ref_action);
++
++/*
++ * Must be called with the struct_mutex held.
++ */
++
++struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
++					     struct drm_user_object *referenced_object,
++					     enum drm_ref_type ref_action);
++/*
++ * Must be called with the struct_mutex held. If "item" has been obtained
++ * by a call to drm_lookup_ref_object, you may not release the struct_mutex
++ * before calling drm_remove_ref_object.
++ * This function may temporarily release the struct_mutex.
++ */
++
++extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
++extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
++			       enum drm_object_type type,
++			       struct drm_user_object **object);
++extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
++				 enum drm_object_type type);
++
++/***************************************************
++ * Fence objects. (drm_fence.c)
++ */
++
++struct drm_fence_object {
++	struct drm_user_object base;
++	struct drm_device *dev;
++	atomic_t usage;
++
++	/*
++	 * The below three fields are protected by the fence manager spinlock.
++	 */
++
++	struct list_head ring;
++	int fence_class;
++	uint32_t native_types;
++	uint32_t type;
++	uint32_t signaled_types;
++	uint32_t sequence;
++	uint32_t waiting_types;
++	uint32_t error;
++};
++
++#define _DRM_FENCE_CLASSES 8
++#define _DRM_FENCE_TYPE_EXE 0x00
++
++struct drm_fence_class_manager {
++	struct list_head ring;
++	uint32_t pending_flush;
++	uint32_t waiting_types;
++	wait_queue_head_t fence_queue;
++	uint32_t highest_waiting_sequence;
++	uint32_t latest_queued_sequence;
++};
++
++struct drm_fence_manager {
++	int initialized;
++	rwlock_t lock;
++	struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
++	uint32_t num_classes;
++	atomic_t count;
++};
++
++struct drm_fence_driver {
++	unsigned long *waiting_jiffies;
++	uint32_t num_classes;
++	uint32_t wrap_diff;
++	uint32_t flush_diff;
++	uint32_t sequence_mask;
++
++	/*
++	 * Driver implemented functions:
++	 * has_irq() : 1 if the hardware can update the indicated type_flags using an
++	 * irq handler. 0 if polling is required.
++	 *
++	 * emit() : Emit a sequence number to the command stream.
++	 * Return the sequence number.
++	 *
++	 * flush() : Make sure the flags indicated in fc->pending_flush will eventually
++	 * signal for fc->highest_received_sequence and all preceding sequences.
++	 * Acknowledge by clearing the flags fc->pending_flush.
++	 *
++	 * poll() : Call drm_fence_handler with any new information.
++	 *
++	 * needed_flush() : Given the current state of the fence->type flags and previously
++	 * executed or queued flushes, return the type_flags that need flushing.
++	 *
++	 * wait(): Wait for the "mask" flags to signal on a given fence, performing
++	 * whatever's necessary to make this happen.
++	 */
++
++	int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
++			uint32_t flags);
++	int (*emit) (struct drm_device *dev, uint32_t fence_class,
++		     uint32_t flags, uint32_t *breadcrumb,
++		     uint32_t *native_type);
++	void (*flush) (struct drm_device *dev, uint32_t fence_class);
++	void (*poll) (struct drm_device *dev, uint32_t fence_class,
++		      uint32_t types);
++	uint32_t (*needed_flush) (struct drm_fence_object *fence);
++	int (*wait) (struct drm_fence_object *fence, int lazy,
++		     int interruptible, uint32_t mask);
++};
++
++extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
++				  int interruptible, uint32_t mask,
++				  unsigned long end_jiffies);
++extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
++			      uint32_t sequence, uint32_t type,
++			      uint32_t error);
++extern void drm_fence_manager_init(struct drm_device *dev);
++extern void drm_fence_manager_takedown(struct drm_device *dev);
++extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
++				uint32_t sequence);
++extern int drm_fence_object_flush(struct drm_fence_object *fence,
++				  uint32_t type);
++extern int drm_fence_object_signaled(struct drm_fence_object *fence,
++				     uint32_t type);
++extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
++extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
++extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
++extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
++					 struct drm_fence_object *src);
++extern int drm_fence_object_wait(struct drm_fence_object *fence,
++				 int lazy, int ignore_signals, uint32_t mask);
++extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
++				   uint32_t fence_flags, uint32_t fence_class,
++				   struct drm_fence_object **c_fence);
++extern int drm_fence_object_emit(struct drm_fence_object *fence,
++				 uint32_t fence_flags, uint32_t class,
++				 uint32_t type);
++extern void drm_fence_fill_arg(struct drm_fence_object *fence,
++			       struct drm_fence_arg *arg);
++
++extern int drm_fence_add_user_object(struct drm_file *priv,
++				     struct drm_fence_object *fence,
++				     int shareable);
++
++extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
++				  struct drm_file *file_priv);
++extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
++				   struct drm_file *file_priv);
++extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
++				     struct drm_file *file_priv);
++extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
++				       struct drm_file *file_priv);
++extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
++				    struct drm_file *file_priv);
++extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
++				 struct drm_file *file_priv);
++extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
++				struct drm_file *file_priv);
++extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
++				struct drm_file *file_priv);
++extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
++				   struct drm_file *file_priv);
++/**************************************************
++ * TTMs
++ */
++
++/*
++ * The ttm backend GTT interface. (In our case AGP.) Any similar type of
++ * device (PCIE?) needs only to implement these functions to be usable with
++ * the TTM interface. The AGP backend implementation lives in
++ * drm_agpsupport.c and basically maps these calls to the available
++ * functions in agpgart. Each drm device driver gets an additional function
++ * pointer that creates these types, so that the device can choose the
++ * correct aperture. (Multiple AGP apertures, etc.)
++ * Most device drivers will let this point to the standard AGP implementation.
++ */
++
++#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
++#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
++
++struct drm_ttm_backend;
++struct drm_ttm_backend_func {
++	int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
++	int (*populate) (struct drm_ttm_backend *backend,
++			 unsigned long num_pages, struct page **pages,
++			 struct page *dummy_read_page);
++	void (*clear) (struct drm_ttm_backend *backend);
++	int (*bind) (struct drm_ttm_backend *backend,
++		     struct drm_bo_mem_reg *bo_mem);
++	int (*unbind) (struct drm_ttm_backend *backend);
++	void (*destroy) (struct drm_ttm_backend *backend);
++};
++
++/**
++ * This structure associates a set of flags and methods with a drm_ttm
++ * object, and will also be subclassed by the particular backend.
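++ *
++ * A backend implementation is expected to embed this structure; the
++ * following is a sketch only (the struct name is hypothetical, not part
++ * of this patch):
++ *
++ *	struct my_agp_ttm_backend {
++ *		struct drm_ttm_backend base;
++ *		...driver private state...
++ *	};
++ *
++ * with base.func pointing to a statically allocated
++ * struct drm_ttm_backend_func.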
++ *
++ * \sa #drm_agp_ttm_backend
++ */
++struct drm_ttm_backend {
++	struct drm_device *dev;
++	uint32_t flags;
++	struct drm_ttm_backend_func *func;
++};
++
++struct drm_ttm {
++	struct page *dummy_read_page;
++	struct page **pages;
++	long first_himem_page;
++	long last_lomem_page;
++	uint32_t page_flags;
++	unsigned long num_pages;
++	atomic_t vma_count;
++	struct drm_device *dev;
++	int destroy;
++	uint32_t mapping_offset;
++	struct drm_ttm_backend *be;
++	unsigned long highest_lomem_entry;
++	unsigned long lowest_himem_entry;
++	enum {
++		ttm_bound,
++		ttm_evicted,
++		ttm_unbound,
++		ttm_unpopulated,
++	} state;
++
++};
++
++extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
++				      uint32_t page_flags,
++				      struct page *dummy_read_page);
++extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
++extern void drm_ttm_unbind(struct drm_ttm *ttm);
++extern void drm_ttm_evict(struct drm_ttm *ttm);
++extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
++extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
++extern void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages);
++extern int drm_ttm_populate(struct drm_ttm *ttm);
++extern int drm_ttm_set_user(struct drm_ttm *ttm,
++			    struct task_struct *tsk,
++			    unsigned long start,
++			    unsigned long num_pages);
++
++/*
++ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
++ * this, which calls this function iff there are no vmas referencing it
++ * anymore. Otherwise it is called when the last vma exits.
++ */
++
++extern int drm_ttm_destroy(struct drm_ttm *ttm);
++
++#define DRM_FLAG_MASKED(_old, _new, _mask) {\
++(_old) ^= (((_old) ^ (_new)) & (_mask)); \
++}
++
++#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
++#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
++
++/*
++ * Page flags.
++ */
++
++/*
++ * This ttm should not be cached by the CPU
++ */
++#define DRM_TTM_PAGE_UNCACHED (1 << 0)
++/*
++ * This flag is not used at this time; I don't know what the
++ * intent was
++ */
++#define DRM_TTM_PAGE_USED (1 << 1)
++/*
++ * This flag is not used at this time; I don't know what the
++ * intent was
++ */
++#define DRM_TTM_PAGE_BOUND (1 << 2)
++/*
++ * This flag is not used at this time; I don't know what the
++ * intent was
++ */
++#define DRM_TTM_PAGE_PRESENT (1 << 3)
++/*
++ * The array of page pointers was allocated with vmalloc
++ * instead of drm_calloc.
++ */
++#define DRM_TTM_PAGEDIR_VMALLOC (1 << 4)
++/*
++ * This ttm is mapped from user space
++ */
++#define DRM_TTM_PAGE_USER (1 << 5)
++/*
++ * This ttm will be written to by the GPU
++ */
++#define DRM_TTM_PAGE_WRITE (1 << 6)
++/*
++ * This ttm was mapped to the GPU, and so the contents may have
++ * been modified
++ */
++#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
++/*
++ * This flag is not used at this time; I don't know what the
++ * intent was.
++ */
++#define DRM_TTM_PAGE_USER_DMA (1 << 8)
++
++/***************************************************
++ * Buffer objects. (drm_bo.c, drm_bo_move.c)
++ */
++
++struct drm_bo_mem_reg {
++	struct drm_mm_node *mm_node;
++	unsigned long size;
++	unsigned long num_pages;
++	uint32_t page_alignment;
++	uint32_t mem_type;
++	/*
++	 * Current buffer status flags, indicating
++	 * where the buffer is located and which
++	 * access modes are in effect
++	 */
++	uint64_t flags;
++	/**
++	 * These are the flags proposed for
++	 * a validate operation. If the
++	 * validate succeeds, they'll get moved
++	 * into the flags field
++	 */
++	uint64_t proposed_flags;
++
++	uint32_t desired_tile_stride;
++	uint32_t hw_tile_stride;
++};
++
++enum drm_bo_type {
++	/*
++	 * drm_bo_type_device are 'normal' drm allocations,
++	 * pages are allocated from within the kernel automatically
++	 * and the objects can be mmap'd from the drm device. Each
++	 * drm_bo_type_device object has a unique name which can be
++	 * used by other processes to share access to the underlying
++	 * buffer.
++	 */
++	drm_bo_type_device,
++	/*
++	 * drm_bo_type_user are buffers of pages that already exist
++	 * in the process address space. They are more limited than
++	 * drm_bo_type_device buffers in that they must always
++	 * remain cached (as we assume the user pages are mapped cached),
++	 * and they are not sharable to other processes through DRM
++	 * (although, regular shared memory should still work fine).
++	 */
++	drm_bo_type_user,
++	/*
++	 * drm_bo_type_kernel are buffers that exist solely for use
++	 * within the kernel. The pages cannot be mapped into the
++	 * process. One obvious use would be for the ring
++	 * buffer where user access would not (ideally) be required.
++	 */
++	drm_bo_type_kernel,
++};
++
++struct drm_buffer_object {
++	struct drm_device *dev;
++	struct drm_user_object base;
++
++	/*
++	 * If there is a possibility that the usage variable is zero,
++	 * then dev->struct_mutex should be locked before incrementing it.
++	 */
++
++	atomic_t usage;
++	unsigned long buffer_start;
++	enum drm_bo_type type;
++	unsigned long offset;
++	atomic_t mapped;
++	struct drm_bo_mem_reg mem;
++
++	struct list_head lru;
++	struct list_head ddestroy;
++
++	uint32_t fence_type;
++	uint32_t fence_class;
++	uint32_t new_fence_type;
++	uint32_t new_fence_class;
++	struct drm_fence_object *fence;
++	uint32_t priv_flags;
++	wait_queue_head_t event_queue;
++	struct mutex mutex;
++	unsigned long num_pages;
++
++	/* For pinned buffers */
++	struct drm_mm_node *pinned_node;
++	uint32_t pinned_mem_type;
++	struct list_head pinned_lru;
++
++	/* For vm */
++	struct drm_ttm *ttm;
++	struct drm_map_list map_list;
++	uint32_t memory_type;
++	unsigned long bus_offset;
++	uint32_t vm_flags;
++	void *iomap;
++
++#ifdef DRM_ODD_MM_COMPAT
++	/* dev->struct_mutex only protected. */
++	struct list_head vma_list;
++	struct list_head p_mm_list;
++#endif
++
++};
++
++#define _DRM_BO_FLAG_UNFENCED 0x00000001
++#define _DRM_BO_FLAG_EVICTED  0x00000002
++
++/*
++ * This flag indicates that a function called with bo->mutex held has
++ * temporarily released the buffer object mutex (usually to wait for
++ * something), and thus any post-lock validation needs to be rerun.
++ */ ++ ++#define _DRM_BO_FLAG_UNLOCKED 0x00000004 ++ ++struct drm_mem_type_manager { ++ int has_type; ++ int use_type; ++ int kern_init_type; ++ struct drm_mm manager; ++ struct list_head lru; ++ struct list_head pinned; ++ uint32_t flags; ++ uint32_t drm_bus_maptype; ++ unsigned long gpu_offset; ++ unsigned long io_offset; ++ unsigned long io_size; ++ void *io_addr; ++ uint64_t size; /* size of managed area for reporting to userspace */ ++}; ++ ++struct drm_bo_lock { ++ struct drm_user_object base; ++ wait_queue_head_t queue; ++ atomic_t write_lock_pending; ++ atomic_t readers; ++}; ++ ++#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ ++#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ ++#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */ ++#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap ++ before kernel access. */ ++#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ ++#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ ++ ++struct drm_buffer_manager { ++ struct drm_bo_lock bm_lock; ++ struct mutex evict_mutex; ++ int nice_mode; ++ int initialized; ++ struct drm_file *last_to_validate; ++ struct drm_mem_type_manager man[DRM_BO_MEM_TYPES]; ++ struct list_head unfenced; ++ struct list_head ddestroy; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct work_struct wq; ++#else ++ struct delayed_work wq; ++#endif ++ uint32_t fence_type; ++ unsigned long cur_pages; ++ atomic_t count; ++ struct page *dummy_read_page; ++}; ++ ++struct drm_bo_driver { ++ const uint32_t *mem_type_prio; ++ const uint32_t *mem_busy_prio; ++ uint32_t num_mem_type_prio; ++ uint32_t num_mem_busy_prio; ++ struct drm_ttm_backend *(*create_ttm_backend_entry) ++ (struct drm_device *dev); ++ int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass, ++ uint32_t *type); ++ int (*invalidate_caches) (struct drm_device *dev, uint64_t flags); ++ int (*init_mem_type) (struct drm_device *dev, uint32_t type, ++ struct drm_mem_type_manager *man); ++ /* ++ * evict_flags: ++ * ++ * @bo: the buffer object to be evicted ++ * ++ * Return the bo flags for a buffer which is not mapped to the hardware. ++ * These will be placed in proposed_flags so that when the move is ++ * finished, they'll end up in bo->mem.flags ++ */ ++ uint64_t(*evict_flags) (struct drm_buffer_object *bo); ++ /* ++ * move: ++ * ++ * @bo: the buffer to move ++ * ++ * @evict: whether this motion is evicting the buffer from ++ * the graphics address space ++ * ++ * @no_wait: whether this should give up and return -EBUSY ++ * if this move would require sleeping ++ * ++ * @new_mem: the new memory region receiving the buffer ++ * ++ * Move a buffer between two memory regions. ++ */ ++ int (*move) (struct drm_buffer_object *bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem); ++ /* ++ * ttm_cache_flush ++ */ ++ void (*ttm_cache_flush)(struct drm_ttm *ttm); ++ ++ /* ++ * command_stream_barrier ++ * ++ * @dev: The drm device. ++ * ++ * @bo: The buffer object to validate. ++ * ++ * @new_fence_class: The new fence class for the buffer object. ++ * ++ * @new_fence_type: The new fence type for the buffer object. ++ * ++ * @no_wait: whether this should give up and return -EBUSY ++ * if this operation would require sleeping ++ * ++ * Insert a command stream barrier that makes sure that the ++ * buffer is idle once the commands associated with the ++ * current validation are starting to execute. 
If an error ++ * condition is returned, or the function pointer is NULL, ++ * the drm core will force buffer idle ++ * during validation. ++ */ ++ ++ int (*command_stream_barrier) (struct drm_buffer_object *bo, ++ uint32_t new_fence_class, ++ uint32_t new_fence_type, ++ int no_wait); ++}; ++ ++/* ++ * buffer objects (drm_bo.c) ++ */ ++ ++extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int drm_bo_driver_finish(struct drm_device *dev); ++extern int drm_bo_driver_init(struct drm_device *dev); ++extern int drm_bo_pci_offset(struct drm_device *dev, ++ struct drm_bo_mem_reg *mem, ++ unsigned long *bus_base, ++ unsigned long *bus_offset, ++ unsigned long *bus_size); ++extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem); ++ ++extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo); ++extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo); ++extern void drm_putback_buffer_objects(struct drm_device *dev); ++extern int drm_fence_buffer_objects(struct drm_device *dev, ++ struct list_head *list, ++ uint32_t fence_flags, ++ struct drm_fence_object *fence, ++ struct drm_fence_object **used_fence); ++extern void drm_bo_add_to_lru(struct drm_buffer_object *bo); ++extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, ++ enum drm_bo_type type, uint64_t flags, ++ uint32_t hint, uint32_t page_alignment, ++ unsigned long buffer_start, ++ struct drm_buffer_object **bo); ++extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible, ++ int no_wait, int check_unfenced); ++extern int drm_bo_mem_space(struct drm_buffer_object *bo, ++ struct drm_bo_mem_reg *mem, int no_wait); ++extern int drm_bo_move_buffer(struct drm_buffer_object *bo, ++ uint64_t new_mem_flags, ++ int no_wait, int move_unfenced); ++extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean); ++extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, ++ unsigned long p_offset, unsigned long p_size, ++ int kern_init); ++extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, ++ 
uint64_t flags, uint64_t mask, uint32_t hint, ++ uint32_t fence_class, ++ struct drm_bo_info_rep *rep, ++ struct drm_buffer_object **bo_rep); ++extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, ++ uint32_t handle, ++ int check_owner); ++extern int drm_bo_do_validate(struct drm_buffer_object *bo, ++ uint64_t flags, uint64_t mask, uint32_t hint, ++ uint32_t fence_class, ++ struct drm_bo_info_rep *rep); ++extern int drm_bo_evict_cached(struct drm_buffer_object *bo); ++/* ++ * Buffer object memory move- and map helpers. ++ * drm_bo_move.c ++ */ ++ ++extern int drm_bo_move_ttm(struct drm_buffer_object *bo, ++ int evict, int no_wait, ++ struct drm_bo_mem_reg *new_mem); ++extern int drm_bo_move_memcpy(struct drm_buffer_object *bo, ++ int evict, ++ int no_wait, struct drm_bo_mem_reg *new_mem); ++extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo, ++ int evict, int no_wait, ++ uint32_t fence_class, uint32_t fence_type, ++ uint32_t fence_flags, ++ struct drm_bo_mem_reg *new_mem); ++extern int drm_bo_same_page(unsigned long offset, unsigned long offset2); ++extern unsigned long drm_bo_offset_end(unsigned long offset, ++ unsigned long end); ++ ++struct drm_bo_kmap_obj { ++ void *virtual; ++ struct page *page; ++ enum { ++ bo_map_iomap, ++ bo_map_vmap, ++ bo_map_kmap, ++ bo_map_premapped, ++ } bo_kmap_type; ++}; ++ ++static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem) ++{ ++ *is_iomem = (map->bo_kmap_type == bo_map_iomap || ++ map->bo_kmap_type == bo_map_premapped); ++ return map->virtual; ++} ++extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map); ++extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page, ++ unsigned long num_pages, struct drm_bo_kmap_obj *map); ++extern int drm_bo_pfn_prot(struct drm_buffer_object *bo, ++ unsigned long dst_offset, ++ unsigned long *pfn, ++ pgprot_t *prot); ++extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo, ++ struct drm_bo_info_rep *rep); ++ ++ ++/* ++ * drm_regman.c ++ */ ++ ++struct drm_reg { ++ struct list_head head; ++ struct drm_fence_object *fence; ++ uint32_t fence_type; ++ uint32_t new_fence_type; ++}; ++ ++struct drm_reg_manager { ++ struct list_head free; ++ struct list_head lru; ++ struct list_head unfenced; ++ ++ int (*reg_reusable)(const struct drm_reg *reg, const void *data); ++ void (*reg_destroy)(struct drm_reg *reg); ++}; ++ ++extern int drm_regs_alloc(struct drm_reg_manager *manager, ++ const void *data, ++ uint32_t fence_class, ++ uint32_t fence_type, ++ int interruptible, ++ int no_wait, ++ struct drm_reg **reg); ++ ++extern void drm_regs_fence(struct drm_reg_manager *regs, ++ struct drm_fence_object *fence); ++ ++extern void drm_regs_free(struct drm_reg_manager *manager); ++extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg); ++extern void drm_regs_init(struct drm_reg_manager *manager, ++ int (*reg_reusable)(const struct drm_reg *, ++ const void *), ++ void (*reg_destroy)(struct drm_reg *)); ++ ++/* ++ * drm_bo_lock.c ++ * Simple replacement for the hardware lock on buffer manager init and clean. 
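++ *
++ * A sketch of the intended usage (illustration only, not part of this
++ * patch): paths that only consume the buffer manager take the read lock,
++ * while init and takedown paths take the write lock:
++ *
++ *	ret = drm_bo_read_lock(&bm->bm_lock, 1);
++ *	if (ret)
++ *		return ret;
++ *	...validate / submit buffers...
++ *	drm_bo_read_unlock(&bm->bm_lock);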
++ */
++
++
++extern void drm_bo_init_lock(struct drm_bo_lock *lock);
++extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
++extern int drm_bo_read_lock(struct drm_bo_lock *lock,
++			    int interruptible);
++extern int drm_bo_write_lock(struct drm_bo_lock *lock,
++			     int interruptible,
++			     struct drm_file *file_priv);
++
++extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
++			       struct drm_file *file_priv);
++
++#ifdef CONFIG_DEBUG_MUTEXES
++#define DRM_ASSERT_LOCKED(_mutex)				\
++	BUG_ON(!mutex_is_locked(_mutex) ||			\
++	       ((_mutex)->owner != current_thread_info()))
++#else
++#define DRM_ASSERT_LOCKED(_mutex)
++#endif
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_os_linux.h git-nokia/drivers/gpu/drm-tungsten/drm_os_linux.h
+--- git/drivers/gpu/drm-tungsten/drm_os_linux.h	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_os_linux.h	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,145 @@
++/**
++ * \file drm_os_linux.h
++ * OS abstraction macros.
++ */
++
++#include <linux/interrupt.h>	/* For task queue support */
++#include <linux/delay.h>
++
++/** Current process ID */
++#define DRM_CURRENTPID current->pid
++#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
++#define DRM_UDELAY(d) udelay(d)
++#if LINUX_VERSION_CODE <= 0x020608 /* KERNEL_VERSION(2,6,8) */
++#ifndef __iomem
++#define __iomem
++#endif
++/** Read a byte from a MMIO region */
++#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
++/** Read a word from a MMIO region */
++#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset))
++/** Read a dword from a MMIO region */
++#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset))
++/** Write a byte into a MMIO region */
++#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset))
++/** Write a word into a MMIO region */
++#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset))
++/** Write a dword into a MMIO region */
++#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
++#else
++/** Read a byte from a MMIO region */
++#define DRM_READ8(map, offset) readb((map)->handle + (offset))
++/** Read a word from a MMIO region */
++#define DRM_READ16(map, offset) readw((map)->handle + (offset))
++/** Read a dword from a MMIO region */
++#define DRM_READ32(map, offset) readl((map)->handle + (offset))
++/** Write a byte into a MMIO region */
++#define DRM_WRITE8(map, offset, val) writeb(val, (map)->handle + (offset))
++/** Write a word into a MMIO region */
++#define DRM_WRITE16(map, offset, val) writew(val, (map)->handle + (offset))
++/** Write a dword into a MMIO region */
++#define DRM_WRITE32(map, offset, val) writel(val, (map)->handle + (offset))
++#endif
++/** Read memory barrier */
++#define DRM_READMEMORYBARRIER() rmb()
++/** Write memory barrier */
++#define DRM_WRITEMEMORYBARRIER() wmb()
++/** Read/write memory barrier */
++#define DRM_MEMORYBARRIER() mb()
++
++/** IRQ handler arguments and return type and values */
++#define DRM_IRQ_ARGS int irq, void *arg
++/** backwards compatibility with old irq return values */
++#ifndef IRQ_HANDLED
++typedef void irqreturn_t;
++#define IRQ_HANDLED /* nothing */
++#define IRQ_NONE /* nothing */
++#endif
++
++/** AGP types */
++#if __OS_HAS_AGP
++#define DRM_AGP_MEM struct agp_memory
++#define DRM_AGP_KERN struct agp_kern_info
++#else
++/* define some dummy types for non AGP supporting kernels */
++struct no_agp_kern {
++	unsigned long aper_base;
++	unsigned long aper_size;
++};
++#define DRM_AGP_MEM int
++#define DRM_AGP_KERN struct no_agp_kern
++#endif
++
++#if !(__OS_HAS_MTRR)
++static __inline__ int mtrr_add(unsigned long base, unsigned long size,
++			       unsigned int type, char increment)
++{
++	return -ENODEV;
++}
++
++static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
++{
++	return -ENODEV;
++}
++
++#define MTRR_TYPE_WRCOMB 1
++#endif
++
++/** Other copying of data to kernel space */
++#define DRM_COPY_FROM_USER(arg1, arg2, arg3)		\
++	copy_from_user(arg1, arg2, arg3)
++/** Other copying of data from kernel space */
++#define DRM_COPY_TO_USER(arg1, arg2, arg3)		\
++	copy_to_user(arg1, arg2, arg3)
++/* Macros for copy from user, but checking readability only once */
++#define DRM_VERIFYAREA_READ( uaddr, size )		\
++	(access_ok( VERIFY_READ, uaddr, size) ? 0 : -EFAULT)
++#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3)	\
++	__copy_from_user(arg1, arg2, arg3)
++#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)	\
++	__copy_to_user(arg1, arg2, arg3)
++#define DRM_GET_USER_UNCHECKED(val, uaddr)		\
++	__get_user(val, uaddr)
++
++#define DRM_HZ HZ
++
++#define DRM_WAIT_ON( ret, queue, timeout, condition )		\
++do {								\
++	DECLARE_WAITQUEUE(entry, current);			\
++	unsigned long end = jiffies + (timeout);		\
++	add_wait_queue(&(queue), &entry);			\
++								\
++	for (;;) {						\
++		__set_current_state(TASK_INTERRUPTIBLE);	\
++		if (condition)					\
++			break;					\
++		if (time_after_eq(jiffies, end)) {		\
++			ret = -EBUSY;				\
++			break;					\
++		}						\
++		schedule_timeout((HZ/100 > 1) ? HZ/100 : 1);	\
++		if (signal_pending(current)) {			\
++			ret = -EINTR;				\
++			break;					\
++		}						\
++	}							\
++	__set_current_state(TASK_RUNNING);			\
++	remove_wait_queue(&(queue), &entry);			\
++} while (0)
++
++#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
++#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
++
++/** Type for the OS's non-sleepable mutex lock */
++#define DRM_SPINTYPE spinlock_t
++/**
++ * Initialize the lock for use. name is an optional string describing the
++ * lock
++ */
++#define DRM_SPININIT(l,name) spin_lock_init(l)
++#define DRM_SPINUNINIT(l)
++#define DRM_SPINLOCK(l) spin_lock(l)
++#define DRM_SPINUNLOCK(l) spin_unlock(l)
++#define DRM_SPINLOCK_IRQSAVE(l, _flags) spin_lock_irqsave(l, _flags);
++#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags);
++#define DRM_SPINLOCK_ASSERT(l) do {} while (0)
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_pci.c git-nokia/drivers/gpu/drm-tungsten/drm_pci.c
+--- git/drivers/gpu/drm-tungsten/drm_pci.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_pci.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,177 @@
++/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
++/**
++ * \file drm_pci.c
++ * \brief Functions and ioctls to manage PCI memory
++ *
++ * \warning These interfaces aren't stable yet.
++ *
++ * \todo Implement the remaining ioctl's for the PCI pools.
++ * \todo The wrappers here are so thin that they would be better off inlined.
++ *
++ * \author Jose Fonseca
++ * \author Leif Delgass
++ */
++
++/*
++ * Copyright 2003 José Fonseca.
++ * Copyright 2003 Leif Delgass.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/pci.h>
++#include <linux/dma-mapping.h>
++#include "drmP.h"
++
++/**********************************************************************/
++/** \name PCI memory */
++/*@{*/
++
++/**
++ * \brief Allocate a PCI consistent memory block, for DMA.
++ */
++drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
++				dma_addr_t maxaddr)
++{
++	drm_dma_handle_t *dmah;
++	unsigned long addr;
++	size_t sz;
++#ifdef DRM_DEBUG_MEMORY
++	int area = DRM_MEM_DMA;
++
++	spin_lock(&drm_mem_lock);
++	if ((drm_ram_used >> PAGE_SHIFT)
++	    > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
++		spin_unlock(&drm_mem_lock);
++		return 0;
++	}
++	spin_unlock(&drm_mem_lock);
++#endif
++
++	/* pci_alloc_consistent only guarantees alignment to the smallest
++	 * PAGE_SIZE order which is greater than or equal to the requested size.
++	 * Return NULL here for now to make sure nobody tries for larger alignment
++	 */
++	if (align > size)
++		return NULL;
++
++	if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
++		DRM_ERROR("Setting pci dma mask failed\n");
++		return NULL;
++	}
++
++	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
++	if (!dmah)
++		return NULL;
++
++	dmah->size = size;
++	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
++
++#ifdef DRM_DEBUG_MEMORY
++	if (dmah->vaddr == NULL) {
++		spin_lock(&drm_mem_lock);
++		++drm_mem_stats[area].fail_count;
++		spin_unlock(&drm_mem_lock);
++		kfree(dmah);
++		return NULL;
++	}
++
++	spin_lock(&drm_mem_lock);
++	++drm_mem_stats[area].succeed_count;
++	drm_mem_stats[area].bytes_allocated += size;
++	drm_ram_used += size;
++	spin_unlock(&drm_mem_lock);
++#else
++	if (dmah->vaddr == NULL) {
++		kfree(dmah);
++		return NULL;
++	}
++#endif
++
++	memset(dmah->vaddr, 0, size);
++
++	/* XXX - Is virt_to_page() legal for consistent mem? */
++	/* Reserve */
++	for (addr = (unsigned long)dmah->vaddr, sz = size;
++	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++		SetPageReserved(virt_to_page(addr));
++	}
++
++	return dmah;
++}
++EXPORT_SYMBOL(drm_pci_alloc);
++
++/**
++ * \brief Free a PCI consistent memory block without freeing its descriptor.
++ *
++ * This function is for internal use in the Linux-specific DRM core code.
++ */
++void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
++{
++	unsigned long addr;
++	size_t sz;
++#ifdef DRM_DEBUG_MEMORY
++	int area = DRM_MEM_DMA;
++	int alloc_count;
++	int free_count;
++#endif
++
++	if (!dmah->vaddr) {
++#ifdef DRM_DEBUG_MEMORY
++		DRM_MEM_ERROR(area, "Attempt to free address 0\n");
++#endif
++	} else {
++		/* XXX - Is virt_to_page() legal for consistent mem? */
++		/* Unreserve */
++		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
++		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++			ClearPageReserved(virt_to_page(addr));
++		}
++		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
++				  dmah->busaddr);
++	}
++
++#ifdef DRM_DEBUG_MEMORY
++	spin_lock(&drm_mem_lock);
++	free_count = ++drm_mem_stats[area].free_count;
++	alloc_count = drm_mem_stats[area].succeed_count;
++	drm_mem_stats[area].bytes_freed += size;
++	drm_ram_used -= size;
++	spin_unlock(&drm_mem_lock);
++	if (free_count > alloc_count) {
++		DRM_MEM_ERROR(area,
++			      "Excess frees: %d frees, %d allocs\n",
++			      free_count, alloc_count);
++	}
++#endif
++
++}
++
++/**
++ * \brief Free a PCI consistent memory block
++ */
++void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
++{
++	__drm_pci_free(dev, dmah);
++	kfree(dmah);
++}
++EXPORT_SYMBOL(drm_pci_free);
++
++/*@}*/
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_pciids.h git-nokia/drivers/gpu/drm-tungsten/drm_pciids.h
+--- git/drivers/gpu/drm-tungsten/drm_pciids.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_pciids.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,614 @@
++/*
++   This file is auto-generated from the drm_pciids.txt in the DRM CVS
++   Please contact dri-devel@lists.sf.net to add new cards to this list
++*/
++#define radeon_PCI_IDS \
++	{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
++	{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
++	{0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
++	{0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++	{0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++	{0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++	{0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++	{0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++	{0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++	{0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++	{0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++	{0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++	{0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++	{0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++	{0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++	{0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++	{0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++	{0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++	{0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
++	{0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
++	{0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
CHIP_R200}, \ ++ {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ ++ {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ ++ {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ ++ {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ ++ {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ ++ {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ ++ {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ ++ {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ ++ {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ ++ {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ ++ {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ ++ {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ ++ {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ ++ {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ ++ {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \ ++ {0x1002, 0x5148, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_R200}, \ ++ {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ ++ {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ ++ {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ ++ {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ ++ {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ ++ {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ ++ {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ ++ {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ ++ {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ ++ {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ ++ {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ ++ {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ ++ {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ ++ {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ ++ {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ ++ {0x1002, 
0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ ++ {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ 
{0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C6, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ ++ {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ ++ {0, 0, 0} ++ ++#define r128_PCI_IDS \ ++ {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define mga_PCI_IDS \ ++ {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ ++ {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ ++ {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \ ++ {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \ ++ {0, 0, 0} ++ ++#define mach64_PCI_IDS \ ++ {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4753, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define sis_PCI_IDS \ ++ {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ ++ {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ ++ {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ ++ {0, 0, 0} ++ ++#define pvr2d_PCI_IDS \ ++ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define tdfx_PCI_IDS \ ++ {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define viadrv_PCI_IDS \ ++ {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ ++ {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \ ++ {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ ++ {0x1106, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \ ++ {0, 0, 0} ++ ++#define i810_PCI_IDS \ ++ {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define i830_PCI_IDS \ ++ {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define gamma_PCI_IDS \ ++ {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} ++ ++#define savage_PCI_IDS \ ++ {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ ++ {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ ++ {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \ ++ {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \ ++ {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ ++ {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ ++ {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ ++ {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ ++ {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ ++ {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \ ++ {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \ ++ {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ ++ {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ ++ {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ ++ {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ ++ {0, 0, 0} ++ ++#define ffb_PCI_IDS \ ++ {0, 0, 0} ++ ++#define i915_PCI_IDS \ ++ {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \ ++ {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \ ++ {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \ ++ {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \ ++ {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x27A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x27AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x29A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2A02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2A12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x29C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x29B2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x29D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \ ++ {0x8086, 0x2A42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2E02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2E12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0x8086, 0x2E22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \ ++ {0, 0, 0} ++ ++#define imagine_PCI_IDS \ ++ {0x105d, 0x2309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_128}, \ ++ {0x105d, 0x2339, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_128_2}, \ ++ {0x105d, 0x493d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_T2R}, \ ++ {0x105d, 0x5348, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_REV4}, \ ++ {0, 0, 0} ++ ++#define nv_PCI_IDS \ ++ {0x10DE, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x0028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x002A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x002C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x0029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x002D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x00A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \ ++ {0x10DE, 0x0100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0113, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ 
{0x10DE, 0x0150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0170, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0171, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0172, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0173, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0174, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0175, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0176, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0177, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0178, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0179, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x017A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x017C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x017D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0185, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0189, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x018A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x018B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x018C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x018D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x01A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x01F0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \ ++ {0x10DE, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0203, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0251, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0252, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0253, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0258, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0259, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x025B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0282, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x028C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \ ++ {0x10DE, 0x0301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0308, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0314, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x031A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 
0x031B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x031C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x031D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x031E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x031F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0322, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0323, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0325, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0327, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0328, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0329, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x032A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x032B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x032C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x032D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x032F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0331, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0332, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0333, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x033F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0334, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0338, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0342, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0345, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0348, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0349, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x034B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x034C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x034E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x034F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \ ++ {0x10DE, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x004E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x00CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10de, 0x00f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10de, 0x00f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0143, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x014B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x014C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x014D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x014E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x014F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0160, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0161, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0162, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0163, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0164, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0165, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0166, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0167, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x016B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x016C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x016D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x016E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0212, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0215, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0222, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0228, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0090, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0091, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0092, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0093, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0094, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0098, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x0099, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x009C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x009D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0x10DE, 0x009E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \ ++ {0, 0, 0} ++ ++#define xgi_PCI_IDS \ ++ {0x18ca, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0x18ca, 0x0047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ ++ {0, 0, 0} +diff -Nurd git/drivers/gpu/drm-tungsten/drmP.h git-nokia/drivers/gpu/drm-tungsten/drmP.h +--- git/drivers/gpu/drm-tungsten/drmP.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drmP.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1507 @@ ++/** ++ * \file drmP.h ++ * Private header for Direct Rendering Manager ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef _DRM_P_H_ ++#define _DRM_P_H_ ++ ++#ifdef __KERNEL__ ++#ifdef __alpha__ ++/* add include of current.h so that "current" is defined ++ * before static inline funcs in wait.h. Doing this so we ++ * can build the DRM (part of PI DRI). 4/21/2000 S + B */ ++#include ++#endif /* __alpha__ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* For (un)lock_kernel */ ++#include ++#include ++#include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) ++#include ++#endif ++#if defined(__alpha__) || defined(__powerpc__) ++#include /* For pte_wrprotect */ ++#endif ++#include ++#include ++#include ++#ifdef CONFIG_MTRR ++#include ++#endif ++#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) ++#include ++#include ++#include ++#endif ++#include ++#include ++#include ++#include "drm.h" ++#include ++#include ++ ++#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) ++#define __OS_HAS_MTRR (defined(CONFIG_MTRR)) ++ ++#include "drm_os_linux.h" ++#include "drm_hashtab.h" ++#include "drm_internal.h" ++ ++struct drm_device; ++struct drm_file; ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ++typedef unsigned long uintptr_t; ++#endif ++ ++/* If you want the memory alloc debug functionality, change define below */ ++/* #define DEBUG_MEMORY */ ++ ++/***********************************************************************/ ++/** \name DRM template customization defaults */ ++/*@{*/ ++ ++/* driver capabilities and requirements mask */ ++#define DRIVER_USE_AGP 0x1 ++#define DRIVER_REQUIRE_AGP 0x2 ++#define DRIVER_USE_MTRR 0x4 ++#define DRIVER_PCI_DMA 0x8 ++#define DRIVER_SG 0x10 ++#define DRIVER_HAVE_DMA 0x20 ++#define DRIVER_HAVE_IRQ 0x40 ++#define DRIVER_IRQ_SHARED 0x80 ++#define DRIVER_DMA_QUEUE 0x100 ++#define DRIVER_FB_DMA 0x200 ++#define DRIVER_GEM 0x400 ++ ++/*@}*/ ++ ++/***********************************************************************/ ++/** \name Begin the DRM... */ ++/*@{*/ ++ ++#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then ++ also include looping detection. */ ++ ++#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. 
*/ ++#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */ ++#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */ ++#define DRM_LOOPING_LIMIT 5000000 ++#define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */ ++#define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */ ++ ++#define DRM_FLAG_DEBUG 0x01 ++ ++#define DRM_MEM_DMA 0 ++#define DRM_MEM_SAREA 1 ++#define DRM_MEM_DRIVER 2 ++#define DRM_MEM_MAGIC 3 ++#define DRM_MEM_IOCTLS 4 ++#define DRM_MEM_MAPS 5 ++#define DRM_MEM_VMAS 6 ++#define DRM_MEM_BUFS 7 ++#define DRM_MEM_SEGS 8 ++#define DRM_MEM_PAGES 9 ++#define DRM_MEM_FILES 10 ++#define DRM_MEM_QUEUES 11 ++#define DRM_MEM_CMDS 12 ++#define DRM_MEM_MAPPINGS 13 ++#define DRM_MEM_BUFLISTS 14 ++#define DRM_MEM_AGPLISTS 15 ++#define DRM_MEM_TOTALAGP 16 ++#define DRM_MEM_BOUNDAGP 17 ++#define DRM_MEM_CTXBITMAP 18 ++#define DRM_MEM_STUB 19 ++#define DRM_MEM_SGLISTS 20 ++#define DRM_MEM_CTXLIST 21 ++#define DRM_MEM_MM 22 ++#define DRM_MEM_HASHTAB 23 ++#define DRM_MEM_OBJECTS 24 ++#define DRM_MEM_FENCE 25 ++#define DRM_MEM_TTM 26 ++#define DRM_MEM_BUFOBJ 27 ++ ++#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) ++#define DRM_MAP_HASH_OFFSET 0x10000000 ++#define DRM_MAP_HASH_ORDER 12 ++#define DRM_OBJECT_HASH_ORDER 12 ++#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) ++#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) ++/* ++ * This should be small enough to allow the use of kmalloc for hash tables ++ * instead of vmalloc. ++ */ ++ ++#define DRM_FILE_HASH_ORDER 8 ++#define DRM_MM_INIT_MAX_PAGES 256 ++ ++/*@}*/ ++ ++#include "drm_compat.h" ++ ++/***********************************************************************/ ++/** \name Macros to make printk easier */ ++/*@{*/ ++ ++/** ++ * Error output. ++ * ++ * \param fmt printf() like format string. ++ * \param arg arguments ++ */ ++#define DRM_ERROR(fmt, arg...) \ ++ printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__ , ##arg) ++ ++/** ++ * Memory error output. ++ * ++ * \param area memory area where the error occurred. ++ * \param fmt printf() like format string. ++ * \param arg arguments ++ */ ++#define DRM_MEM_ERROR(area, fmt, arg...) \ ++ printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \ ++ drm_mem_stats[area].name , ##arg) ++#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) ++ ++/** ++ * Debug output. ++ * ++ * \param fmt printf() like format string. ++ * \param arg arguments ++ */ ++#if DRM_DEBUG_CODE ++#define DRM_DEBUG(fmt, arg...) \ ++ do { \ ++ if ( drm_debug ) \ ++ printk(KERN_DEBUG \ ++ "[" DRM_NAME ":%s] " fmt , \ ++ __FUNCTION__ , ##arg); \ ++ } while (0) ++#else ++#define DRM_DEBUG(fmt, arg...) do { } while (0) ++#endif ++ ++#define DRM_PROC_LIMIT (PAGE_SIZE-80) ++ ++#define DRM_PROC_PRINT(fmt, arg...) \ ++ len += sprintf(&buf[len], fmt , ##arg); \ ++ if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; } ++ ++#define DRM_PROC_PRINT_RET(ret, fmt, arg...) 
\ ++ len += sprintf(&buf[len], fmt , ##arg); \ ++ if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; } ++ ++/*@}*/ ++ ++/***********************************************************************/ ++/** \name Internal types and structures */ ++/*@{*/ ++ ++#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) ++#define DRM_MIN(a,b) min(a,b) ++#define DRM_MAX(a,b) max(a,b) ++ ++#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) ++#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) ++#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist) ++ ++#define DRM_IF_VERSION(maj, min) (maj << 16 | min) ++/** ++ * Get the private SAREA mapping. ++ * ++ * \param _dev DRM device. ++ * \param _ctx context number. ++ * \param _map output mapping. ++ */ ++#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \ ++ (_map) = (_dev)->context_sareas[_ctx]; \ ++} while(0) ++ ++/** ++ * Test that the hardware lock is held by the caller, returning otherwise. ++ * ++ * \param dev DRM device. ++ * \param file_priv DRM file private pointer of the caller. ++ */ ++#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \ ++do { \ ++ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \ ++ dev->lock.file_priv != file_priv ) { \ ++ DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ ++ __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\ ++ dev->lock.file_priv, file_priv ); \ ++ return -EINVAL; \ ++ } \ ++} while (0) ++ ++/** ++ * Copy and IOCTL return string to user space ++ */ ++#define DRM_COPY( name, value ) \ ++ len = strlen( value ); \ ++ if ( len > name##_len ) len = name##_len; \ ++ name##_len = strlen( value ); \ ++ if ( len && name ) { \ ++ if ( copy_to_user( name, value, len ) ) \ ++ return -EFAULT; \ ++ } ++ ++/** ++ * Ioctl function type. ++ * ++ * \param dev DRM device structure ++ * \param data pointer to kernel-space stored data, copied in and out according ++ * to ioctl description. ++ * \param file_priv DRM file private pointer. ++ */ ++typedef int drm_ioctl_t(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, ++ unsigned long arg); ++ ++#define DRM_AUTH 0x1 ++#define DRM_MASTER 0x2 ++#define DRM_ROOT_ONLY 0x4 ++ ++struct drm_ioctl_desc { ++ unsigned int cmd; ++ drm_ioctl_t *func; ++ int flags; ++}; ++/** ++ * Creates a driver or general drm_ioctl_desc array entry for the given ++ * ioctl, for use by drm_ioctl(). ++ */ ++#define DRM_IOCTL_DEF(ioctl, func, flags) \ ++ [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags} ++ ++struct drm_magic_entry { ++ struct list_head head; ++ struct drm_hash_item hash_item; ++ struct drm_file *priv; ++}; ++ ++struct drm_vma_entry { ++ struct list_head head; ++ struct vm_area_struct *vma; ++ pid_t pid; ++}; ++ ++/** ++ * DMA buffer. 
++ */ ++struct drm_buf { ++ int idx; /**< Index into master buflist */ ++ int total; /**< Buffer size */ ++ int order; /**< log-base-2(total) */ ++ int used; /**< Amount of buffer in use (for DMA) */ ++ unsigned long offset; /**< Byte offset (used internally) */ ++ void *address; /**< Address of buffer */ ++ unsigned long bus_address; /**< Bus address of buffer */ ++ struct drm_buf *next; /**< Kernel-only: used for free list */ ++ __volatile__ int waiting; /**< On kernel DMA queue */ ++ __volatile__ int pending; /**< On hardware DMA queue */ ++ wait_queue_head_t dma_wait; /**< Processes waiting */ ++ struct drm_file *file_priv; /**< Private of holding file descr */ ++ int context; /**< Kernel queue for this buffer */ ++ int while_locked; /**< Dispatch this buffer while locked */ ++ enum { ++ DRM_LIST_NONE = 0, ++ DRM_LIST_FREE = 1, ++ DRM_LIST_WAIT = 2, ++ DRM_LIST_PEND = 3, ++ DRM_LIST_PRIO = 4, ++ DRM_LIST_RECLAIM = 5 ++ } list; /**< Which list we're on */ ++ ++ int dev_priv_size; /**< Size of buffer private storage */ ++ void *dev_private; /**< Per-buffer private storage */ ++}; ++ ++/** bufs is one longer than it has to be */ ++struct drm_waitlist { ++ int count; /**< Number of possible buffers */ ++ struct drm_buf **bufs; /**< List of pointers to buffers */ ++ struct drm_buf **rp; /**< Read pointer */ ++ struct drm_buf **wp; /**< Write pointer */ ++ struct drm_buf **end; /**< End pointer */ ++ spinlock_t read_lock; ++ spinlock_t write_lock; ++}; ++ ++struct drm_freelist { ++ int initialized; /**< Freelist in use */ ++ atomic_t count; /**< Number of free buffers */ ++ struct drm_buf *next; /**< End pointer */ ++ ++ wait_queue_head_t waiting; /**< Processes waiting on free bufs */ ++ int low_mark; /**< Low water mark */ ++ int high_mark; /**< High water mark */ ++ atomic_t wfh; /**< If waiting for high mark */ ++ spinlock_t lock; ++}; ++ ++typedef struct drm_dma_handle { ++ dma_addr_t busaddr; ++ void *vaddr; ++ size_t size; ++} drm_dma_handle_t; ++ ++/** ++ * Buffer entry. There is one of this for each buffer size order. ++ */ ++struct drm_buf_entry { ++ int buf_size; /**< size */ ++ int buf_count; /**< number of buffers */ ++ struct drm_buf *buflist; /**< buffer list */ ++ int seg_count; ++ int page_order; ++ struct drm_dma_handle **seglist; ++ struct drm_freelist freelist; ++}; ++ ++ ++enum drm_ref_type { ++ _DRM_REF_USE = 0, ++ _DRM_REF_TYPE1, ++ _DRM_NO_REF_TYPES ++}; ++ ++ ++/** File private data */ ++struct drm_file { ++ int authenticated; ++ int master; ++ pid_t pid; ++ uid_t uid; ++ drm_magic_t magic; ++ unsigned long ioctl_count; ++ struct list_head lhead; ++ struct drm_minor *minor; ++ int remove_auth_on_close; ++ unsigned long lock_count; ++ ++ /* ++ * The user object hash table is global and resides in the ++ * drm_device structure. We protect the lists and hash tables with the ++ * device struct_mutex. A bit coarse-grained but probably the best ++ * option. ++ */ ++ ++ struct list_head refd_objects; ++ ++ /** Mapping of mm object handles to object pointers. */ ++ struct idr object_idr; ++ /** Lock for synchronization of access to object_idr. 
*/
++ spinlock_t table_lock;
++
++ struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
++ struct file *filp;
++ void *driver_priv;
++};
++
++/** Wait queue */
++struct drm_queue {
++ atomic_t use_count; /**< Outstanding uses (+1) */
++ atomic_t finalization; /**< Finalization in progress */
++ atomic_t block_count; /**< Count of processes waiting */
++ atomic_t block_read; /**< Queue blocked for reads */
++ wait_queue_head_t read_queue; /**< Processes waiting on block_read */
++ atomic_t block_write; /**< Queue blocked for writes */
++ wait_queue_head_t write_queue; /**< Processes waiting on block_write */
++#if 1
++ atomic_t total_queued; /**< Total queued statistic */
++ atomic_t total_flushed; /**< Total flushes statistic */
++ atomic_t total_locks; /**< Total locks statistics */
++#endif
++ enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
++ struct drm_waitlist waitlist; /**< Pending buffers */
++ wait_queue_head_t flush_queue; /**< Processes waiting until flush */
++};
++
++/**
++ * Lock data.
++ */
++struct drm_lock_data {
++ struct drm_hw_lock *hw_lock; /**< Hardware lock */
++ /** Private of lock holder's file (NULL=kernel) */
++ struct drm_file *file_priv;
++ wait_queue_head_t lock_queue; /**< Queue of blocked processes */
++ unsigned long lock_time; /**< Time of last lock in jiffies */
++ spinlock_t spinlock;
++ uint32_t kernel_waiters;
++ uint32_t user_waiters;
++ int idle_has_lock;
++};
++
++/**
++ * DMA data.
++ */
++struct drm_device_dma {
++
++ struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
++ int buf_count; /**< total number of buffers */
++ struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
++ int seg_count;
++ int page_count; /**< number of pages */
++ unsigned long *pagelist; /**< page list */
++ unsigned long byte_count;
++ enum {
++ _DRM_DMA_USE_AGP = 0x01,
++ _DRM_DMA_USE_SG = 0x02,
++ _DRM_DMA_USE_FB = 0x04,
++ _DRM_DMA_USE_PCI_RO = 0x08
++ } flags;
++
++};
++
++/**
++ * AGP memory entry. Stored as a doubly linked list.
++ */
++struct drm_agp_mem {
++ unsigned long handle; /**< handle */
++ DRM_AGP_MEM *memory;
++ unsigned long bound; /**< address */
++ int pages;
++ struct list_head head;
++};
++
++/**
++ * AGP data.
++ *
++ * \sa drm_agp_init() and drm_device::agp.
++ */
++struct drm_agp_head {
++ DRM_AGP_KERN agp_info; /**< AGP device information */
++ struct list_head memory;
++ unsigned long mode; /**< AGP mode */
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)
++ struct agp_bridge_data *bridge;
++#endif
++ int enabled; /**< whether the AGP bus has been enabled */
++ int acquired; /**< whether the AGP device has been acquired */
++ unsigned long base;
++ int agp_mtrr;
++ int cant_use_aperture;
++ unsigned long page_mask;
++};
++
++/**
++ * Scatter-gather memory.
++ */
++struct drm_sg_mem {
++ unsigned long handle;
++ void *virtual;
++ int pages;
++ struct page **pagelist;
++ dma_addr_t *busaddr;
++};
++
++struct drm_sigdata {
++ int context;
++ struct drm_hw_lock *lock;
++};
++
++
++/*
++ * Generic memory manager structs
++ */
++
++struct drm_mm_node {
++ struct list_head fl_entry;
++ struct list_head ml_entry;
++ int free;
++ unsigned long start;
++ unsigned long size;
++ struct drm_mm *mm;
++ void *private;
++};
++
++struct drm_mm {
++ struct list_head fl_entry;
++ struct list_head ml_entry;
++};
++
++
++/**
++ * Mappings list
++ */
++struct drm_map_list {
++ struct list_head head; /**< list head */
++ struct drm_hash_item hash;
++ struct drm_map *map; /**< mapping */
++ uint64_t user_token;
++ struct drm_mm_node *file_offset_node;
++};
++
++typedef struct drm_map drm_local_map_t;
++
++/**
++ * Context handle list
++ */
++struct drm_ctx_list {
++ struct list_head head; /**< list head */
++ drm_context_t handle; /**< context handle */
++ struct drm_file *tag; /**< associated fd private data */
++};
++
++struct drm_vbl_sig {
++ struct list_head head;
++ unsigned int sequence;
++ struct siginfo info;
++ struct task_struct *task;
++};
++
++/* location of GART table */
++#define DRM_ATI_GART_MAIN 1
++#define DRM_ATI_GART_FB 2
++
++#define DRM_ATI_GART_PCI 1
++#define DRM_ATI_GART_PCIE 2
++#define DRM_ATI_GART_IGP 3
++
++struct drm_ati_pcigart_info {
++ int gart_table_location;
++ int gart_reg_if;
++ void *addr;
++ dma_addr_t bus_addr;
++ dma_addr_t table_mask;
++ dma_addr_t member_mask;
++ struct drm_dma_handle *table_handle;
++ drm_local_map_t mapping;
++ int table_size;
++};
++
++/**
++ * This structure defines the GEM memory object, which the DRM uses for its
++ * buffer objects.
++ */
++struct drm_gem_object {
++ /** Reference count of this object */
++ struct kref refcount;
++
++ /** Handle count of this object. Each handle also holds a reference */
++ struct kref handlecount;
++
++ /** Related drm device */
++ struct drm_device *dev;
++
++ /** File representing the shmem storage */
++ struct file *filp;
++
++ /**
++ * Size of the object, in bytes. Immutable over the object's
++ * lifetime.
++ */
++ size_t size;
++
++ /**
++ * Global name for this object, starts at 1. 0 means unnamed.
++ * Access is covered by the object_name_lock in the related drm_device
++ */
++ int name;
++
++ /**
++ * Memory domains. These monitor which caches contain read/write data
++ * related to the object. When transitioning from one set of domains
++ * to another, the driver is called to ensure that caches are suitably
++ * flushed and invalidated
++ */
++ uint32_t read_domains;
++ uint32_t write_domain;
++
++ /**
++ * While validating an exec operation, the
++ * new read/write domain values are computed here.
++ * They will be transferred to the above values
++ * at the point that any cache flushing occurs
++ */
++ uint32_t pending_read_domains;
++ uint32_t pending_write_domain;
++
++ void *driver_private;
++};
++
++#include "drm_objects.h"
++
++/**
++ * DRM driver structure. This structure represents the common code for
++ * a family of cards.
There will be one drm_device for each card present
++ * in this family
++ */
++
++struct drm_driver {
++ int (*load) (struct drm_device *, unsigned long flags);
++ int (*firstopen) (struct drm_device *);
++ int (*open) (struct drm_device *, struct drm_file *);
++ void (*preclose) (struct drm_device *, struct drm_file *file_priv);
++ void (*postclose) (struct drm_device *, struct drm_file *);
++ void (*lastclose) (struct drm_device *);
++ int (*unload) (struct drm_device *);
++ int (*suspend) (struct drm_device *, pm_message_t state);
++ int (*resume) (struct drm_device *);
++ int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
++ void (*dma_ready) (struct drm_device *);
++ int (*dma_quiescent) (struct drm_device *);
++ int (*context_ctor) (struct drm_device *dev, int context);
++ int (*context_dtor) (struct drm_device *dev, int context);
++ int (*kernel_context_switch) (struct drm_device *dev, int old,
++ int new);
++ void (*kernel_context_switch_unlock) (struct drm_device * dev);
++ /**
++ * get_vblank_counter - get raw hardware vblank counter
++ * @dev: DRM device
++ * @crtc: counter to fetch
++ *
++ * Driver callback for fetching a raw hardware vblank counter
++ * for @crtc. If a device doesn't have a hardware counter, the
++ * driver can simply return the value of drm_vblank_count and
++ * make the enable_vblank() and disable_vblank() hooks into no-ops,
++ * leaving interrupts enabled at all times.
++ *
++ * Wraparound handling and loss of events due to modesetting are dealt
++ * with in the DRM core code.
++ *
++ * RETURNS
++ * Raw vblank counter value.
++ */
++ u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
++
++ /**
++ * enable_vblank - enable vblank interrupt events
++ * @dev: DRM device
++ * @crtc: which irq to enable
++ *
++ * Enable vblank interrupts for @crtc. If the device doesn't have
++ * a hardware vblank counter, this routine should be a no-op, since
++ * interrupts will have to stay on to keep the count accurate.
++ *
++ * RETURNS
++ * Zero on success, appropriate errno if the given @crtc's vblank
++ * interrupt cannot be enabled.
++ */
++ int (*enable_vblank) (struct drm_device *dev, int crtc);
++
++ /**
++ * disable_vblank - disable vblank interrupt events
++ * @dev: DRM device
++ * @crtc: which irq to disable
++ *
++ * Disable vblank interrupts for @crtc. If the device doesn't have
++ * a hardware vblank counter, this routine should be a no-op, since
++ * interrupts will have to stay on to keep the count accurate.
++ */
++ void (*disable_vblank) (struct drm_device *dev, int crtc);
++ int (*dri_library_name) (struct drm_device *dev, char * buf);
++
++ /**
++ * Called by \c drm_device_is_agp. Typically used to determine if a
++ * card is really attached to AGP or not.
++ *
++ * \param dev DRM device handle
++ *
++ * \returns
++ * One of three values is returned depending on whether or not the
++ * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
++ * (return of 1), or may or may not be AGP (return of 2).
++ */
++ int (*device_is_agp) (struct drm_device *dev);
++
++/* these have to be filled in */
++ irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
++ void (*irq_preinstall) (struct drm_device *dev);
++ int (*irq_postinstall) (struct drm_device *dev);
++ void (*irq_uninstall) (struct drm_device *dev);
++ void (*reclaim_buffers) (struct drm_device *dev,
++ struct drm_file *file_priv);
++ void (*reclaim_buffers_locked) (struct drm_device *dev,
++ struct drm_file *file_priv);
++ void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
++ struct drm_file *file_priv);
++ unsigned long (*get_map_ofs) (struct drm_map *map);
++ unsigned long (*get_reg_ofs) (struct drm_device *dev);
++ void (*set_version) (struct drm_device *dev,
++ struct drm_set_version *sv);
++
++ int (*proc_init)(struct drm_minor *minor);
++ void (*proc_cleanup)(struct drm_minor *minor);
++
++ /**
++ * Driver-specific constructor for drm_gem_objects, to set up
++ * obj->driver_private.
++ *
++ * Returns 0 on success.
++ */
++ int (*gem_init_object) (struct drm_gem_object *obj);
++ void (*gem_free_object) (struct drm_gem_object *obj);
++
++ struct drm_fence_driver *fence_driver;
++ struct drm_bo_driver *bo_driver;
++
++ int major;
++ int minor;
++ int patchlevel;
++ char *name;
++ char *desc;
++ char *date;
++
++/* variables */
++ u32 driver_features;
++ int dev_priv_size;
++ struct drm_ioctl_desc *ioctls;
++ int num_ioctls;
++ struct file_operations fops;
++ struct pci_driver pci_driver;
++};
++
++#define DRM_MINOR_UNASSIGNED 0
++#define DRM_MINOR_LEGACY 1
++
++/**
++ * DRM minor structure. This structure represents a drm minor number.
++ */
++struct drm_minor {
++ int index; /**< Minor device number */
++ int type; /**< Control or render */
++ dev_t device; /**< Device number for mknod */
++ struct device kdev; /**< Linux device */
++ struct drm_device *dev;
++ struct proc_dir_entry *dev_root; /**< proc directory entry */
++ struct class_device *dev_class;
++};
++
++
++/**
++ * DRM device structure. This structure represents a complete card that
++ * may contain multiple heads.
++ */
++struct drm_device {
++ char *unique; /**< Unique identifier: e.g., busid */
++ int unique_len; /**< Length of unique field */
++ char *devname; /**< For /proc/interrupts */
++ int if_version; /**< Highest interface version set */
++
++ int blocked; /**< Blocked due to VC switch?
*/ ++ ++ /** \name Locks */ ++ /*@{ */ ++ spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ ++ struct mutex struct_mutex; /**< For others */ ++ /*@} */ ++ ++ /** \name Usage Counters */ ++ /*@{ */ ++ int open_count; /**< Outstanding files open */ ++ atomic_t ioctl_count; /**< Outstanding IOCTLs pending */ ++ atomic_t vma_count; /**< Outstanding vma areas open */ ++ int buf_use; /**< Buffers in use -- cannot alloc */ ++ atomic_t buf_alloc; /**< Buffer allocation in progress */ ++ /*@} */ ++ ++ /** \name Performance counters */ ++ /*@{ */ ++ unsigned long counters; ++ enum drm_stat_type types[15]; ++ atomic_t counts[15]; ++ /*@} */ ++ ++ /** \name Authentication */ ++ /*@{ */ ++ struct list_head filelist; ++ struct drm_open_hash magiclist; ++ struct list_head magicfree; ++ /*@} */ ++ ++ /** \name Memory management */ ++ /*@{ */ ++ struct list_head maplist; /**< Linked list of regions */ ++ int map_count; /**< Number of mappable regions */ ++ struct drm_open_hash map_hash; /**< User token hash table for maps */ ++ struct drm_mm offset_manager; /**< User token manager */ ++ struct drm_open_hash object_hash; /**< User token hash table for objects */ ++ struct address_space *dev_mapping; /**< For unmap_mapping_range() */ ++ struct page *ttm_dummy_page; ++ ++ /** \name Context handle management */ ++ /*@{ */ ++ struct list_head ctxlist; /**< Linked list of context handles */ ++ int ctx_count; /**< Number of context handles */ ++ struct mutex ctxlist_mutex; /**< For ctxlist */ ++ ++ struct idr ctx_idr; ++ ++ struct list_head vmalist; /**< List of vmas (for debugging) */ ++ struct drm_lock_data lock; /**< Information on hardware lock */ ++ /*@} */ ++ ++ /** \name DMA queues (contexts) */ ++ /*@{ */ ++ int queue_count; /**< Number of active DMA queues */ ++ int queue_reserved; /**< Number of reserved DMA queues */ ++ int queue_slots; /**< Actual length of queuelist */ ++ struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */ ++ struct drm_device_dma *dma; /**< Optional pointer for DMA support */ ++ /*@} */ ++ ++ /** \name Context support */ ++ /*@{ */ ++ int irq; /**< Interrupt used by board */ ++ int irq_enabled; /**< True if irq handler is enabled */ ++ __volatile__ long context_flag; /**< Context swapping flag */ ++ __volatile__ long interrupt_flag; /**< Interruption handler flag */ ++ __volatile__ long dma_flag; /**< DMA dispatch flag */ ++ struct timer_list timer; /**< Timer for delaying ctx switch */ ++ wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */ ++ int last_checked; /**< Last context checked for DMA */ ++ int last_context; /**< Last current context */ ++ unsigned long last_switch; /**< jiffies at last context switch */ ++ /*@} */ ++ ++ struct work_struct work; ++ ++ /** \name VBLANK IRQ support */ ++ /*@{ */ ++ ++ /* ++ * At load time, disabling the vblank interrupt won't be allowed since ++ * old clients may not call the modeset ioctl and therefore misbehave. ++ * Once the modeset ioctl *has* been called though, we can safely ++ * disable them when unused. 
++ */ ++ int vblank_disable_allowed; ++ ++ wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */ ++ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ ++ spinlock_t vbl_lock; ++ struct list_head *vbl_sigs; /**< signal list to send on VBLANK */ ++ atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/ ++ atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */ ++ u32 *last_vblank; /* protected by dev->vbl_lock, used */ ++ /* for wraparound handling */ ++ int *vblank_enabled; /* so we don't call enable more than ++ once per disable */ ++ int *vblank_inmodeset; /* Display driver is setting mode */ ++ struct timer_list vblank_disable_timer; ++ ++ u32 max_vblank_count; /**< size of vblank counter register */ ++ spinlock_t tasklet_lock; /**< For drm_locked_tasklet */ ++ void (*locked_tasklet_func)(struct drm_device *dev); ++ ++ /*@} */ ++ cycles_t ctx_start; ++ cycles_t lck_start; ++ ++ struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */ ++ wait_queue_head_t buf_readers; /**< Processes waiting to read */ ++ wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */ ++ ++ struct drm_agp_head *agp; /**< AGP data */ ++ ++ struct pci_dev *pdev; /**< PCI device structure */ ++ int pci_vendor; /**< PCI vendor id */ ++ int pci_device; /**< PCI device id */ ++#ifdef __alpha__ ++ struct pci_controller *hose; ++#endif ++ int num_crtcs; /**< Number of CRTCs on this device */ ++ struct drm_sg_mem *sg; /**< Scatter gather memory */ ++ void *dev_private; /**< device private data */ ++ struct drm_sigdata sigdata; /**< For block_all_signals */ ++ sigset_t sigmask; ++ ++ struct drm_driver *driver; ++ drm_local_map_t *agp_buffer_map; ++ unsigned int agp_buffer_token; ++ struct drm_minor *primary; /**< render type primary screen head */ ++ ++ struct drm_fence_manager fm; ++ struct drm_buffer_manager bm; ++ ++ /** \name Drawable information */ ++ /*@{ */ ++ spinlock_t drw_lock; ++ struct idr drw_idr; ++ /*@} */ ++ ++ /** \name GEM information */ ++ /*@{ */ ++ spinlock_t object_name_lock; ++ struct idr object_name_idr; ++ atomic_t object_count; ++ atomic_t object_memory; ++ atomic_t pin_count; ++ atomic_t pin_memory; ++ atomic_t gtt_count; ++ atomic_t gtt_memory; ++ uint32_t gtt_total; ++ uint32_t invalidate_domains; /* domains pending invalidation */ ++ uint32_t flush_domains; /* domains pending flush */ ++ /*@} */ ++}; ++ ++#if __OS_HAS_AGP ++struct drm_agp_ttm_backend { ++ struct drm_ttm_backend backend; ++ DRM_AGP_MEM *mem; ++ struct agp_bridge_data *bridge; ++ int populated; ++}; ++#endif ++ ++ ++static __inline__ int drm_core_check_feature(struct drm_device *dev, ++ int feature) ++{ ++ return ((dev->driver->driver_features & feature) ? 
1 : 0); ++} ++ ++#ifdef __alpha__ ++#define drm_get_pci_domain(dev) dev->hose->index ++#else ++#define drm_get_pci_domain(dev) 0 ++#endif ++ ++#if __OS_HAS_AGP ++static inline int drm_core_has_AGP(struct drm_device *dev) ++{ ++ return drm_core_check_feature(dev, DRIVER_USE_AGP); ++} ++#else ++#define drm_core_has_AGP(dev) (0) ++#endif ++ ++#if __OS_HAS_MTRR ++static inline int drm_core_has_MTRR(struct drm_device *dev) ++{ ++ return drm_core_check_feature(dev, DRIVER_USE_MTRR); ++} ++ ++#define DRM_MTRR_WC MTRR_TYPE_WRCOMB ++ ++static inline int drm_mtrr_add(unsigned long offset, unsigned long size, ++ unsigned int flags) ++{ ++ return mtrr_add(offset, size, flags, 1); ++} ++ ++static inline int drm_mtrr_del(int handle, unsigned long offset, ++ unsigned long size, unsigned int flags) ++{ ++ return mtrr_del(handle, offset, size); ++} ++ ++#else ++static inline int drm_mtrr_add(unsigned long offset, unsigned long size, ++ unsigned int flags) ++{ ++ return -ENODEV; ++} ++ ++static inline int drm_mtrr_del(int handle, unsigned long offset, ++ unsigned long size, unsigned int flags) ++{ ++ return -ENODEV; ++} ++ ++#define drm_core_has_MTRR(dev) (0) ++#define DRM_MTRR_WC 0 ++#endif ++ ++ ++/******************************************************************/ ++/** \name Internal function definitions */ ++/*@{*/ ++ ++ /* Driver support (drm_drv.h) */ ++extern int drm_fb_loaded; ++extern int drm_init(struct drm_driver *driver, ++ struct pci_device_id *pciidlist); ++extern void drm_exit(struct drm_driver *driver); ++extern void drm_cleanup_pci(struct pci_dev *pdev); ++extern int drm_ioctl(struct inode *inode, struct file *filp, ++ unsigned int cmd, unsigned long arg); ++extern long drm_unlocked_ioctl(struct file *filp, ++ unsigned int cmd, unsigned long arg); ++extern long drm_compat_ioctl(struct file *filp, ++ unsigned int cmd, unsigned long arg); ++ ++extern int drm_lastclose(struct drm_device *dev); ++ ++ /* Device support (drm_fops.h) */ ++extern int drm_open(struct inode *inode, struct file *filp); ++extern int drm_stub_open(struct inode *inode, struct file *filp); ++extern int drm_fasync(int fd, struct file *filp, int on); ++extern int drm_release(struct inode *inode, struct file *filp); ++unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); ++ ++ /* Mapping support (drm_vm.h) */ ++extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); ++extern unsigned long drm_core_get_map_ofs(struct drm_map * map); ++extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); ++extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma); ++ ++ /* Memory management support (drm_memory.h) */ ++#include "drm_memory.h" ++extern void drm_mem_init(void); ++extern int drm_mem_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data); ++extern void *drm_calloc(size_t nmemb, size_t size, int area); ++extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); ++extern unsigned long drm_alloc_pages(int order, int area); ++extern void drm_free_pages(unsigned long address, int order, int area); ++extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); ++extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); ++extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); ++extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, ++ struct page **pages, ++ unsigned long num_pages, ++ uint32_t gtt_offset); ++extern int drm_unbind_agp(DRM_AGP_MEM * handle); ++ ++extern void 
drm_free_memctl(size_t size); ++extern int drm_alloc_memctl(size_t size); ++extern void drm_query_memctl(uint64_t *cur_used, ++ uint64_t *emer_used, ++ uint64_t *low_threshold, ++ uint64_t *high_threshold, ++ uint64_t *emer_threshold); ++extern void drm_init_memctl(size_t low_threshold, ++ size_t high_threshold, ++ size_t unit_size); ++ ++ /* Misc. IOCTL support (drm_ioctl.h) */ ++extern int drm_irq_by_busid(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_getunique(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_setunique(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_getmap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_getclient(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_getstats(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_setversion(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_noop(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++ /* Context IOCTL support (drm_context.h) */ ++extern int drm_resctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_addctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_modctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_getctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_switchctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_newctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_rmctx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++extern int drm_ctxbitmap_init(struct drm_device *dev); ++extern void drm_ctxbitmap_cleanup(struct drm_device *dev); ++extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); ++ ++extern int drm_setsareactx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_getsareactx(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++ /* Drawable IOCTL support (drm_drawable.h) */ ++extern int drm_adddraw(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_rmdraw(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_update_drawable_info(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, ++ drm_drawable_t id); ++extern void drm_drawable_free_all(struct drm_device *dev); ++ ++ /* Authentication IOCTL support (drm_auth.h) */ ++extern int drm_getmagic(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_authmagic(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++ /* Locking IOCTL support (drm_lock.h) */ ++extern int drm_lock(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_unlock(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); ++extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); ++extern void drm_idlelock_take(struct drm_lock_data *lock_data); ++extern void drm_idlelock_release(struct drm_lock_data *lock_data); ++ ++/* ++ * These are exported to drivers so 
that they can implement fencing using
++ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
++ */
++
++extern int drm_i_have_hw_lock(struct drm_device *dev,
++ struct drm_file *file_priv);
++
++ /* Buffer management support (drm_bufs.h) */
++extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
++extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
++extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request);
++extern int drm_addmap(struct drm_device *dev, unsigned int offset,
++ unsigned int size, enum drm_map_type type,
++ enum drm_map_flags flags, drm_local_map_t ** map_ptr);
++extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
++extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map);
++extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int drm_addbufs(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int drm_infobufs(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int drm_markbufs(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int drm_freebufs(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int drm_mapbufs(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int drm_order(unsigned long size);
++extern unsigned long drm_get_resource_start(struct drm_device *dev,
++ unsigned int resource);
++extern unsigned long drm_get_resource_len(struct drm_device *dev,
++ unsigned int resource);
++extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
++ drm_local_map_t *map);
++
++
++ /* DMA support (drm_dma.h) */
++extern int drm_dma_setup(struct drm_device *dev);
++extern void drm_dma_takedown(struct drm_device *dev);
++extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
++extern void drm_core_reclaim_buffers(struct drm_device *dev,
++ struct drm_file *filp);
++
++ /* IRQ support (drm_irq.h) */
++extern int drm_control(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
++extern int drm_irq_install(struct drm_device *dev);
++extern int drm_irq_uninstall(struct drm_device *dev);
++extern void drm_driver_irq_preinstall(struct drm_device *dev);
++extern void drm_driver_irq_postinstall(struct drm_device *dev);
++extern void drm_driver_irq_uninstall(struct drm_device *dev);
++
++extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
++extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp);
++extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq);
++extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
++extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
++extern void drm_handle_vblank(struct drm_device *dev, int crtc);
++extern int drm_vblank_get(struct drm_device *dev, int crtc);
++extern void drm_vblank_put(struct drm_device *dev, int crtc);
++
++ /* Modesetting support */
++extern int drm_modeset_ctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++
++ /* AGP/GART support (drm_agpsupport.h) */
++extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
++extern int drm_agp_acquire(struct drm_device *dev);
++extern int
drm_agp_acquire_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_release(struct drm_device *dev); ++extern int drm_agp_release_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); ++extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); ++extern int drm_agp_info_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); ++extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); ++extern int drm_agp_free_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); ++extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); ++extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) ++extern DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type); ++#else ++extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type); ++#endif ++extern int drm_agp_free_memory(DRM_AGP_MEM * handle); ++extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); ++extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); ++extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev); ++extern void drm_agp_chipset_flush(struct drm_device *dev); ++ /* Stub support (drm_stub.h) */ ++extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, ++ struct drm_driver *driver); ++extern int drm_put_dev(struct drm_device *dev); ++extern int drm_put_minor(struct drm_device *dev); ++extern unsigned int drm_debug; /* 1 to enable debug output */ ++ ++extern struct class *drm_class; ++extern struct proc_dir_entry *drm_proc_root; ++ ++extern struct idr drm_minors_idr; ++ ++extern drm_local_map_t *drm_getsarea(struct drm_device *dev); ++ ++ /* Proc support (drm_proc.h) */ ++int drm_proc_init(struct drm_minor *minor, int minor_id, ++ struct proc_dir_entry *root); ++int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root); ++ ++ /* Scatter Gather Support (drm_scatter.h) */ ++extern void drm_sg_cleanup(struct drm_sg_mem * entry); ++extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); ++extern int drm_sg_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++ /* ATI PCIGART support (ati_pcigart.h) */ ++extern int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info); ++extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info); ++ ++extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, ++ size_t align, dma_addr_t maxaddr); ++extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); ++extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); ++ ++ /* sysfs support 
(drm_sysfs.c) */ ++struct drm_sysfs_class; ++extern struct class *drm_sysfs_create(struct module *owner, char *name); ++extern void drm_sysfs_destroy(void); ++extern int drm_sysfs_device_add(struct drm_minor *minor); ++extern void drm_sysfs_device_remove(struct drm_minor *minor); ++ ++/* ++ * Basic memory manager support (drm_mm.c) ++ */ ++ ++extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, ++ unsigned alignment); ++extern void drm_mm_put_block(struct drm_mm_node *cur); ++extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, ++ unsigned alignment, int best_match); ++extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); ++extern void drm_mm_takedown(struct drm_mm *mm); ++extern int drm_mm_clean(struct drm_mm *mm); ++extern unsigned long drm_mm_tail_space(struct drm_mm *mm); ++extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); ++extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); ++ ++static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) ++{ ++ return block->mm; ++} ++ ++/* Graphics Execution Manager library functions (drm_gem.c) */ ++int ++drm_gem_init (struct drm_device *dev); ++ ++void ++drm_gem_object_free (struct kref *kref); ++ ++struct drm_gem_object * ++drm_gem_object_alloc(struct drm_device *dev, size_t size); ++ ++void ++drm_gem_object_handle_free (struct kref *kref); ++ ++static inline void drm_gem_object_reference(struct drm_gem_object *obj) ++{ ++ kref_get(&obj->refcount); ++} ++ ++static inline void drm_gem_object_unreference(struct drm_gem_object *obj) ++{ ++ if (obj == NULL) ++ return; ++ ++ kref_put (&obj->refcount, drm_gem_object_free); ++} ++ ++int ++drm_gem_handle_create(struct drm_file *file_priv, ++ struct drm_gem_object *obj, ++ int *handlep); ++ ++static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj) ++{ ++ drm_gem_object_reference (obj); ++ kref_get(&obj->handlecount); ++} ++ ++static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj) ++{ ++ if (obj == NULL) ++ return; ++ ++ /* ++ * Must bump handle count first as this may be the last ++ * ref, in which case the object would disappear before we ++ * checked for a name ++ */ ++ kref_put (&obj->handlecount, drm_gem_object_handle_free); ++ drm_gem_object_unreference (obj); ++} ++ ++struct drm_gem_object * ++drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, ++ int handle); ++int drm_gem_close_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int drm_gem_flink_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int drm_gem_open_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); ++void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); ++ ++extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); ++extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev); ++extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); ++ ++static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, ++ unsigned int token) ++{ ++ struct drm_map_list *_entry; ++ list_for_each_entry(_entry, &dev->maplist, head) ++ if (_entry->user_token == token) ++ return _entry->map; ++ return NULL; ++} ++ ++static __inline__ int drm_device_is_agp(struct drm_device *dev) ++{ 
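++ /*
++ * Ask the driver first: a return of 0 means the card is definitely not
++ * AGP, 1 means it definitely is, and 2 means "don't know", in which
++ * case we fall back to probing the PCI capability list below.
++ */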
++ if ( dev->driver->device_is_agp != NULL ) { ++ int err = (*dev->driver->device_is_agp)(dev); ++ ++ if (err != 2) { ++ return err; ++ } ++ } ++ ++ return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); ++} ++ ++static __inline__ int drm_device_is_pcie(struct drm_device *dev) ++{ ++ return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); ++} ++ ++static __inline__ void drm_core_dropmap(struct drm_map *map) ++{ ++} ++ ++#ifndef DEBUG_MEMORY ++/** Wrapper around kmalloc() */ ++static __inline__ void *drm_alloc(size_t size, int area) ++{ ++ return kmalloc(size, GFP_KERNEL); ++} ++ ++/** Wrapper around kfree() */ ++static __inline__ void drm_free(void *pt, size_t size, int area) ++{ ++ kfree(pt); ++} ++#else ++extern void *drm_alloc(size_t size, int area); ++extern void drm_free(void *pt, size_t size, int area); ++#endif ++ ++/* ++ * Accounting variants of standard calls. ++ */ ++ ++static inline void *drm_ctl_alloc(size_t size, int area) ++{ ++ void *ret; ++ if (drm_alloc_memctl(size)) ++ return NULL; ++ ret = drm_alloc(size, area); ++ if (!ret) ++ drm_free_memctl(size); ++ return ret; ++} ++ ++static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area) ++{ ++ void *ret; ++ ++ if (drm_alloc_memctl(nmemb*size)) ++ return NULL; ++ ret = drm_calloc(nmemb, size, area); ++ if (!ret) ++ drm_free_memctl(nmemb*size); ++ return ret; ++} ++ ++static inline void drm_ctl_free(void *pt, size_t size, int area) ++{ ++ drm_free(pt, size, area); ++ drm_free_memctl(size); ++} ++ ++/*@}*/ ++ ++#endif /* __KERNEL__ */ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/drm_proc.c git-nokia/drivers/gpu/drm-tungsten/drm_proc.c +--- git/drivers/gpu/drm-tungsten/drm_proc.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_proc.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,743 @@ ++/** ++ * \file drm_proc.c ++ * /proc support for DRM ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ * ++ * \par Acknowledgements: ++ * Matthew J Sottek sent in a patch to fix ++ * the problem with the proc files not outputting all their information. ++ */ ++ ++/* ++ * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */
++
++#include "drmP.h"
++
++static int drm_name_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data);
++static int drm_vm_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data);
++static int drm_clients_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data);
++static int drm_queues_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data);
++static int drm_bufs_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data);
++static int drm_objects_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data);
++static int drm_gem_name_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data);
++static int drm_gem_object_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data);
++#if DRM_DEBUG_CODE
++static int drm_vma_info(char *buf, char **start, off_t offset,
++ int request, int *eof, void *data);
++#endif
++
++/**
++ * Proc file list.
++ */
++static struct drm_proc_list {
++ const char *name; /**< file name */
++ int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
++} drm_proc_list[] = {
++ {"name", drm_name_info},
++ {"mem", drm_mem_info},
++ {"vm", drm_vm_info},
++ {"clients", drm_clients_info},
++ {"queues", drm_queues_info},
++ {"bufs", drm_bufs_info},
++ {"objects", drm_objects_info},
++ {"gem_names", drm_gem_name_info},
++ {"gem_objects", drm_gem_object_info},
++#if DRM_DEBUG_CODE
++ {"vma", drm_vma_info},
++#endif
++};
++
++#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
++
++/**
++ * Initialize the DRI proc filesystem for a device.
++ *
++ * \param minor DRM minor to set up the proc entries for.
++ * \param minor_id device minor number.
++ * \param root DRI proc dir entry.
++ * \return zero on success, or -1 on failure.
++ *
++ * Create the DRI proc root entry "/proc/dri", the device proc root entry
++ * "/proc/dri/%minor%/", and each entry in proc_list as
++ * "/proc/dri/%minor%/%name%".
++ */
++int drm_proc_init(struct drm_minor *minor, int minor_id,
++ struct proc_dir_entry *root)
++{
++ struct proc_dir_entry *ent;
++ int i, j;
++ char name[64];
++
++ sprintf(name, "%d", minor_id);
++ minor->dev_root = proc_mkdir(name, root);
++ if (!minor->dev_root) {
++ DRM_ERROR("Cannot create /proc/dri/%s\n", name);
++ return -1;
++ }
++
++ for (i = 0; i < DRM_PROC_ENTRIES; i++) {
++ ent = create_proc_entry(drm_proc_list[i].name,
++ S_IFREG | S_IRUGO, minor->dev_root);
++ if (!ent) {
++ DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
++ name, drm_proc_list[i].name);
++ /* remove the entries created so far */
++ for (j = 0; j < i; j++)
++ remove_proc_entry(drm_proc_list[j].name,
++ minor->dev_root);
++ remove_proc_entry(name, root);
++ minor->dev_root = NULL;
++ return -1;
++ }
++ ent->read_proc = drm_proc_list[i].f;
++ ent->data = minor;
++ }
++ return 0;
++}
++
++/**
++ * Clean up the proc filesystem resources.
++ *
++ * \param minor DRM minor whose proc entries are removed.
++ * \param root DRI proc dir entry.
++ * \return always zero.
++ *
++ * Remove all proc entries created by proc_init().
++ */ ++int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) ++{ ++ int i; ++ char name[64]; ++ ++ if (!root || !minor->dev_root) ++ return 0; ++ ++ for (i = 0; i < DRM_PROC_ENTRIES; i++) ++ remove_proc_entry(drm_proc_list[i].name, minor->dev_root); ++ sprintf(name, "%d", minor->index); ++ remove_proc_entry(name, root); ++ ++ return 0; ++} ++ ++/** ++ * Called when "/proc/dri/.../name" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param request requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. ++ * ++ * Prints the device name together with the bus id if available. ++ */ ++static int drm_name_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ if (dev->unique) { ++ DRM_PROC_PRINT("%s %s %s\n", ++ dev->driver->pci_driver.name, ++ pci_name(dev->pdev), dev->unique); ++ } else { ++ DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name, ++ pci_name(dev->pdev)); ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++/** ++ * Called when "/proc/dri/.../vm" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param request requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. ++ * ++ * Prints information about all mappings in drm_device::maplist. ++ */ ++static int drm__vm_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ struct drm_map *map; ++ struct drm_map_list *r_list; ++ ++ /* Hardcoded from _DRM_FRAME_BUFFER, ++ _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, ++ _DRM_SCATTER_GATHER, and _DRM_CONSISTENT. */ ++ const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; ++ const char *type; ++ int i; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ DRM_PROC_PRINT("slot offset size type flags " ++ "address mtrr\n\n"); ++ i = 0; ++ list_for_each_entry(r_list, &dev->maplist, head) { ++ map = r_list->map; ++ if (!map) ++ continue; ++ if (map->type < 0 || map->type > 5) ++ type = "??"; ++ else ++ type = types[map->type]; ++ DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", ++ i, ++ map->offset, ++ map->size, type, map->flags, ++ (unsigned long) r_list->user_token); ++ ++ if (map->mtrr < 0) { ++ DRM_PROC_PRINT("none\n"); ++ } else { ++ DRM_PROC_PRINT("%4d\n", map->mtrr); ++ } ++ i++; ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++/** ++ * Simply calls _vm_info() while holding the drm_device::struct_mutex lock. 
++ */ ++static int drm_vm_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm__vm_info(buf, start, offset, request, eof, data); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/** ++ * Called when "/proc/dri/.../queues" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param request requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. ++ */ ++static int drm__queues_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ int i; ++ struct drm_queue *q; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ DRM_PROC_PRINT(" ctx/flags use fin" ++ " blk/rw/rwf wait flushed queued" ++ " locks\n\n"); ++ for (i = 0; i < dev->queue_count; i++) { ++ q = dev->queuelist[i]; ++ atomic_inc(&q->use_count); ++ DRM_PROC_PRINT_RET(atomic_dec(&q->use_count), ++ "%5d/0x%03x %5d %5d" ++ " %5d/%c%c/%c%c%c %5Zd\n", ++ i, ++ q->flags, ++ atomic_read(&q->use_count), ++ atomic_read(&q->finalization), ++ atomic_read(&q->block_count), ++ atomic_read(&q->block_read) ? 'r' : '-', ++ atomic_read(&q->block_write) ? 'w' : '-', ++ waitqueue_active(&q->read_queue) ? 'r' : '-', ++ waitqueue_active(&q-> ++ write_queue) ? 'w' : '-', ++ waitqueue_active(&q-> ++ flush_queue) ? 'f' : '-', ++ DRM_BUFCOUNT(&q->waitlist)); ++ atomic_dec(&q->use_count); ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++/** ++ * Simply calls _queues_info() while holding the drm_device::struct_mutex lock. ++ */ ++static int drm_queues_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm__queues_info(buf, start, offset, request, eof, data); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/** ++ * Called when "/proc/dri/.../bufs" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param request requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. 
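++ *
++ * Prints the DMA buffer pool statistics for each buffer size order, followed
++ * by the list state of every allocated buffer.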
++ */ ++static int drm__bufs_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ struct drm_device_dma *dma = dev->dma; ++ int i; ++ ++ if (!dma || offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ DRM_PROC_PRINT(" o size count free segs pages kB\n\n"); ++ for (i = 0; i <= DRM_MAX_ORDER; i++) { ++ if (dma->bufs[i].buf_count) ++ DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n", ++ i, ++ dma->bufs[i].buf_size, ++ dma->bufs[i].buf_count, ++ atomic_read(&dma->bufs[i] ++ .freelist.count), ++ dma->bufs[i].seg_count, ++ dma->bufs[i].seg_count ++ * (1 << dma->bufs[i].page_order), ++ (dma->bufs[i].seg_count ++ * (1 << dma->bufs[i].page_order)) ++ * PAGE_SIZE / 1024); ++ } ++ DRM_PROC_PRINT("\n"); ++ for (i = 0; i < dma->buf_count; i++) { ++ if (i && !(i % 32)) ++ DRM_PROC_PRINT("\n"); ++ DRM_PROC_PRINT(" %d", dma->buflist[i]->list); ++ } ++ DRM_PROC_PRINT("\n"); ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++/** ++ * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock. ++ */ ++static int drm_bufs_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm__bufs_info(buf, start, offset, request, eof, data); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/** ++ * Called when "/proc/dri/.../objects" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param request requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. 
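++ *
++ * Prints the fence and buffer object counts, followed by the memory
++ * accounting totals and thresholds reported by drm_query_memctl().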
++ */ ++static int drm__objects_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ struct drm_buffer_manager *bm = &dev->bm; ++ struct drm_fence_manager *fm = &dev->fm; ++ uint64_t used_mem; ++ uint64_t used_emer; ++ uint64_t low_mem; ++ uint64_t high_mem; ++ uint64_t emer_mem; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ DRM_PROC_PRINT("Object accounting:\n\n"); ++ if (fm->initialized) { ++ DRM_PROC_PRINT("Number of active fence objects: %d.\n", ++ atomic_read(&fm->count)); ++ } else { ++ DRM_PROC_PRINT("Fence objects are not supported by this driver\n"); ++ } ++ ++ if (bm->initialized) { ++ DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n", ++ atomic_read(&bm->count)); ++ } ++ DRM_PROC_PRINT("Memory accounting:\n\n"); ++ if (bm->initialized) { ++ DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages); ++ } else { ++ DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n"); ++ } ++ ++ drm_query_memctl(&used_mem, &used_emer, &low_mem, &high_mem, &emer_mem); ++ ++ if (used_mem > 16*PAGE_SIZE) { ++ DRM_PROC_PRINT("Used object memory is %lu pages.\n", ++ (unsigned long) (used_mem >> PAGE_SHIFT)); ++ } else { ++ DRM_PROC_PRINT("Used object memory is %lu bytes.\n", ++ (unsigned long) used_mem); ++ } ++ if (used_emer > 16*PAGE_SIZE) { ++ DRM_PROC_PRINT("Used emergency memory is %lu pages.\n", ++ (unsigned long) (used_emer >> PAGE_SHIFT)); ++ } else { ++ DRM_PROC_PRINT("Used emergency memory is %lu bytes.\n\n", ++ (unsigned long) used_emer); ++ } ++ DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n", ++ (unsigned long) (low_mem >> PAGE_SHIFT)); ++ DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n", ++ (unsigned long) (high_mem >> PAGE_SHIFT)); ++ DRM_PROC_PRINT("Emergency root only memory usage threshold is %lu pages.\n", ++ (unsigned long) (emer_mem >> PAGE_SHIFT)); ++ ++ DRM_PROC_PRINT("\n"); ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++/** ++ * Simply calls _objects_info() while holding the drm_device::struct_mutex lock. ++ */ ++static int drm_objects_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm__objects_info(buf, start, offset, request, eof, data); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/** ++ * Called when "/proc/dri/.../clients" is read. ++ * ++ * \param buf output buffer. ++ * \param start start of output data. ++ * \param offset requested start offset. ++ * \param request requested number of bytes. ++ * \param eof whether there is no more data to return. ++ * \param data private data. ++ * \return number of written bytes. 
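++ *
++ * Prints one line per open file: authentication state, minor index, pid,
++ * uid, magic number and ioctl count.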
++ */ ++static int drm__clients_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ struct drm_file *priv; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n"); ++ list_for_each_entry(priv, &dev->filelist, lhead) { ++ DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n", ++ priv->authenticated ? 'y' : 'n', ++ priv->minor->index, ++ priv->pid, ++ priv->uid, priv->magic, priv->ioctl_count); ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++/** ++ * Simply calls _clients_info() while holding the drm_device::struct_mutex lock. ++ */ ++static int drm_clients_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm__clients_info(buf, start, offset, request, eof, data); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++struct drm_gem_name_info_data { ++ int len; ++ char *buf; ++ int eof; ++}; ++ ++static int drm_gem_one_name_info(int id, void *ptr, void *data) ++{ ++ struct drm_gem_object *obj = ptr; ++ struct drm_gem_name_info_data *nid = data; ++ ++ DRM_INFO("name %d size %d\n", obj->name, obj->size); ++ if (nid->eof) ++ return 0; ++ ++ nid->len += sprintf(&nid->buf[nid->len], ++ "%6d%9d%8d%9d\n", ++ obj->name, obj->size, ++ atomic_read(&obj->handlecount.refcount), ++ atomic_read(&obj->refcount.refcount)); ++ if (nid->len > DRM_PROC_LIMIT) { ++ nid->eof = 1; ++ return 0; ++ } ++ return 0; ++} ++ ++static int drm_gem_name_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ struct drm_gem_name_info_data nid; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ nid.len = sprintf(buf, " name size handles refcount\n"); ++ nid.buf = buf; ++ nid.eof = 0; ++ idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid); ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ if (nid.len > request + offset) ++ return request; ++ *eof = 1; ++ return nid.len - offset; ++} ++ ++static int drm_gem_object_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count)); ++ DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory)); ++ DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count)); ++ DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory)); ++ DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory)); ++ DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total); ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++#if DRM_DEBUG_CODE ++ ++static int drm__vma_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int len = 0; ++ struct drm_vma_entry *pt; ++ struct 
vm_area_struct *vma; ++#if defined(__i386__) ++ unsigned int pgprot; ++#endif ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ ++ DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n", ++ atomic_read(&dev->vma_count), ++ high_memory, virt_to_phys(high_memory)); ++ list_for_each_entry(pt, &dev->vmalist, head) { ++ if (!(vma = pt->vma)) ++ continue; ++ DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", ++ pt->pid, ++ vma->vm_start, ++ vma->vm_end, ++ vma->vm_flags & VM_READ ? 'r' : '-', ++ vma->vm_flags & VM_WRITE ? 'w' : '-', ++ vma->vm_flags & VM_EXEC ? 'x' : '-', ++ vma->vm_flags & VM_MAYSHARE ? 's' : 'p', ++ vma->vm_flags & VM_LOCKED ? 'l' : '-', ++ vma->vm_flags & VM_IO ? 'i' : '-', ++ vma->vm_pgoff); ++ ++#if defined(__i386__) ++ pgprot = pgprot_val(vma->vm_page_prot); ++ DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c", ++ pgprot & _PAGE_PRESENT ? 'p' : '-', ++ pgprot & _PAGE_RW ? 'w' : 'r', ++ pgprot & _PAGE_USER ? 'u' : 's', ++ pgprot & _PAGE_PWT ? 't' : 'b', ++ pgprot & _PAGE_PCD ? 'u' : 'c', ++ pgprot & _PAGE_ACCESSED ? 'a' : '-', ++ pgprot & _PAGE_DIRTY ? 'd' : '-', ++ pgprot & _PAGE_PSE ? 'm' : 'k', ++ pgprot & _PAGE_GLOBAL ? 'g' : 'l'); ++#endif ++ DRM_PROC_PRINT("\n"); ++ } ++ ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static int drm_vma_info(char *buf, char **start, off_t offset, int request, ++ int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm__vma_info(buf, start, offset, request, eof, data); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/drm_regman.c git-nokia/drivers/gpu/drm-tungsten/drm_regman.c +--- git/drivers/gpu/drm-tungsten/drm_regman.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_regman.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,200 @@ ++/************************************************************************** ++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ **************************************************************************/ ++/* ++ * An allocate-fence manager implementation intended for sets of base-registers ++ * or tiling-registers. ++ */ ++ ++#include "drmP.h" ++ ++/* ++ * Allocate a compatible register and put it on the unfenced list. ++ */ ++ ++int drm_regs_alloc(struct drm_reg_manager *manager, ++ const void *data, ++ uint32_t fence_class, ++ uint32_t fence_type, ++ int interruptible, int no_wait, struct drm_reg **reg) ++{ ++ struct drm_reg *entry, *next_entry; ++ int ret; ++ ++ *reg = NULL; ++ ++ /* ++ * Search the unfenced list. ++ */ ++ ++ list_for_each_entry(entry, &manager->unfenced, head) { ++ if (manager->reg_reusable(entry, data)) { ++ entry->new_fence_type |= fence_type; ++ goto out; ++ } ++ } ++ ++ /* ++ * Search the lru list. ++ */ ++ ++ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) { ++ struct drm_fence_object *fence = entry->fence; ++ if (fence->fence_class == fence_class && ++ (entry->fence_type & fence_type) == entry->fence_type && ++ manager->reg_reusable(entry, data)) { ++ list_del(&entry->head); ++ entry->new_fence_type = fence_type; ++ list_add_tail(&entry->head, &manager->unfenced); ++ goto out; ++ } ++ } ++ ++ /* ++ * Search the free list. ++ */ ++ ++ list_for_each_entry(entry, &manager->free, head) { ++ list_del(&entry->head); ++ entry->new_fence_type = fence_type; ++ list_add_tail(&entry->head, &manager->unfenced); ++ goto out; ++ } ++ ++ if (no_wait) ++ return -EBUSY; ++ ++ /* ++ * Go back to the lru list and try to expire fences. ++ */ ++ ++ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) { ++ BUG_ON(!entry->fence); ++ ret = drm_fence_object_wait(entry->fence, 0, !interruptible, ++ entry->fence_type); ++ if (ret) ++ return ret; ++ ++ drm_fence_usage_deref_unlocked(&entry->fence); ++ list_del(&entry->head); ++ entry->new_fence_type = fence_type; ++ list_add_tail(&entry->head, &manager->unfenced); ++ goto out; ++ } ++ ++ /* ++ * Oops. All registers are used up :(. ++ */ ++ ++ return -EBUSY; ++out: ++ *reg = entry; ++ return 0; ++} ++EXPORT_SYMBOL(drm_regs_alloc); ++ ++void drm_regs_fence(struct drm_reg_manager *manager, ++ struct drm_fence_object *fence) ++{ ++ struct drm_reg *entry; ++ struct drm_reg *next_entry; ++ ++ if (!fence) { ++ ++ /* ++ * Old fence (if any) is still valid. ++ * Put back on free and lru lists. ++ */ ++ ++ list_for_each_entry_safe_reverse(entry, next_entry, ++ &manager->unfenced, head) { ++ list_del(&entry->head); ++ list_add(&entry->head, (entry->fence) ? ++ &manager->lru : &manager->free); ++ } ++ } else { ++ ++ /* ++ * Fence with a new fence and put on lru list. 
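++		 * The fence_type requested at allocation time becomes the
++		 * entry's active type; it must be a subset of the new
++		 * fence's type mask.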
++		 */
++
++		list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
++					 head) {
++			list_del(&entry->head);
++			if (entry->fence)
++				drm_fence_usage_deref_unlocked(&entry->fence);
++			drm_fence_reference_unlocked(&entry->fence, fence);
++
++			entry->fence_type = entry->new_fence_type;
++			BUG_ON((entry->fence_type & fence->type) !=
++			       entry->fence_type);
++
++			list_add_tail(&entry->head, &manager->lru);
++		}
++	}
++}
++EXPORT_SYMBOL(drm_regs_fence);
++
++void drm_regs_free(struct drm_reg_manager *manager)
++{
++	struct drm_reg *entry;
++	struct drm_reg *next_entry;
++
++	drm_regs_fence(manager, NULL);
++
++	list_for_each_entry_safe(entry, next_entry, &manager->free, head) {
++		list_del(&entry->head);
++		manager->reg_destroy(entry);
++	}
++
++	list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
++
++		(void)drm_fence_object_wait(entry->fence, 1, 1,
++					    entry->fence_type);
++		list_del(&entry->head);
++		drm_fence_usage_deref_unlocked(&entry->fence);
++		manager->reg_destroy(entry);
++	}
++}
++EXPORT_SYMBOL(drm_regs_free);
++
++void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
++{
++	reg->fence = NULL;
++	list_add_tail(&reg->head, &manager->free);
++}
++EXPORT_SYMBOL(drm_regs_add);
++
++void drm_regs_init(struct drm_reg_manager *manager,
++		   int (*reg_reusable) (const struct drm_reg *, const void *),
++		   void (*reg_destroy) (struct drm_reg *))
++{
++	INIT_LIST_HEAD(&manager->free);
++	INIT_LIST_HEAD(&manager->lru);
++	INIT_LIST_HEAD(&manager->unfenced);
++	manager->reg_reusable = reg_reusable;
++	manager->reg_destroy = reg_destroy;
++}
++EXPORT_SYMBOL(drm_regs_init);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_sarea.h git-nokia/drivers/gpu/drm-tungsten/drm_sarea.h
+--- git/drivers/gpu/drm-tungsten/drm_sarea.h	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_sarea.h	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,84 @@
++/**
++ * \file drm_sarea.h
++ * \brief SAREA definitions
++ *
++ * \author Michel Dänzer
++ */
++
++/*
++ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */ ++ ++#ifndef _DRM_SAREA_H_ ++#define _DRM_SAREA_H_ ++ ++#include "drm.h" ++ ++/* SAREA area needs to be at least a page */ ++#if defined(__alpha__) ++#define SAREA_MAX 0x2000 ++#elif defined(__ia64__) ++#define SAREA_MAX 0x10000 /* 64kB */ ++#else ++/* Intel 830M driver needs at least 8k SAREA */ ++#define SAREA_MAX 0x2000UL ++#endif ++ ++/** Maximum number of drawables in the SAREA */ ++#define SAREA_MAX_DRAWABLES 256 ++ ++#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000 ++ ++/** SAREA drawable */ ++struct drm_sarea_drawable { ++ unsigned int stamp; ++ unsigned int flags; ++}; ++ ++/** SAREA frame */ ++struct drm_sarea_frame { ++ unsigned int x; ++ unsigned int y; ++ unsigned int width; ++ unsigned int height; ++ unsigned int fullscreen; ++}; ++ ++/** SAREA */ ++struct drm_sarea { ++ /** first thing is always the DRM locking structure */ ++ struct drm_hw_lock lock; ++ /** \todo Use readers/writer lock for drm_sarea::drawable_lock */ ++ struct drm_hw_lock drawable_lock; ++ struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */ ++ struct drm_sarea_frame frame; /**< frame */ ++ drm_context_t dummy_context; ++}; ++ ++#ifndef __KERNEL__ ++typedef struct drm_sarea_drawable drm_sarea_drawable_t; ++typedef struct drm_sarea_frame drm_sarea_frame_t; ++typedef struct drm_sarea drm_sarea_t; ++#endif ++ ++#endif /* _DRM_SAREA_H_ */ +diff -Nurd git/drivers/gpu/drm-tungsten/drm_scatter.c git-nokia/drivers/gpu/drm-tungsten/drm_scatter.c +--- git/drivers/gpu/drm-tungsten/drm_scatter.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_scatter.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,228 @@ ++/** ++ * \file drm_scatter.c ++ * IOCTLs to manage scatter/gather memory ++ * ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com ++ * ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ */
++
++#include <linux/vmalloc.h>
++#include "drmP.h"
++
++#define DEBUG_SCATTER 0
++
++static inline void *drm_vmalloc_dma(unsigned long size)
++{
++#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
++	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
++#else
++	return vmalloc_32(size);
++#endif
++}
++
++void drm_sg_cleanup(struct drm_sg_mem *entry)
++{
++	struct page *page;
++	int i;
++
++	for (i = 0; i < entry->pages; i++) {
++		page = entry->pagelist[i];
++		if (page)
++			ClearPageReserved(page);
++	}
++
++	vfree(entry->virtual);
++
++	drm_free(entry->busaddr,
++		 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
++	drm_free(entry->pagelist,
++		 entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES);
++	drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
++}
++EXPORT_SYMBOL(drm_sg_cleanup);
++
++#ifdef _LP64
++# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
++#else
++# define ScatterHandle(x) (unsigned int)(x)
++#endif
++
++int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
++{
++	struct drm_sg_mem *entry;
++	unsigned long pages, i, j;
++
++	DRM_DEBUG("\n");
++
++	if (!drm_core_check_feature(dev, DRIVER_SG))
++		return -EINVAL;
++
++	if (dev->sg)
++		return -EINVAL;
++
++	entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS);
++	if (!entry)
++		return -ENOMEM;
++
++	memset(entry, 0, sizeof(*entry));
++	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
++	DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
++
++	entry->pages = pages;
++	entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
++				    DRM_MEM_PAGES);
++	if (!entry->pagelist) {
++		drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
++		return -ENOMEM;
++	}
++
++	memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
++
++	entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr),
++				   DRM_MEM_PAGES);
++	if (!entry->busaddr) {
++		drm_free(entry->pagelist,
++			 entry->pages * sizeof(*entry->pagelist),
++			 DRM_MEM_PAGES);
++		drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
++		return -ENOMEM;
++	}
++	memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
++
++	entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
++	if (!entry->virtual) {
++		drm_free(entry->busaddr,
++			 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
++		drm_free(entry->pagelist,
++			 entry->pages * sizeof(*entry->pagelist),
++			 DRM_MEM_PAGES);
++		drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
++		return -ENOMEM;
++	}
++
++	/* This also forces the mapping of COW pages, so our page list
++	 * will be valid. Please don't remove it...
++	 */
++	memset(entry->virtual, 0, pages << PAGE_SHIFT);
++
++	entry->handle = ScatterHandle((unsigned long)entry->virtual);
++
++	DRM_DEBUG("handle = %08lx\n", entry->handle);
++	DRM_DEBUG("virtual = %p\n", entry->virtual);
++
++	for (i = (unsigned long)entry->virtual, j = 0; j < pages;
++	     i += PAGE_SIZE, j++) {
++		entry->pagelist[j] = vmalloc_to_page((void *)i);
++		if (!entry->pagelist[j])
++			goto failed;
++		SetPageReserved(entry->pagelist[j]);
++	}
++
++	request->handle = entry->handle;
++
++	dev->sg = entry;
++
++#if DEBUG_SCATTER
++	/* Verify that each page points to its virtual address, and vice
++	 * versa.
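++	 * Each page is written with a test pattern through page_address()
++	 * and read back through the vmalloc mapping; any mismatch is
++	 * reported via DRM_ERROR.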
++ */ ++ { ++ int error = 0; ++ ++ for (i = 0; i < pages; i++) { ++ unsigned long *tmp; ++ ++ tmp = page_address(entry->pagelist[i]); ++ for (j = 0; ++ j < PAGE_SIZE / sizeof(unsigned long); ++ j++, tmp++) { ++ *tmp = 0xcafebabe; ++ } ++ tmp = (unsigned long *)((u8 *) entry->virtual + ++ (PAGE_SIZE * i)); ++ for (j = 0; ++ j < PAGE_SIZE / sizeof(unsigned long); ++ j++, tmp++) { ++ if (*tmp != 0xcafebabe && error == 0) { ++ error = 1; ++ DRM_ERROR("Scatter allocation error, " ++ "pagelist does not match " ++ "virtual mapping\n"); ++ } ++ } ++ tmp = page_address(entry->pagelist[i]); ++ for (j = 0; ++ j < PAGE_SIZE / sizeof(unsigned long); ++ j++, tmp++) { ++ *tmp = 0; ++ } ++ } ++ if (error == 0) ++ DRM_ERROR("Scatter allocation matches pagelist\n"); ++ } ++#endif ++ ++ return 0; ++ ++ failed: ++ drm_sg_cleanup(entry); ++ return -ENOMEM; ++ ++} ++EXPORT_SYMBOL(drm_sg_alloc); ++ ++int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_scatter_gather *request = data; ++ ++ return drm_sg_alloc(dev, request); ++ ++} ++ ++int drm_sg_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_scatter_gather *request = data; ++ struct drm_sg_mem *entry; ++ ++ if (!drm_core_check_feature(dev, DRIVER_SG)) ++ return -EINVAL; ++ ++ entry = dev->sg; ++ dev->sg = NULL; ++ ++ if (!entry || entry->handle != request->handle) ++ return -EINVAL; ++ ++ DRM_DEBUG("virtual = %p\n", entry->virtual); ++ ++ drm_sg_cleanup(entry); ++ ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/drm_sman.c git-nokia/drivers/gpu/drm-tungsten/drm_sman.c +--- git/drivers/gpu/drm-tungsten/drm_sman.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_sman.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,353 @@ ++/************************************************************************** ++ * ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * ++ **************************************************************************/ ++/* ++ * Simple memory manager interface that keeps track on allocate regions on a ++ * per "owner" basis. All regions associated with an "owner" can be released ++ * with a simple call. Typically if the "owner" exists. The owner is any ++ * "unsigned long" identifier. 
Can typically be a pointer to a file private ++ * struct or a context identifier. ++ * ++ * Authors: ++ * Thomas Hellström ++ */ ++ ++#include "drm_sman.h" ++ ++struct drm_owner_item { ++ struct drm_hash_item owner_hash; ++ struct list_head sman_list; ++ struct list_head mem_blocks; ++}; ++ ++void drm_sman_takedown(struct drm_sman * sman) ++{ ++ drm_ht_remove(&sman->user_hash_tab); ++ drm_ht_remove(&sman->owner_hash_tab); ++ if (sman->mm) ++ drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm), ++ DRM_MEM_MM); ++} ++ ++EXPORT_SYMBOL(drm_sman_takedown); ++ ++int ++drm_sman_init(struct drm_sman * sman, unsigned int num_managers, ++ unsigned int user_order, unsigned int owner_order) ++{ ++ int ret = 0; ++ ++ sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm), ++ DRM_MEM_MM); ++ if (!sman->mm) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ sman->num_managers = num_managers; ++ INIT_LIST_HEAD(&sman->owner_items); ++ ret = drm_ht_create(&sman->owner_hash_tab, owner_order); ++ if (ret) ++ goto out1; ++ ret = drm_ht_create(&sman->user_hash_tab, user_order); ++ if (!ret) ++ goto out; ++ ++ drm_ht_remove(&sman->owner_hash_tab); ++out1: ++ drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM); ++out: ++ return ret; ++} ++ ++EXPORT_SYMBOL(drm_sman_init); ++ ++static void *drm_sman_mm_allocate(void *private, unsigned long size, ++ unsigned alignment) ++{ ++ struct drm_mm *mm = (struct drm_mm *) private; ++ struct drm_mm_node *tmp; ++ ++ tmp = drm_mm_search_free(mm, size, alignment, 1); ++ if (!tmp) { ++ return NULL; ++ } ++ tmp = drm_mm_get_block(tmp, size, alignment); ++ return tmp; ++} ++ ++static void drm_sman_mm_free(void *private, void *ref) ++{ ++ struct drm_mm_node *node = (struct drm_mm_node *) ref; ++ ++ drm_mm_put_block(node); ++} ++ ++static void drm_sman_mm_destroy(void *private) ++{ ++ struct drm_mm *mm = (struct drm_mm *) private; ++ drm_mm_takedown(mm); ++ drm_free(mm, sizeof(*mm), DRM_MEM_MM); ++} ++ ++static unsigned long drm_sman_mm_offset(void *private, void *ref) ++{ ++ struct drm_mm_node *node = (struct drm_mm_node *) ref; ++ return node->start; ++} ++ ++int ++drm_sman_set_range(struct drm_sman * sman, unsigned int manager, ++ unsigned long start, unsigned long size) ++{ ++ struct drm_sman_mm *sman_mm; ++ struct drm_mm *mm; ++ int ret; ++ ++ BUG_ON(manager >= sman->num_managers); ++ ++ sman_mm = &sman->mm[manager]; ++ mm = drm_calloc(1, sizeof(*mm), DRM_MEM_MM); ++ if (!mm) { ++ return -ENOMEM; ++ } ++ sman_mm->private = mm; ++ ret = drm_mm_init(mm, start, size); ++ ++ if (ret) { ++ drm_free(mm, sizeof(*mm), DRM_MEM_MM); ++ return ret; ++ } ++ ++ sman_mm->allocate = drm_sman_mm_allocate; ++ sman_mm->free = drm_sman_mm_free; ++ sman_mm->destroy = drm_sman_mm_destroy; ++ sman_mm->offset = drm_sman_mm_offset; ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(drm_sman_set_range); ++ ++int ++drm_sman_set_manager(struct drm_sman * sman, unsigned int manager, ++ struct drm_sman_mm * allocator) ++{ ++ BUG_ON(manager >= sman->num_managers); ++ sman->mm[manager] = *allocator; ++ ++ return 0; ++} ++EXPORT_SYMBOL(drm_sman_set_manager); ++ ++static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman, ++ unsigned long owner) ++{ ++ int ret; ++ struct drm_hash_item *owner_hash_item; ++ struct drm_owner_item *owner_item; ++ ++ ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item); ++ if (!ret) { ++ return drm_hash_entry(owner_hash_item, struct drm_owner_item, ++ owner_hash); ++ } ++ ++ owner_item = drm_calloc(1, sizeof(*owner_item), 
DRM_MEM_MM); ++ if (!owner_item) ++ goto out; ++ ++ INIT_LIST_HEAD(&owner_item->mem_blocks); ++ owner_item->owner_hash.key = owner; ++ if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash)) ++ goto out1; ++ ++ list_add_tail(&owner_item->sman_list, &sman->owner_items); ++ return owner_item; ++ ++out1: ++ drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); ++out: ++ return NULL; ++} ++ ++struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager, ++ unsigned long size, unsigned alignment, ++ unsigned long owner) ++{ ++ void *tmp; ++ struct drm_sman_mm *sman_mm; ++ struct drm_owner_item *owner_item; ++ struct drm_memblock_item *memblock; ++ ++ BUG_ON(manager >= sman->num_managers); ++ ++ sman_mm = &sman->mm[manager]; ++ tmp = sman_mm->allocate(sman_mm->private, size, alignment); ++ ++ if (!tmp) { ++ return NULL; ++ } ++ ++ memblock = drm_calloc(1, sizeof(*memblock), DRM_MEM_MM); ++ ++ if (!memblock) ++ goto out; ++ ++ memblock->mm_info = tmp; ++ memblock->mm = sman_mm; ++ memblock->sman = sman; ++ ++ if (drm_ht_just_insert_please ++ (&sman->user_hash_tab, &memblock->user_hash, ++ (unsigned long)memblock, 32, 0, 0)) ++ goto out1; ++ ++ owner_item = drm_sman_get_owner_item(sman, owner); ++ if (!owner_item) ++ goto out2; ++ ++ list_add_tail(&memblock->owner_list, &owner_item->mem_blocks); ++ ++ return memblock; ++ ++out2: ++ drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash); ++out1: ++ drm_free(memblock, sizeof(*memblock), DRM_MEM_MM); ++out: ++ sman_mm->free(sman_mm->private, tmp); ++ ++ return NULL; ++} ++ ++EXPORT_SYMBOL(drm_sman_alloc); ++ ++static void drm_sman_free(struct drm_memblock_item *item) ++{ ++ struct drm_sman *sman = item->sman; ++ ++ list_del(&item->owner_list); ++ drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash); ++ item->mm->free(item->mm->private, item->mm_info); ++ drm_free(item, sizeof(*item), DRM_MEM_MM); ++} ++ ++int drm_sman_free_key(struct drm_sman *sman, unsigned int key) ++{ ++ struct drm_hash_item *hash_item; ++ struct drm_memblock_item *memblock_item; ++ ++ if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item)) ++ return -EINVAL; ++ ++ memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item, ++ user_hash); ++ drm_sman_free(memblock_item); ++ return 0; ++} ++ ++EXPORT_SYMBOL(drm_sman_free_key); ++ ++static void drm_sman_remove_owner(struct drm_sman *sman, ++ struct drm_owner_item *owner_item) ++{ ++ list_del(&owner_item->sman_list); ++ drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash); ++ drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); ++} ++ ++int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) ++{ ++ ++ struct drm_hash_item *hash_item; ++ struct drm_owner_item *owner_item; ++ ++ if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { ++ return -1; ++ } ++ ++ owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); ++ if (owner_item->mem_blocks.next == &owner_item->mem_blocks) { ++ drm_sman_remove_owner(sman, owner_item); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(drm_sman_owner_clean); ++ ++static void drm_sman_do_owner_cleanup(struct drm_sman *sman, ++ struct drm_owner_item *owner_item) ++{ ++ struct drm_memblock_item *entry, *next; ++ ++ list_for_each_entry_safe(entry, next, &owner_item->mem_blocks, ++ owner_list) { ++ drm_sman_free(entry); ++ } ++ drm_sman_remove_owner(sman, owner_item); ++} ++ ++void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner) ++{ ++ ++ struct 
drm_hash_item *hash_item; ++ struct drm_owner_item *owner_item; ++ ++ if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { ++ ++ return; ++ } ++ ++ owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); ++ drm_sman_do_owner_cleanup(sman, owner_item); ++} ++ ++EXPORT_SYMBOL(drm_sman_owner_cleanup); ++ ++void drm_sman_cleanup(struct drm_sman *sman) ++{ ++ struct drm_owner_item *entry, *next; ++ unsigned int i; ++ struct drm_sman_mm *sman_mm; ++ ++ list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) { ++ drm_sman_do_owner_cleanup(sman, entry); ++ } ++ if (sman->mm) { ++ for (i = 0; i < sman->num_managers; ++i) { ++ sman_mm = &sman->mm[i]; ++ if (sman_mm->private) { ++ sman_mm->destroy(sman_mm->private); ++ sman_mm->private = NULL; ++ } ++ } ++ } ++} ++ ++EXPORT_SYMBOL(drm_sman_cleanup); +diff -Nurd git/drivers/gpu/drm-tungsten/drm_sman.h git-nokia/drivers/gpu/drm-tungsten/drm_sman.h +--- git/drivers/gpu/drm-tungsten/drm_sman.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_sman.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,176 @@ ++/************************************************************************** ++ * ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * ++ **************************************************************************/ ++/* ++ * Simple memory MANager interface that keeps track on allocate regions on a ++ * per "owner" basis. All regions associated with an "owner" can be released ++ * with a simple call. Typically if the "owner" exists. The owner is any ++ * "unsigned long" identifier. Can typically be a pointer to a file private ++ * struct or a context identifier. ++ * ++ * Authors: ++ * Thomas Hellström ++ */ ++ ++#ifndef DRM_SMAN_H ++#define DRM_SMAN_H ++ ++#include "drmP.h" ++#include "drm_hashtab.h" ++ ++/* ++ * A class that is an abstration of a simple memory allocator. ++ * The sman implementation provides a default such allocator ++ * using the drm_mm.c implementation. But the user can replace it. ++ * See the SiS implementation, which may use the SiS FB kernel module ++ * for memory management. ++ */ ++ ++struct drm_sman_mm { ++ /* private info. 
If allocated, needs to be destroyed by the destroy ++ function */ ++ void *private; ++ ++ /* Allocate a memory block with given size and alignment. ++ Return an opaque reference to the memory block */ ++ ++ void *(*allocate) (void *private, unsigned long size, ++ unsigned alignment); ++ ++ /* Free a memory block. "ref" is the opaque reference that we got from ++ the "alloc" function */ ++ ++ void (*free) (void *private, void *ref); ++ ++ /* Free all resources associated with this allocator */ ++ ++ void (*destroy) (void *private); ++ ++ /* Return a memory offset from the opaque reference returned from the ++ "alloc" function */ ++ ++ unsigned long (*offset) (void *private, void *ref); ++}; ++ ++struct drm_memblock_item { ++ struct list_head owner_list; ++ struct drm_hash_item user_hash; ++ void *mm_info; ++ struct drm_sman_mm *mm; ++ struct drm_sman *sman; ++}; ++ ++struct drm_sman { ++ struct drm_sman_mm *mm; ++ int num_managers; ++ struct drm_open_hash owner_hash_tab; ++ struct drm_open_hash user_hash_tab; ++ struct list_head owner_items; ++}; ++ ++/* ++ * Take down a memory manager. This function should only be called after a ++ * successful init and after a call to drm_sman_cleanup. ++ */ ++ ++extern void drm_sman_takedown(struct drm_sman * sman); ++ ++/* ++ * Allocate structures for a manager. ++ * num_managers are the number of memory pools to manage. (VRAM, AGP, ....) ++ * user_order is the log2 of the number of buckets in the user hash table. ++ * set this to approximately log2 of the max number of memory regions ++ * that will be allocated for _all_ pools together. ++ * owner_order is the log2 of the number of buckets in the owner hash table. ++ * set this to approximately log2 of ++ * the number of client file connections that will ++ * be using the manager. ++ * ++ */ ++ ++extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers, ++ unsigned int user_order, unsigned int owner_order); ++ ++/* ++ * Initialize a drm_mm.c allocator. Should be called only once for each ++ * manager unless a customized allogator is used. ++ */ ++ ++extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager, ++ unsigned long start, unsigned long size); ++ ++/* ++ * Initialize a customized allocator for one of the managers. ++ * (See the SiS module). The object pointed to by "allocator" is copied, ++ * so it can be destroyed after this call. ++ */ ++ ++extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger, ++ struct drm_sman_mm * allocator); ++ ++/* ++ * Allocate a memory block. Aligment is not implemented yet. ++ */ ++ ++extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman, ++ unsigned int manager, ++ unsigned long size, ++ unsigned alignment, ++ unsigned long owner); ++/* ++ * Free a memory block identified by its user hash key. ++ */ ++ ++extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key); ++ ++/* ++ * returns 1 iff there are no stale memory blocks associated with this owner. ++ * Typically called to determine if we need to idle the hardware and call ++ * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all ++ * resources associated with owner. ++ */ ++ ++extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner); ++ ++/* ++ * Frees all stale memory blocks associated with this owner. Note that this ++ * requires that the hardware is finished with all blocks, so the graphics engine ++ * should be idled before this call is made. 
This function also frees ++ * any resources associated with "owner" and should be called when owner ++ * is not going to be referenced anymore. ++ */ ++ ++extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner); ++ ++/* ++ * Frees all stale memory blocks associated with the memory manager. ++ * See idling above. ++ */ ++ ++extern void drm_sman_cleanup(struct drm_sman * sman); ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/drm_stub.c git-nokia/drivers/gpu/drm-tungsten/drm_stub.c +--- git/drivers/gpu/drm-tungsten/drm_stub.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_stub.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,400 @@ ++/** ++ * \file drm_stub.c ++ * Stub support ++ * ++ * \author Rickard E. (Rik) Faith ++ */ ++ ++/* ++ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org ++ * ++ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++
++#include "drmP.h"
++#include "drm_core.h"
++
++unsigned int drm_debug = 0;	/* 1 to enable debug output */
++EXPORT_SYMBOL(drm_debug);
++
++MODULE_AUTHOR(CORE_AUTHOR);
++MODULE_DESCRIPTION(CORE_DESC);
++MODULE_LICENSE("GPL and additional rights");
++MODULE_PARM_DESC(debug, "Enable debug output");
++
++module_param_named(debug, drm_debug, int, 0600);
++
++struct idr drm_minors_idr;
++
++struct class *drm_class;
++struct proc_dir_entry *drm_proc_root;
++
++static int drm_minor_get_id(struct drm_device *dev, int type)
++{
++	int new_id;
++	int ret;
++	int base = 0, limit = 63;
++
++again:
++	if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
++		DRM_ERROR("Out of memory expanding drawable idr\n");
++		return -ENOMEM;
++	}
++	mutex_lock(&dev->struct_mutex);
++	ret = idr_get_new_above(&drm_minors_idr, NULL,
++				base, &new_id);
++	mutex_unlock(&dev->struct_mutex);
++	if (ret == -EAGAIN) {
++		goto again;
++	} else if (ret) {
++		return ret;
++	}
++
++	if (new_id >= limit) {
++		idr_remove(&drm_minors_idr, new_id);
++		return -EINVAL;
++	}
++	return new_id;
++}
++
++static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
++			   const struct pci_device_id *ent,
++			   struct drm_driver *driver)
++{
++	int retcode;
++
++	INIT_LIST_HEAD(&dev->filelist);
++	INIT_LIST_HEAD(&dev->ctxlist);
++	INIT_LIST_HEAD(&dev->vmalist);
++	INIT_LIST_HEAD(&dev->maplist);
++
++	spin_lock_init(&dev->count_lock);
++	spin_lock_init(&dev->drw_lock);
++	spin_lock_init(&dev->tasklet_lock);
++	spin_lock_init(&dev->lock.spinlock);
++	init_timer(&dev->timer);
++	mutex_init(&dev->struct_mutex);
++	mutex_init(&dev->ctxlist_mutex);
++	mutex_init(&dev->bm.evict_mutex);
++
++	idr_init(&dev->drw_idr);
++
++	dev->pdev = pdev;
++
++	if (pdev) {
++		dev->pci_device = pdev->device;
++		dev->pci_vendor = pdev->vendor;
++
++#ifdef __alpha__
++		dev->hose = pdev->sysdata;
++#endif
++
++		dev->irq = pdev->irq;
++	}
++
++	dev->irq_enabled = 0;
++
++	if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
++		return -ENOMEM;
++	}
++	if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
++			DRM_FILE_PAGE_OFFSET_SIZE)) {
++		drm_ht_remove(&dev->map_hash);
++		return -ENOMEM;
++	}
++
++	if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
++		drm_ht_remove(&dev->map_hash);
++		drm_mm_takedown(&dev->offset_manager);
++		return -ENOMEM;
++	}
++
++	/* the DRM has 6 counters */
++	dev->counters = 6;
++	dev->types[0] = _DRM_STAT_LOCK;
++	dev->types[1] = _DRM_STAT_OPENS;
++	dev->types[2] = _DRM_STAT_CLOSES;
++	dev->types[3] = _DRM_STAT_IOCTLS;
++	dev->types[4] = _DRM_STAT_LOCKS;
++	dev->types[5] = _DRM_STAT_UNLOCKS;
++
++	dev->driver = driver;
++
++#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
++	if (drm_core_has_AGP(dev)) {
++		if (drm_device_is_agp(dev))
++			dev->agp = drm_agp_init(dev);
++		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
++		    && (dev->agp == NULL)) {
++			DRM_ERROR("Cannot initialize the agpgart module.\n");
++			retcode = -EINVAL;
++			goto error_out_unreg;
++		}
++
++		if (drm_core_has_MTRR(dev)) {
++			if (dev->agp)
++				dev->agp->agp_mtrr =
++					mtrr_add(dev->agp->agp_info.aper_base,
++						 dev->agp->agp_info.aper_size *
++						 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
++		}
++	}
++#endif
++
++	retcode = drm_ctxbitmap_init(dev);
++	if (retcode) {
++		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
++		goto error_out_unreg;
++	}
++
++	if (driver->driver_features & DRIVER_GEM) {
++		retcode = drm_gem_init (dev);
++		if (retcode) {
++			DRM_ERROR("Cannot initialize graphics execution manager 
(GEM)\n"); ++ goto error_out_unreg; ++ } ++ } ++ ++ drm_fence_manager_init(dev); ++ ++ return 0; ++ ++error_out_unreg: ++ drm_lastclose(dev); ++ return retcode; ++} ++ ++/** ++ * Get a secondary minor number. ++ * ++ * \param dev device data structure ++ * \param sec-minor structure to hold the assigned minor ++ * \return negative number on failure. ++ * ++ * Search an empty entry and initialize it to the given parameters, and ++ * create the proc init entry via proc_init(). This routines assigns ++ * minor numbers to secondary heads of multi-headed cards ++ */ ++static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) ++{ ++ struct drm_minor *new_minor; ++ int ret; ++ int minor_id; ++ ++ DRM_DEBUG("\n"); ++ ++ minor_id = drm_minor_get_id(dev, type); ++ if (minor_id < 0) ++ return minor_id; ++ ++ new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL); ++ if (!new_minor) { ++ ret = -ENOMEM; ++ goto err_idr; ++ } ++ ++ new_minor->type = type; ++ new_minor->device = MKDEV(DRM_MAJOR, minor_id); ++ new_minor->dev = dev; ++ new_minor->index = minor_id; ++ ++ idr_replace(&drm_minors_idr, new_minor, minor_id); ++ ++ if (type == DRM_MINOR_LEGACY) { ++ ret = drm_proc_init(new_minor, minor_id, drm_proc_root); ++ if (ret) { ++ DRM_ERROR("DRM: Failed to initialize /proc/dri.\n"); ++ goto err_mem; ++ } ++ if (dev->driver->proc_init) { ++ ret = dev->driver->proc_init(new_minor); ++ if (ret) { ++ DRM_ERROR("DRM: Driver failed to initialize /proc/dri.\n"); ++ goto err_mem; ++ } ++ } ++ } else ++ new_minor->dev_root = NULL; ++ ++ ret = drm_sysfs_device_add(new_minor); ++ if (ret) { ++ printk(KERN_ERR ++ "DRM: Error sysfs_device_add.\n"); ++ goto err_g2; ++ } ++ *minor = new_minor; ++ ++ DRM_DEBUG("new minor assigned %d\n", minor_id); ++ return 0; ++ ++ ++err_g2: ++ if (new_minor->type == DRM_MINOR_LEGACY) { ++ if (dev->driver->proc_cleanup) ++ dev->driver->proc_cleanup(new_minor); ++ drm_proc_cleanup(new_minor, drm_proc_root); ++ } ++err_mem: ++ kfree(new_minor); ++err_idr: ++ idr_remove(&drm_minors_idr, minor_id); ++ *minor = NULL; ++ return ret; ++} ++ ++/** ++ * Register. ++ * ++ * \param pdev - PCI device structure ++ * \param ent entry from the PCI ID table with device type flags ++ * \return zero on success or a negative number on failure. ++ * ++ * Attempt to gets inter module "drm" information. If we are first ++ * then register the character device and inter module information. ++ * Try and register, if we fail to register, backout previous work. ++ */ ++int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, ++ struct drm_driver *driver) ++{ ++ struct drm_device *dev; ++ int ret; ++ ++ DRM_DEBUG("\n"); ++ ++ dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB); ++ if (!dev) ++ return -ENOMEM; ++ ++#ifdef CONFIG_PCI ++ if (!drm_fb_loaded) { ++ pci_set_drvdata(pdev, dev); ++ ret = pci_request_regions(pdev, driver->pci_driver.name); ++ if (ret) ++ goto err_g1; ++ } ++ ++ ret = pci_enable_device(pdev); ++ if (ret) ++ goto err_g2; ++ pci_set_master(pdev); ++#endif ++ ++ if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) { ++ printk(KERN_ERR "DRM: fill_in_dev failed\n"); ++ goto err_g3; ++ } ++ ++ /* only add the control node on a modesetting platform */ ++ if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) ++ goto err_g3; ++ ++ if (dev->driver->load) ++ if ((ret = dev->driver->load(dev, ent ? 
ent->driver_data : 0)))
++			goto err_g4;
++
++	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
++		 driver->name, driver->major, driver->minor, driver->patchlevel,
++		 driver->date, dev->primary->index);
++
++	return 0;
++err_g4:
++	drm_put_minor(dev);
++err_g3:
++#ifdef CONFIG_PCI
++	if (!drm_fb_loaded)
++		pci_disable_device(pdev);
++err_g2:
++	if (!drm_fb_loaded)
++		pci_release_regions(pdev);
++err_g1:
++	if (!drm_fb_loaded)
++		pci_set_drvdata(pdev, NULL);
++#endif
++
++	drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
++	printk(KERN_ERR "DRM: drm_get_dev failed.\n");
++	return ret;
++}
++EXPORT_SYMBOL(drm_get_dev);
++
++
++/**
++ * Put a device minor number.
++ *
++ * \param dev device data structure
++ * \return always zero
++ *
++ * Cleans up the proc resources. If it is the last minor then release the foreign
++ * "drm" data, otherwise unregisters the "drm" data, frees the dev list and
++ * unregisters the character device.
++ */
++int drm_put_dev(struct drm_device * dev)
++{
++	DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
++
++	if (dev->unique) {
++		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
++		dev->unique = NULL;
++		dev->unique_len = 0;
++	}
++	if (dev->devname) {
++		drm_free(dev->devname, strlen(dev->devname) + 1,
++			 DRM_MEM_DRIVER);
++		dev->devname = NULL;
++	}
++	drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
++	return 0;
++}
++
++/**
++ * Put a secondary minor number.
++ *
++ * \param sec_minor - structure to be released
++ * \return always zero
++ *
++ * Cleans up the proc resources. Not legal for this to be the
++ * last minor released.
++ *
++ */
++int drm_put_minor(struct drm_device *dev)
++{
++	struct drm_minor **minor_p = &dev->primary;
++	struct drm_minor *minor = *minor_p;
++	DRM_DEBUG("release secondary minor %d\n", minor->index);
++
++	if (minor->type == DRM_MINOR_LEGACY) {
++		if (dev->driver->proc_cleanup)
++			dev->driver->proc_cleanup(minor);
++		drm_proc_cleanup(minor, drm_proc_root);
++	}
++	drm_sysfs_device_remove(minor);
++
++	idr_remove(&drm_minors_idr, minor->index);
++
++	kfree(minor);
++	*minor_p = NULL;
++	return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_sysfs.c git-nokia/drivers/gpu/drm-tungsten/drm_sysfs.c
+--- git/drivers/gpu/drm-tungsten/drm_sysfs.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_sysfs.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,212 @@
++
++/*
++ * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
++ *               extra sysfs attribute from DRM. Normal drm_sysfs_class
++ *               does not allow adding attributes.
++ *
++ * Copyright (c) 2004 Jon Smirl
++ * Copyright (c) 2003-2004 Greg Kroah-Hartman
++ * Copyright (c) 2003-2004 IBM Corp.
++ *
++ * This file is released under the GPLv2
++ *
++ */
++
++#include <linux/device.h>
++#include <linux/kdev_t.h>
++#include <linux/err.h>
++
++#include "drm_core.h"
++#include "drmP.h"
++
++#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
++
++/**
++ * drm_sysfs_suspend - DRM class suspend hook
++ * @dev: Linux device to suspend
++ * @state: power state to enter
++ *
++ * Just figures out what the actual struct drm_device associated with
++ * @dev is and calls its suspend hook, if present.
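++ * Returns whatever the driver hook returns, or 0 when no hook is set.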
++ */ ++static int drm_sysfs_suspend(struct device *dev, pm_message_t state) ++{ ++ struct drm_minor *drm_minor = to_drm_minor(dev); ++ struct drm_device *drm_dev = drm_minor->dev; ++ ++ printk(KERN_ERR "%s\n", __FUNCTION__); ++ ++ if (drm_dev->driver->suspend) ++ return drm_dev->driver->suspend(drm_dev, state); ++ ++ return 0; ++} ++ ++/** ++ * drm_sysfs_resume - DRM class resume hook ++ * @dev: Linux device to resume ++ * ++ * Just figures out what the actual struct drm_device associated with ++ * @dev is and calls its resume hook, if present. ++ */ ++static int drm_sysfs_resume(struct device *dev) ++{ ++ struct drm_minor *drm_minor = to_drm_minor(dev); ++ struct drm_device *drm_dev = drm_minor->dev; ++ ++ if (drm_dev->driver->resume) ++ return drm_dev->driver->resume(drm_dev); ++ ++ return 0; ++} ++ ++/* Display the version of drm_core. This doesn't work right in current design */ ++static ssize_t version_show(struct class *dev, char *buf) ++{ ++ return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR, ++ CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); ++} ++ ++static CLASS_ATTR(version, S_IRUGO, version_show, NULL); ++ ++/** ++ * drm_sysfs_create - create a struct drm_sysfs_class structure ++ * @owner: pointer to the module that is to "own" this struct drm_sysfs_class ++ * @name: pointer to a string for the name of this class. ++ * ++ * This is used to create DRM class pointer that can then be used ++ * in calls to drm_sysfs_device_add(). ++ * ++ * Note, the pointer created here is to be destroyed when finished by making a ++ * call to drm_sysfs_destroy(). ++ */ ++struct class *drm_sysfs_create(struct module *owner, char *name) ++{ ++ struct class *class; ++ int err; ++ ++ class = class_create(owner, name); ++ if (IS_ERR(class)) { ++ err = PTR_ERR(class); ++ goto err_out; ++ } ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) ++ class->suspend = drm_sysfs_suspend; ++ class->resume = drm_sysfs_resume; ++#endif ++ ++ err = class_create_file(class, &class_attr_version); ++ if (err) ++ goto err_out_class; ++ ++ return class; ++ ++err_out_class: ++ class_destroy(class); ++err_out: ++ return ERR_PTR(err); ++} ++ ++/** ++ * drm_sysfs_destroy - destroys DRM class ++ * ++ * Destroy the DRM device class. ++ */ ++void drm_sysfs_destroy(void) ++{ ++ if ((drm_class == NULL) || (IS_ERR(drm_class))) ++ return; ++ class_remove_file(drm_class, &class_attr_version); ++ class_destroy(drm_class); ++} ++ ++static ssize_t show_dri(struct device *device, struct device_attribute *attr, ++ char *buf) ++{ ++ struct drm_minor *drm_minor = to_drm_minor(device); ++ struct drm_device *drm_dev = drm_minor->dev; ++ if (drm_dev->driver->dri_library_name) ++ return drm_dev->driver->dri_library_name(drm_dev, buf); ++ return snprintf(buf, PAGE_SIZE, "%s\n", drm_dev->driver->pci_driver.name); ++} ++ ++static struct device_attribute device_attrs[] = { ++ __ATTR(dri_library_name, S_IRUGO, show_dri, NULL), ++}; ++ ++/** ++ * drm_sysfs_device_release - do nothing ++ * @dev: Linux device ++ * ++ * Normally, this would free the DRM device associated with @dev, along ++ * with cleaning up any other stuff. But we do that in the DRM core, so ++ * this function can just return and hope that the core does its job. ++ */ ++static void drm_sysfs_device_release(struct device *dev) ++{ ++ return; ++} ++ ++/** ++ * drm_sysfs_device_add - adds a class device to sysfs for a character driver ++ * @dev: DRM device to be added ++ * @head: DRM head in question ++ * ++ * Add a DRM device to the DRM's device model class. 
We use @dev's PCI device ++ * as the parent for the Linux device, and make sure it has a file containing ++ * the driver we're using (for userspace compatibility). ++ */ ++int drm_sysfs_device_add(struct drm_minor *minor) ++{ ++ int err; ++ int i, j; ++ char *minor_str; ++ ++ minor->kdev.parent = minor->dev->pdev ? &minor->dev->pdev->dev : NULL; ++ minor->kdev.class = drm_class; ++ minor->kdev.release = drm_sysfs_device_release; ++ minor->kdev.devt = minor->device; ++ minor_str = "card%d"; ++ ++ snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index); ++ ++ err = device_register(&minor->kdev); ++ if (err) { ++ DRM_ERROR("device add failed: %d\n", err); ++ goto err_out; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(device_attrs); i++) { ++ err = device_create_file(&minor->kdev, &device_attrs[i]); ++ if (err) ++ goto err_out_files; ++ } ++ ++ return 0; ++ ++err_out_files: ++ if (i > 0) ++ for (j = 0; j < i; j++) ++ device_remove_file(&minor->kdev, &device_attrs[j]); ++ device_unregister(&minor->kdev); ++err_out: ++ ++ return err; ++} ++ ++/** ++ * drm_sysfs_device_remove - remove DRM device ++ * @dev: DRM device to remove ++ * ++ * This call unregisters and cleans up a class device that was created with a ++ * call to drm_sysfs_device_add() ++ */ ++void drm_sysfs_device_remove(struct drm_minor *minor) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(device_attrs); i++) ++ device_remove_file(&minor->kdev, &device_attrs[i]); ++ device_unregister(&minor->kdev); ++} +diff -Nurd git/drivers/gpu/drm-tungsten/drm_ttm.c git-nokia/drivers/gpu/drm-tungsten/drm_ttm.c +--- git/drivers/gpu/drm-tungsten/drm_ttm.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_ttm.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,524 @@ ++/************************************************************************** ++ * ++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++ ++#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) ++static void drm_clflush_page(struct page *page) ++{ ++ uint8_t *page_virtual; ++ unsigned int i; ++ ++ if (unlikely(page == NULL)) ++ return; ++ ++ page_virtual = kmap_atomic(page, KM_USER0); ++ ++ for (i=0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) ++ clflush(page_virtual + i); ++ ++ kunmap_atomic(page_virtual, KM_USER0); ++} ++ ++static void drm_ttm_cache_flush_clflush(struct page *pages[], unsigned long num_pages) ++{ ++ unsigned long i; ++ ++ mb(); ++ for (i=0; i < num_pages; ++i) ++ drm_clflush_page(*pages++); ++ mb(); ++} ++#endif ++ ++static void drm_ttm_ipi_handler(void *null) ++{ ++#ifdef CONFIG_AGP ++ flush_agp_cache(); ++#endif ++} ++ ++void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages) ++{ ++ ++#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) ++ if (cpu_has_clflush) { ++ drm_ttm_cache_flush_clflush(pages, num_pages); ++ return; ++ } ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) ++ if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1)) ++#else ++ if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0) ++#endif ++ DRM_ERROR("Timed out waiting for drm cache flush.\n"); ++} ++EXPORT_SYMBOL(drm_ttm_cache_flush); ++ ++/** ++ * Allocates storage for pointers to the pages that back the ttm. ++ * ++ * Uses kmalloc if possible. Otherwise falls back to vmalloc. ++ */ ++static void drm_ttm_alloc_page_directory(struct drm_ttm *ttm) ++{ ++ unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ++ ttm->pages = NULL; ++ ++ if (drm_alloc_memctl(size)) ++ return; ++ ++ if (size <= PAGE_SIZE) ++ ttm->pages = drm_calloc(1, size, DRM_MEM_TTM); ++ ++ if (!ttm->pages) { ++ ttm->pages = vmalloc_user(size); ++ if (ttm->pages) ++ ttm->page_flags |= DRM_TTM_PAGEDIR_VMALLOC; ++ } ++ if (!ttm->pages) ++ drm_free_memctl(size); ++} ++ ++static void drm_ttm_free_page_directory(struct drm_ttm *ttm) ++{ ++ unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ++ ++ if (ttm->page_flags & DRM_TTM_PAGEDIR_VMALLOC) { ++ vfree(ttm->pages); ++ ttm->page_flags &= ~DRM_TTM_PAGEDIR_VMALLOC; ++ } else { ++ drm_free(ttm->pages, size, DRM_MEM_TTM); ++ } ++ drm_free_memctl(size); ++ ttm->pages = NULL; ++} ++ ++static struct page *drm_ttm_alloc_page(void) ++{ ++ struct page *page; ++ ++ if (drm_alloc_memctl(PAGE_SIZE)) ++ return NULL; ++ ++ page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); ++ if (!page) { ++ drm_free_memctl(PAGE_SIZE); ++ return NULL; ++ } ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ SetPageReserved(page); ++#endif ++ return page; ++} ++ ++/* ++ * Change caching policy for the linear kernel map ++ * for range of pages in a ttm. 
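++ * When going non-cached the pages are flushed first and then remapped
++ * with map_page_into_agp(); highmem pages have no linear kernel mapping
++ * to adjust and are skipped.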
++ */ ++ ++static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached) ++{ ++ int i; ++ struct page **cur_page; ++ int do_tlbflush = 0; ++ ++ if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached) ++ return 0; ++ ++ if (noncached) ++ drm_ttm_cache_flush(ttm->pages, ttm->num_pages); ++ ++ for (i = 0; i < ttm->num_pages; ++i) { ++ cur_page = ttm->pages + i; ++ if (*cur_page) { ++ if (!PageHighMem(*cur_page)) { ++#ifdef CONFIG_AGP ++ if (noncached) { ++ map_page_into_agp(*cur_page); ++ } else { ++ unmap_page_from_agp(*cur_page); ++ } ++#endif ++ do_tlbflush = 1; ++ } ++ } ++ } ++#ifdef CONFIG_AGP ++ if (do_tlbflush) ++ flush_agp_mappings(); ++#endif ++ ++ DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED); ++ ++ return 0; ++} ++ ++ ++static void drm_ttm_free_user_pages(struct drm_ttm *ttm) ++{ ++ int write; ++ int dirty; ++ struct page *page; ++ int i; ++ ++ BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER)); ++ write = ((ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0); ++ dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0); ++ ++ for (i = 0; i < ttm->num_pages; ++i) { ++ page = ttm->pages[i]; ++ if (page == NULL) ++ continue; ++ ++ if (page == ttm->dummy_read_page) { ++ BUG_ON(write); ++ continue; ++ } ++ ++ if (write && dirty && !PageReserved(page)) ++ set_page_dirty_lock(page); ++ ++ ttm->pages[i] = NULL; ++ put_page(page); ++ } ++} ++ ++static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm) ++{ ++ int i; ++ struct drm_buffer_manager *bm = &ttm->dev->bm; ++ struct page **cur_page; ++ ++ for (i = 0; i < ttm->num_pages; ++i) { ++ cur_page = ttm->pages + i; ++ if (*cur_page) { ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) ++ ClearPageReserved(*cur_page); ++#endif ++ if (page_count(*cur_page) != 1) ++ DRM_ERROR("Erroneous page count. Leaking pages.\n"); ++ if (page_mapped(*cur_page)) ++ DRM_ERROR("Erroneous map count. Leaking page mappings.\n"); ++ __free_page(*cur_page); ++ drm_free_memctl(PAGE_SIZE); ++ --bm->cur_pages; ++ } ++ } ++} ++ ++/* ++ * Free all resources associated with a ttm. ++ */ ++ ++int drm_ttm_destroy(struct drm_ttm *ttm) ++{ ++ struct drm_ttm_backend *be; ++ ++ if (!ttm) ++ return 0; ++ ++ be = ttm->be; ++ if (be) { ++ be->func->destroy(be); ++ ttm->be = NULL; ++ } ++ ++ if (ttm->pages) { ++ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) ++ drm_ttm_set_caching(ttm, 0); ++ ++ if (ttm->page_flags & DRM_TTM_PAGE_USER) ++ drm_ttm_free_user_pages(ttm); ++ else ++ drm_ttm_free_alloced_pages(ttm); ++ ++ drm_ttm_free_page_directory(ttm); ++ } ++ ++ drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM); ++ return 0; ++} ++ ++struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index) ++{ ++ struct page *p; ++ struct drm_buffer_manager *bm = &ttm->dev->bm; ++ ++ while(NULL == (p = ttm->pages[index])) { ++ p = drm_ttm_alloc_page(); ++ if (!p) ++ return NULL; ++ ++ if (PageHighMem(p)) ++ ttm->pages[--ttm->first_himem_page] = p; ++ else ++ ttm->pages[++ttm->last_lomem_page] = p; ++ ++ ++bm->cur_pages; ++ } ++ return p; ++} ++EXPORT_SYMBOL(drm_ttm_get_page); ++ ++/** ++ * drm_ttm_set_user: ++ * ++ * @ttm: the ttm to map pages to. This must always be ++ * a freshly created ttm. ++ * ++ * @tsk: a pointer to the address space from which to map ++ * pages. ++ * ++ * @write: a boolean indicating that write access is desired ++ * ++ * start: the starting address ++ * ++ * Map a range of user addresses to a new ttm object. This ++ * provides access to user memory from the graphics device. 
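++ * The pages are pinned with get_user_pages() under mmap_sem; a short
++ * pin of a writable range is undone and reported as -ENOMEM.
++ *
++ * A typical call, for a hypothetical user buffer described by uaddr and
++ * size:
++ *
++ *	ret = drm_ttm_set_user(ttm, current, uaddr & PAGE_MASK,
++ *			       (size + PAGE_SIZE - 1) >> PAGE_SHIFT);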
++ */ ++int drm_ttm_set_user(struct drm_ttm *ttm, ++ struct task_struct *tsk, ++ unsigned long start, ++ unsigned long num_pages) ++{ ++ struct mm_struct *mm = tsk->mm; ++ int ret; ++ int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0; ++ ++ BUG_ON(num_pages != ttm->num_pages); ++ BUG_ON((ttm->page_flags & DRM_TTM_PAGE_USER) == 0); ++ ++ down_read(&mm->mmap_sem); ++ ret = get_user_pages(tsk, mm, start, num_pages, ++ write, 0, ttm->pages, NULL); ++ up_read(&mm->mmap_sem); ++ ++ if (ret != num_pages && write) { ++ drm_ttm_free_user_pages(ttm); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++/** ++ * drm_ttm_populate: ++ * ++ * @ttm: the object to allocate pages for ++ * ++ * Allocate pages for all unset page entries, then ++ * call the backend to create the hardware mappings ++ */ ++int drm_ttm_populate(struct drm_ttm *ttm) ++{ ++ struct page *page; ++ unsigned long i; ++ struct drm_ttm_backend *be; ++ ++ if (ttm->state != ttm_unpopulated) ++ return 0; ++ ++ be = ttm->be; ++ ++ for (i = 0; i < ttm->num_pages; ++i) { ++ page = drm_ttm_get_page(ttm, i); ++ if (!page) ++ return -ENOMEM; ++ } ++ ++ be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page); ++ ttm->state = ttm_unbound; ++ return 0; ++} ++ ++/** ++ * drm_ttm_create: ++ * ++ * @dev: the drm_device ++ * ++ * @size: The size (in bytes) of the desired object ++ * ++ * @page_flags: various DRM_TTM_PAGE_* flags. See drm_object.h. ++ * ++ * Allocate and initialize a ttm, leaving it unpopulated at this time ++ */ ++ ++struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size, ++ uint32_t page_flags, struct page *dummy_read_page) ++{ ++ struct drm_bo_driver *bo_driver = dev->driver->bo_driver; ++ struct drm_ttm *ttm; ++ ++ if (!bo_driver) ++ return NULL; ++ ++ ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM); ++ if (!ttm) ++ return NULL; ++ ++ ttm->dev = dev; ++ atomic_set(&ttm->vma_count, 0); ++ ++ ttm->destroy = 0; ++ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ ttm->first_himem_page = ttm->num_pages; ++ ttm->last_lomem_page = -1; ++ ++ ttm->page_flags = page_flags; ++ ++ ttm->dummy_read_page = dummy_read_page; ++ ++ /* ++ * Account also for AGP module memory usage. ++ */ ++ ++ drm_ttm_alloc_page_directory(ttm); ++ if (!ttm->pages) { ++ drm_ttm_destroy(ttm); ++ DRM_ERROR("Failed allocating page table\n"); ++ return NULL; ++ } ++ ttm->be = bo_driver->create_ttm_backend_entry(dev); ++ if (!ttm->be) { ++ drm_ttm_destroy(ttm); ++ DRM_ERROR("Failed creating ttm backend entry\n"); ++ return NULL; ++ } ++ ttm->state = ttm_unpopulated; ++ return ttm; ++} ++ ++/** ++ * drm_ttm_evict: ++ * ++ * @ttm: the object to be unbound from the aperture. ++ * ++ * Transition a ttm from bound to evicted, where it ++ * isn't present in the aperture, but various caches may ++ * not be consistent. ++ */ ++void drm_ttm_evict(struct drm_ttm *ttm) ++{ ++ struct drm_ttm_backend *be = ttm->be; ++ int ret; ++ ++ if (ttm->state == ttm_bound) { ++ ret = be->func->unbind(be); ++ BUG_ON(ret); ++ } ++ ++ ttm->state = ttm_evicted; ++} ++ ++/** ++ * drm_ttm_fixup_caching: ++ * ++ * @ttm: the object to set unbound ++ * ++ * XXX this function is misnamed. Transition a ttm from evicted to ++ * unbound, flushing caches as appropriate. 
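++ * Whether any adjustment is needed is decided by the backend's
++ * needs_ub_cache_adjust() hook.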
++ */
++void drm_ttm_fixup_caching(struct drm_ttm *ttm)
++{
++
++	if (ttm->state == ttm_evicted) {
++		struct drm_ttm_backend *be = ttm->be;
++		if (be->func->needs_ub_cache_adjust(be))
++			drm_ttm_set_caching(ttm, 0);
++		ttm->state = ttm_unbound;
++	}
++}
++
++/**
++ * drm_ttm_unbind:
++ *
++ * @ttm: the object to unbind from the graphics device
++ *
++ * Unbind an object from the aperture. This removes the mappings
++ * from the graphics device and flushes caches if necessary.
++ */
++void drm_ttm_unbind(struct drm_ttm *ttm)
++{
++	if (ttm->state == ttm_bound)
++		drm_ttm_evict(ttm);
++
++	drm_ttm_fixup_caching(ttm);
++}
++
++/**
++ * drm_ttm_bind:
++ *
++ * @ttm: the ttm object to bind to the graphics device
++ *
++ * @bo_mem: the aperture memory region which will hold the object
++ *
++ * Bind a ttm object to the aperture. This ensures that the necessary
++ * pages are allocated, flushes CPU caches as needed and, for user ttms,
++ * marks the ttm DRM_TTM_PAGE_USER_DIRTY to indicate that it may have
++ * been modified by the GPU.
++ */
++int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
++{
++	struct drm_bo_driver *bo_driver;
++	int ret = 0;
++	struct drm_ttm_backend *be;
++
++	if (!ttm)
++		return -EINVAL;
++	if (ttm->state == ttm_bound)
++		return 0;
++
++	/* Don't dereference ttm->dev before the NULL check above. */
++	bo_driver = ttm->dev->driver->bo_driver;
++	be = ttm->be;
++
++	ret = drm_ttm_populate(ttm);
++	if (ret)
++		return ret;
++
++	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
++		drm_ttm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
++	else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
++		 bo_driver->ttm_cache_flush)
++		bo_driver->ttm_cache_flush(ttm);
++
++	ret = be->func->bind(be, bo_mem);
++	if (ret) {
++		ttm->state = ttm_evicted;
++		DRM_ERROR("Couldn't bind backend.\n");
++		return ret;
++	}
++
++	ttm->state = ttm_bound;
++	if (ttm->page_flags & DRM_TTM_PAGE_USER)
++		ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
++	return 0;
++}
++EXPORT_SYMBOL(drm_ttm_bind);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_vm.c git-nokia/drivers/gpu/drm-tungsten/drm_vm.c
+--- git/drivers/gpu/drm-tungsten/drm_vm.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_vm.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,890 @@
++/**
++ * \file drm_vm.c
++ * Memory mapping for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++#if defined(__ia64__)
++#include <linux/efi.h>
++#endif
++
++static void drm_vm_open(struct vm_area_struct *vma);
++static void drm_vm_close(struct vm_area_struct *vma);
++static int drm_bo_mmap_locked(struct vm_area_struct *vma,
++			      struct file *filp,
++			      drm_local_map_t *map);
++
++
++pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
++{
++	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
++
++#if defined(__i386__) || defined(__x86_64__)
++	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
++		pgprot_val(tmp) |= _PAGE_PCD;
++		pgprot_val(tmp) &= ~_PAGE_PWT;
++	}
++#elif defined(__powerpc__)
++	pgprot_val(tmp) |= _PAGE_NO_CACHE;
++	if (map_type == _DRM_REGISTERS)
++		pgprot_val(tmp) |= _PAGE_GUARDED;
++#elif defined(__ia64__)
++	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
++			    vma->vm_start))
++		tmp = pgprot_writecombine(tmp);
++	else
++		tmp = pgprot_noncached(tmp);
++#elif defined(__sparc__)
++	tmp = pgprot_noncached(tmp);
++#endif
++	return tmp;
++}
++
++static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
++{
++	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
++
++#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
++	tmp |= _PAGE_NO_CACHE;
++#endif
++	return tmp;
++}
++
++#ifndef DRM_VM_NOPAGE
++/**
++ * \c fault method for AGP virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param vmf fault data (faulting address in, resolved page out).
++ * \return zero with vmf->page set on success, or VM_FAULT_SIGBUS.
++ *
++ * Find the right map and if it's AGP memory find the real physical page to
++ * map, get the page, increment the use count and return it.
++ */
++#if __OS_HAS_AGP
++static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++	struct drm_file *priv = vma->vm_file->private_data;
++	struct drm_device *dev = priv->minor->dev;
++	struct drm_map *map = NULL;
++	struct drm_map_list *r_list;
++	struct drm_hash_item *hash;
++
++	/*
++	 * Find the right map
++	 */
++	if (!drm_core_has_AGP(dev))
++		goto vm_fault_error;
++
++	if (!dev->agp || !dev->agp->cant_use_aperture)
++		goto vm_fault_error;
++
++	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
++		goto vm_fault_error;
++
++	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
++	map = r_list->map;
++
++	if (map && map->type == _DRM_AGP) {
++		/*
++		 * Using vm_pgoff as a selector forces us to use this unusual
++		 * addressing scheme.
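++		 *
++		 * As a worked example (illustrative numbers): a fault
++		 * 0x3000 bytes into the vma gives baddr = map->offset +
++		 * 0x3000; the drm_agp_mem entry whose
++		 * [bound, bound + pages * PAGE_SIZE) range contains baddr
++		 * then supplies the backing page at index
++		 * (baddr - agpmem->bound) >> PAGE_SHIFT.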
++ */ ++ unsigned long offset = (unsigned long)vmf->virtual_address - ++ vma->vm_start; ++ unsigned long baddr = map->offset + offset; ++ struct drm_agp_mem *agpmem; ++ struct page *page; ++ ++#ifdef __alpha__ ++ /* ++ * Adjust to a bus-relative address ++ */ ++ baddr -= dev->hose->mem_space->start; ++#endif ++ ++ /* ++ * It's AGP memory - find the real physical page to map ++ */ ++ list_for_each_entry(agpmem, &dev->agp->memory, head) { ++ if (agpmem->bound <= baddr && ++ agpmem->bound + agpmem->pages * PAGE_SIZE > baddr) ++ break; ++ } ++ ++ if (!agpmem) ++ goto vm_fault_error; ++ ++ /* ++ * Get the page, inc the use count, and return it ++ */ ++ offset = (baddr - agpmem->bound) >> PAGE_SHIFT; ++ page = virt_to_page(__va(agpmem->memory->memory[offset])); ++ get_page(page); ++ vmf->page = page; ++ ++ DRM_DEBUG ++ ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n", ++ baddr, __va(agpmem->memory->memory[offset]), offset, ++ page_count(page)); ++ return 0; ++ } ++vm_fault_error: ++ return VM_FAULT_SIGBUS; /* Disallow mremap */ ++} ++#else /* __OS_HAS_AGP */ ++static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ return VM_FAULT_SIGBUS; ++} ++#endif /* __OS_HAS_AGP */ ++ ++/** ++ * \c nopage method for shared virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Get the mapping, find the real physical page to map, get the page, and ++ * return it. ++ */ ++static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ struct drm_map *map = (struct drm_map *) vma->vm_private_data; ++ unsigned long offset; ++ unsigned long i; ++ struct page *page; ++ ++ if (!map) ++ return VM_FAULT_SIGBUS; /* Nothing allocated */ ++ ++ offset = (unsigned long)vmf->virtual_address - vma->vm_start; ++ i = (unsigned long)map->handle + offset; ++ page = vmalloc_to_page((void *)i); ++ if (!page) ++ return VM_FAULT_SIGBUS; ++ get_page(page); ++ vmf->page = page; ++ ++ DRM_DEBUG("shm_fault 0x%lx\n", offset); ++ return 0; ++} ++#endif ++ ++/** ++ * \c close method for shared virtual memory. ++ * ++ * \param vma virtual memory area. ++ * ++ * Deletes map information if we are the last ++ * person to close a mapping and it's not in the global maplist. ++ */ ++static void drm_vm_shm_close(struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_vma_entry *pt, *temp; ++ struct drm_map *map; ++ struct drm_map_list *r_list; ++ int found_maps = 0; ++ ++ DRM_DEBUG("0x%08lx,0x%08lx\n", ++ vma->vm_start, vma->vm_end - vma->vm_start); ++ atomic_dec(&dev->vma_count); ++ ++ map = vma->vm_private_data; ++ ++ mutex_lock(&dev->struct_mutex); ++ list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { ++ if (pt->vma->vm_private_data == map) ++ found_maps++; ++ if (pt->vma == vma) { ++ list_del(&pt->head); ++ drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS); ++ } ++ } ++ /* We were the only map that was found */ ++ if (found_maps == 1 && map->flags & _DRM_REMOVABLE) { ++ /* Check to see if we are in the maplist, if we are not, then ++ * we delete this mappings information. 
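++		 * Put differently, the map itself is freed only when all
++		 * three conditions hold: this vma was the last one
++		 * referencing it, the map is marked _DRM_REMOVABLE, and
++		 * the map is no longer on dev->maplist.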
++ */ ++ found_maps = 0; ++ list_for_each_entry(r_list, &dev->maplist, head) { ++ if (r_list->map == map) ++ found_maps++; ++ } ++ ++ if (!found_maps) { ++ drm_dma_handle_t dmah; ++ ++ switch (map->type) { ++ case _DRM_REGISTERS: ++ case _DRM_FRAME_BUFFER: ++ if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { ++ int retcode; ++ retcode = mtrr_del(map->mtrr, ++ map->offset, ++ map->size); ++ DRM_DEBUG("mtrr_del = %d\n", retcode); ++ } ++ iounmap(map->handle); ++ break; ++ case _DRM_SHM: ++ vfree(map->handle); ++ break; ++ case _DRM_AGP: ++ case _DRM_SCATTER_GATHER: ++ break; ++ case _DRM_CONSISTENT: ++ dmah.vaddr = map->handle; ++ dmah.busaddr = map->offset; ++ dmah.size = map->size; ++ __drm_pci_free(dev, &dmah); ++ break; ++ case _DRM_TTM: ++ BUG_ON(1); ++ break; ++ } ++ drm_free(map, sizeof(*map), DRM_MEM_MAPS); ++ } ++ } ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++#ifndef DRM_VM_NOPAGE ++/** ++ * \c fault method for DMA virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Determine the page number from the page offset and get it from drm_device_dma::pagelist. ++ */ ++static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_device_dma *dma = dev->dma; ++ unsigned long offset; ++ unsigned long page_nr; ++ struct page *page; ++ ++ if (!dma) ++ return VM_FAULT_SIGBUS; /* Error */ ++ if (!dma->pagelist) ++ return VM_FAULT_SIGBUS; /* Nothing allocated */ ++ ++ offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ ++ page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */ ++ page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK)))); ++ ++ get_page(page); ++ vmf->page = page; ++ ++ DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr); ++ return 0; ++} ++ ++/** ++ * \c fault method for scatter-gather virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. 
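++ *
++ * For example (illustrative values, PAGE_SHIFT == 12): a fault 0x5000
++ * bytes into the vma, on a map that starts 0x2000 bytes into the
++ * scatter-gather area, resolves to
++ * pagelist[(0x5000 >> 12) + (0x2000 >> 12)] == pagelist[7].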
++ */ ++static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ struct drm_map *map = (struct drm_map *) vma->vm_private_data; ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_sg_mem *entry = dev->sg; ++ unsigned long offset; ++ unsigned long map_offset; ++ unsigned long page_offset; ++ struct page *page; ++ ++ if (!entry) ++ return VM_FAULT_SIGBUS; /* Error */ ++ if (!entry->pagelist) ++ return VM_FAULT_SIGBUS; /* Nothing allocated */ ++ ++ offset = (unsigned long)vmf->virtual_address - vma->vm_start; ++ map_offset = map->offset - (unsigned long)dev->sg->virtual; ++ page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); ++ page = entry->pagelist[page_offset]; ++ get_page(page); ++ vmf->page = page; ++ ++ return 0; ++} ++#endif ++ ++/** AGP virtual memory operations */ ++static struct vm_operations_struct drm_vm_ops = { ++#ifdef DRM_VM_NOPAGE ++ .nopage = drm_vm_nopage, ++#else ++ .fault = drm_do_vm_fault, ++#endif ++ .open = drm_vm_open, ++ .close = drm_vm_close, ++}; ++ ++/** Shared virtual memory operations */ ++static struct vm_operations_struct drm_vm_shm_ops = { ++#ifdef DRM_VM_NOPAGE ++ .nopage = drm_vm_shm_nopage, ++#else ++ .fault = drm_do_vm_shm_fault, ++#endif ++ .open = drm_vm_open, ++ .close = drm_vm_shm_close, ++}; ++ ++/** DMA virtual memory operations */ ++static struct vm_operations_struct drm_vm_dma_ops = { ++#ifdef DRM_VM_NOPAGE ++ .nopage = drm_vm_dma_nopage, ++#else ++ .fault = drm_do_vm_dma_fault, ++#endif ++ .open = drm_vm_open, ++ .close = drm_vm_close, ++}; ++ ++/** Scatter-gather virtual memory operations */ ++static struct vm_operations_struct drm_vm_sg_ops = { ++#ifdef DRM_VM_NOPAGE ++ .nopage = drm_vm_sg_nopage, ++#else ++ .fault = drm_do_vm_sg_fault, ++#endif ++ .open = drm_vm_open, ++ .close = drm_vm_close, ++}; ++ ++/** ++ * \c open method for shared virtual memory. ++ * ++ * \param vma virtual memory area. ++ * ++ * Create a new drm_vma_entry structure as the \p vma private data entry and ++ * add it to drm_device::vmalist. ++ */ ++static void drm_vm_open_locked(struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_vma_entry *vma_entry; ++ ++ DRM_DEBUG("0x%08lx,0x%08lx\n", ++ vma->vm_start, vma->vm_end - vma->vm_start); ++ atomic_inc(&dev->vma_count); ++ ++ vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); ++ if (vma_entry) { ++ vma_entry->vma = vma; ++ vma_entry->pid = current->pid; ++ list_add(&vma_entry->head, &dev->vmalist); ++ } ++} ++ ++static void drm_vm_open(struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_vm_open_locked(vma); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/** ++ * \c close method for all virtual memory types. ++ * ++ * \param vma virtual memory area. ++ * ++ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and ++ * free it. 
++ */ ++static void drm_vm_close(struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_vma_entry *pt, *temp; ++ ++ DRM_DEBUG("0x%08lx,0x%08lx\n", ++ vma->vm_start, vma->vm_end - vma->vm_start); ++ atomic_dec(&dev->vma_count); ++ ++ mutex_lock(&dev->struct_mutex); ++ list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { ++ if (pt->vma == vma) { ++ list_del(&pt->head); ++ drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS); ++ break; ++ } ++ } ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++ ++/** ++ * mmap DMA memory. ++ * ++ * \param file_priv DRM file private. ++ * \param vma virtual memory area. ++ * \return zero on success or a negative number on failure. ++ * ++ * Sets the virtual memory area operations structure to vm_dma_ops, the file ++ * pointer, and calls vm_open(). ++ */ ++static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct drm_device *dev; ++ struct drm_device_dma *dma; ++ unsigned long length = vma->vm_end - vma->vm_start; ++ ++ dev = priv->minor->dev; ++ dma = dev->dma; ++ DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", ++ vma->vm_start, vma->vm_end, vma->vm_pgoff); ++ ++ /* Length must match exact page count */ ++ if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { ++ return -EINVAL; ++ } ++ ++ if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) { ++ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); ++#if defined(__i386__) || defined(__x86_64__) ++ pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; ++#else ++ /* Ye gads this is ugly. With more thought ++ we could move this up higher and use ++ `protection_map' instead. */ ++ vma->vm_page_prot = ++ __pgprot(pte_val ++ (pte_wrprotect ++ (__pte(pgprot_val(vma->vm_page_prot))))); ++#endif ++ } ++ ++ vma->vm_ops = &drm_vm_dma_ops; ++ vma->vm_flags |= VM_RESERVED; /* Don't swap */ ++ ++ vma->vm_file = filp; /* Needed for drm_vm_open() */ ++ drm_vm_open_locked(vma); ++ return 0; ++} ++ ++unsigned long drm_core_get_map_ofs(struct drm_map * map) ++{ ++ return map->offset; ++} ++EXPORT_SYMBOL(drm_core_get_map_ofs); ++ ++unsigned long drm_core_get_reg_ofs(struct drm_device *dev) ++{ ++#ifdef __alpha__ ++ return dev->hose->dense_mem_base - dev->hose->mem_space->start; ++#else ++ return 0; ++#endif ++} ++EXPORT_SYMBOL(drm_core_get_reg_ofs); ++ ++/** ++ * mmap DMA memory. ++ * ++ * \param file_priv DRM file private. ++ * \param vma virtual memory area. ++ * \return zero on success or a negative number on failure. ++ * ++ * If the virtual memory area has no offset associated with it then it's a DMA ++ * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist, ++ * checks that the restricted flag is not set, sets the virtual memory operations ++ * according to the mapping type and remaps the pages. Finally sets the file ++ * pointer and calls vm_open(). ++ */ ++static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_map *map = NULL; ++ unsigned long offset = 0; ++ struct drm_hash_item *hash; ++ ++ DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", ++ vma->vm_start, vma->vm_end, vma->vm_pgoff); ++ ++ if (!priv->authenticated) ++ return -EACCES; ++ ++ /* We check for "dma". On Apple's UniNorth, it's valid to have ++ * the AGP mapped at physical address 0 ++ * --BenH. 
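++	 *
++	 * In userspace terms: mapping the DRM fd at offset zero, e.g.
++	 *
++	 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
++	 *
++	 * (an illustrative call, not from this patch) normally reaches
++	 * drm_mmap_dma() below; the AGP test only guards the UniNorth
++	 * case, where a real map may also live at offset 0.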
++ */ ++ ++ if (!vma->vm_pgoff ++#if __OS_HAS_AGP ++ && (!dev->agp ++ || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE) ++#endif ++ ) ++ return drm_mmap_dma(filp, vma); ++ ++ if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) { ++ DRM_ERROR("Could not find map\n"); ++ return -EINVAL; ++ } ++ ++ map = drm_hash_entry(hash, struct drm_map_list, hash)->map; ++ if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) ++ return -EPERM; ++ ++ /* Check for valid size. */ ++ if (map->size < vma->vm_end - vma->vm_start) ++ return -EINVAL; ++ ++ if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) { ++ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); ++#if defined(__i386__) || defined(__x86_64__) ++ pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; ++#else ++ /* Ye gads this is ugly. With more thought ++ we could move this up higher and use ++ `protection_map' instead. */ ++ vma->vm_page_prot = ++ __pgprot(pte_val ++ (pte_wrprotect ++ (__pte(pgprot_val(vma->vm_page_prot))))); ++#endif ++ } ++ ++ switch (map->type) { ++ case _DRM_AGP: ++ if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) { ++ /* ++ * On some platforms we can't talk to bus dma address from the CPU, so for ++ * memory of type DRM_AGP, we'll deal with sorting out the real physical ++ * pages and mappings in nopage() ++ */ ++#if defined(__powerpc__) ++ pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; ++#endif ++ vma->vm_ops = &drm_vm_ops; ++ break; ++ } ++ /* fall through to _DRM_FRAME_BUFFER... */ ++ case _DRM_FRAME_BUFFER: ++ case _DRM_REGISTERS: ++ offset = dev->driver->get_reg_ofs(dev); ++ vma->vm_flags |= VM_IO; /* not in core dump */ ++ vma->vm_page_prot = drm_io_prot(map->type, vma); ++ if (io_remap_pfn_range(vma, vma->vm_start, ++ (map->offset + offset) >> PAGE_SHIFT, ++ vma->vm_end - vma->vm_start, ++ vma->vm_page_prot)) ++ return -EAGAIN; ++ DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," ++ " offset = 0x%lx\n", ++ map->type, ++ vma->vm_start, vma->vm_end, map->offset + offset); ++ vma->vm_ops = &drm_vm_ops; ++ break; ++ case _DRM_CONSISTENT: ++ /* Consistent memory is really like shared memory. But ++ * it's allocated in a different way, so avoid nopage */ ++ if (remap_pfn_range(vma, vma->vm_start, ++ page_to_pfn(virt_to_page(map->handle)), ++ vma->vm_end - vma->vm_start, vma->vm_page_prot)) ++ return -EAGAIN; ++ vma->vm_page_prot = drm_dma_prot(map->type, vma); ++ /* fall through to _DRM_SHM */ ++ case _DRM_SHM: ++ vma->vm_ops = &drm_vm_shm_ops; ++ vma->vm_private_data = (void *)map; ++ /* Don't let this area swap. Change when ++ DRM_KERNEL advisory is supported. */ ++ vma->vm_flags |= VM_RESERVED; ++ break; ++ case _DRM_SCATTER_GATHER: ++ vma->vm_ops = &drm_vm_sg_ops; ++ vma->vm_private_data = (void *)map; ++ vma->vm_flags |= VM_RESERVED; ++ vma->vm_page_prot = drm_dma_prot(map->type, vma); ++ break; ++ case _DRM_TTM: ++ return drm_bo_mmap_locked(vma, filp, map); ++ default: ++ return -EINVAL; /* This should never happen. */ ++ } ++ vma->vm_flags |= VM_RESERVED; /* Don't swap */ ++ ++ vma->vm_file = filp; /* Needed for drm_vm_open() */ ++ drm_vm_open_locked(vma); ++ return 0; ++} ++ ++int drm_mmap(struct file *filp, struct vm_area_struct *vma) ++{ ++ struct drm_file *priv = filp->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = drm_mmap_locked(filp, vma); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return ret; ++} ++EXPORT_SYMBOL(drm_mmap); ++ ++/** ++ * buffer object vm functions. 
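++ *
++ * In outline: _DRM_TTM maps reach drm_bo_mmap_locked() below, and each
++ * fault resolves to a pfn, either directly into the aperture or via a
++ * ttm page, which is then inserted with vm_insert_pfn().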
++ */ ++ ++/** ++ * \c Pagefault method for buffer objects. ++ * ++ * \param vma Virtual memory area. ++ * \param vmf vm fault data ++ * \return Error or VM_FAULT_NOPAGE:. The pfn is manually inserted. ++ * ++ * It's important that pfns are inserted while holding the bo->mutex lock. ++ * otherwise we might race with unmap_mapping_range() which is always ++ * called with the bo->mutex lock held. ++ * ++ * We're modifying the page attribute bits of the vma->vm_page_prot field, ++ * without holding the mmap_sem in write mode. Only in read mode. ++ * These bits are not used by the mm subsystem code, and we consider them ++ * protected by the bo->mutex lock. ++ */ ++ ++#if defined(DRM_FULL_MM_COMPAT) && !defined(DRM_NO_FAULT) ++static int drm_bo_vm_fault(struct vm_area_struct *vma, ++ struct vm_fault *vmf) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ unsigned long page_offset; ++ struct page *page = NULL; ++ struct drm_ttm *ttm; ++ struct drm_device *dev; ++ unsigned long pfn; ++ int err; ++ unsigned long bus_base; ++ unsigned long bus_offset; ++ unsigned long bus_size; ++ unsigned long ret = VM_FAULT_NOPAGE; ++ ++ dev = bo->dev; ++ err = drm_bo_read_lock(&dev->bm.bm_lock, 1); ++ if (err) ++ return VM_FAULT_NOPAGE; ++ ++ err = mutex_lock_interruptible(&bo->mutex); ++ if (err) { ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return VM_FAULT_NOPAGE; ++ } ++ ++ err = drm_bo_wait(bo, 0, 1, 0, 1); ++ if (err) { ++ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ goto out_unlock; ++ } ++ ++ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; ++ ++ /* ++ * If buffer happens to be in a non-mappable location, ++ * move it to a mappable. ++ */ ++ ++ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { ++ uint32_t new_flags = bo->mem.proposed_flags | ++ DRM_BO_FLAG_MAPPABLE | ++ DRM_BO_FLAG_FORCE_MAPPABLE; ++ err = drm_bo_move_buffer(bo, new_flags, 0, 0); ++ if (err) { ++ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; ++ goto out_unlock; ++ } ++ } ++ ++ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, ++ &bus_size); ++ ++ if (err) { ++ ret = VM_FAULT_SIGBUS; ++ goto out_unlock; ++ } ++ ++ page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT; ++ ++ if (bus_size) { ++ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; ++ ++ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; ++ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); ++ } else { ++ ttm = bo->ttm; ++ ++ drm_ttm_fixup_caching(ttm); ++ page = drm_ttm_get_page(ttm, page_offset); ++ if (!page) { ++ ret = VM_FAULT_OOM; ++ goto out_unlock; ++ } ++ pfn = page_to_pfn(page); ++ vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ? ++ vm_get_page_prot(vma->vm_flags) : ++ drm_io_prot(_DRM_TTM, vma); ++ } ++ ++ err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); ++ if (err) { ++ ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE; ++ goto out_unlock; ++ } ++out_unlock: ++ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ++ mutex_unlock(&bo->mutex); ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return ret; ++} ++#endif ++ ++static void drm_bo_vm_open_locked(struct vm_area_struct *vma) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ ++ drm_vm_open_locked(vma); ++ atomic_inc(&bo->usage); ++#ifdef DRM_ODD_MM_COMPAT ++ drm_bo_add_vma(bo, vma); ++#endif ++} ++ ++/** ++ * \c vma open method for buffer objects. 
++ * ++ * \param vma virtual memory area. ++ */ ++ ++static void drm_bo_vm_open(struct vm_area_struct *vma) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ struct drm_device *dev = bo->dev; ++ ++ mutex_lock(&dev->struct_mutex); ++ drm_bo_vm_open_locked(vma); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/** ++ * \c vma close method for buffer objects. ++ * ++ * \param vma virtual memory area. ++ */ ++ ++static void drm_bo_vm_close(struct vm_area_struct *vma) ++{ ++ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; ++ struct drm_device *dev = bo->dev; ++ ++ drm_vm_close(vma); ++ if (bo) { ++ mutex_lock(&dev->struct_mutex); ++#ifdef DRM_ODD_MM_COMPAT ++ drm_bo_delete_vma(bo, vma); ++#endif ++ drm_bo_usage_deref_locked((struct drm_buffer_object **) ++ &vma->vm_private_data); ++ mutex_unlock(&dev->struct_mutex); ++ } ++ return; ++} ++ ++static struct vm_operations_struct drm_bo_vm_ops = { ++#ifdef DRM_FULL_MM_COMPAT ++#ifdef DRM_NO_FAULT ++ .nopfn = drm_bo_vm_nopfn, ++#else ++ .fault = drm_bo_vm_fault, ++#endif ++#else ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) ++ .nopfn = drm_bo_vm_nopfn, ++#else ++ .nopage = drm_bo_vm_nopage, ++#endif ++#endif ++ .open = drm_bo_vm_open, ++ .close = drm_bo_vm_close, ++}; ++ ++/** ++ * mmap buffer object memory. ++ * ++ * \param vma virtual memory area. ++ * \param file_priv DRM file private. ++ * \param map The buffer object drm map. ++ * \return zero on success or a negative number on failure. ++ */ ++ ++int drm_bo_mmap_locked(struct vm_area_struct *vma, ++ struct file *filp, ++ drm_local_map_t *map) ++{ ++ vma->vm_ops = &drm_bo_vm_ops; ++ vma->vm_private_data = map->handle; ++ vma->vm_file = filp; ++ vma->vm_flags |= VM_RESERVED | VM_IO; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) ++ vma->vm_flags |= VM_PFNMAP; ++#endif ++ drm_bo_vm_open_locked(vma); ++#ifdef DRM_ODD_MM_COMPAT ++ drm_bo_map_bound(vma); ++#endif ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c git-nokia/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c +--- git/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,267 @@ ++/** ++ * \file drm_vm.c ++ * Memory mapping for DRM ++ * ++ * \author Rickard E. (Rik) Faith ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++ ++#ifdef DRM_VM_NOPAGE ++/** ++ * \c nopage method for AGP virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Find the right map and if it's AGP memory find the real physical page to ++ * map, get the page, increment the use count and return it. ++ */ ++#if __OS_HAS_AGP ++static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_map *map = NULL; ++ struct drm_map_list *r_list; ++ struct drm_hash_item *hash; ++ ++ /* ++ * Find the right map ++ */ ++ if (!drm_core_has_AGP(dev)) ++ goto vm_nopage_error; ++ ++ if (!dev->agp || !dev->agp->cant_use_aperture) ++ goto vm_nopage_error; ++ ++ if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) ++ goto vm_nopage_error; ++ ++ r_list = drm_hash_entry(hash, struct drm_map_list, hash); ++ map = r_list->map; ++ ++ if (map && map->type == _DRM_AGP) { ++ unsigned long offset = address - vma->vm_start; ++ unsigned long baddr = map->offset + offset; ++ struct drm_agp_mem *agpmem; ++ struct page *page; ++ ++#ifdef __alpha__ ++ /* ++ * Adjust to a bus-relative address ++ */ ++ baddr -= dev->hose->mem_space->start; ++#endif ++ ++ /* ++ * It's AGP memory - find the real physical page to map ++ */ ++ list_for_each_entry(agpmem, &dev->agp->memory, head) { ++ if (agpmem->bound <= baddr && ++ agpmem->bound + agpmem->pages * PAGE_SIZE > baddr) ++ break; ++ } ++ ++ if (!agpmem) ++ goto vm_nopage_error; ++ ++ /* ++ * Get the page, inc the use count, and return it ++ */ ++ offset = (baddr - agpmem->bound) >> PAGE_SHIFT; ++ page = virt_to_page(__va(agpmem->memory->memory[offset])); ++ get_page(page); ++ ++#if 0 ++ /* page_count() not defined everywhere */ ++ DRM_DEBUG ++ ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n", ++ baddr, __va(agpmem->memory->memory[offset]), offset, ++ page_count(page)); ++#endif ++ ++ return page; ++ } ++ vm_nopage_error: ++ return NOPAGE_SIGBUS; /* Disallow mremap */ ++} ++#else /* __OS_HAS_AGP */ ++static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address) ++{ ++ return NOPAGE_SIGBUS; ++} ++#endif /* __OS_HAS_AGP */ ++ ++/** ++ * \c nopage method for shared virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Get the mapping, find the real physical page to map, get the page, and ++ * return it. 
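++ *
++ * The _DRM_SHM handle is a vmalloc()ed kernel address, so the page
++ * backing a faulting address is simply
++ * vmalloc_to_page(map->handle + (address - vma->vm_start)).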
++ */ ++static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, ++ unsigned long address) ++{ ++ struct drm_map *map = (struct drm_map *) vma->vm_private_data; ++ unsigned long offset; ++ unsigned long i; ++ struct page *page; ++ ++ if (address > vma->vm_end) ++ return NOPAGE_SIGBUS; /* Disallow mremap */ ++ if (!map) ++ return NOPAGE_SIGBUS; /* Nothing allocated */ ++ ++ offset = address - vma->vm_start; ++ i = (unsigned long)map->handle + offset; ++ page = vmalloc_to_page((void *)i); ++ if (!page) ++ return NOPAGE_SIGBUS; ++ get_page(page); ++ ++ DRM_DEBUG("0x%lx\n", address); ++ return page; ++} ++ ++/** ++ * \c nopage method for DMA virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Determine the page number from the page offset and get it from drm_device_dma::pagelist. ++ */ ++static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, ++ unsigned long address) ++{ ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_device_dma *dma = dev->dma; ++ unsigned long offset; ++ unsigned long page_nr; ++ struct page *page; ++ ++ if (!dma) ++ return NOPAGE_SIGBUS; /* Error */ ++ if (address > vma->vm_end) ++ return NOPAGE_SIGBUS; /* Disallow mremap */ ++ if (!dma->pagelist) ++ return NOPAGE_SIGBUS; /* Nothing allocated */ ++ ++ offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ ++ page_nr = offset >> PAGE_SHIFT; ++ page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK)))); ++ ++ get_page(page); ++ ++ DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr); ++ return page; ++} ++ ++/** ++ * \c nopage method for scatter-gather virtual memory. ++ * ++ * \param vma virtual memory area. ++ * \param address access address. ++ * \return pointer to the page structure. ++ * ++ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. 
++ */ ++static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, ++ unsigned long address) ++{ ++ struct drm_map *map = (struct drm_map *) vma->vm_private_data; ++ struct drm_file *priv = vma->vm_file->private_data; ++ struct drm_device *dev = priv->minor->dev; ++ struct drm_sg_mem *entry = dev->sg; ++ unsigned long offset; ++ unsigned long map_offset; ++ unsigned long page_offset; ++ struct page *page; ++ ++ DRM_DEBUG("\n"); ++ if (!entry) ++ return NOPAGE_SIGBUS; /* Error */ ++ if (address > vma->vm_end) ++ return NOPAGE_SIGBUS; /* Disallow mremap */ ++ if (!entry->pagelist) ++ return NOPAGE_SIGBUS; /* Nothing allocated */ ++ ++ offset = address - vma->vm_start; ++ map_offset = map->offset - (unsigned long)dev->sg->virtual; ++ page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); ++ page = entry->pagelist[page_offset]; ++ get_page(page); ++ ++ return page; ++} ++ ++ ++struct page *drm_vm_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type) ++{ ++ if (type) ++ *type = VM_FAULT_MINOR; ++ return drm_do_vm_nopage(vma, address); ++} ++ ++struct page *drm_vm_shm_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type) ++{ ++ if (type) ++ *type = VM_FAULT_MINOR; ++ return drm_do_vm_shm_nopage(vma, address); ++} ++ ++struct page *drm_vm_dma_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type) ++{ ++ if (type) ++ *type = VM_FAULT_MINOR; ++ return drm_do_vm_dma_nopage(vma, address); ++} ++ ++struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, ++ unsigned long address, int *type) ++{ ++ if (type) ++ *type = VM_FAULT_MINOR; ++ return drm_do_vm_sg_nopage(vma, address); ++} ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/ffb_context.c git-nokia/drivers/gpu/drm-tungsten/ffb_context.c +--- git/drivers/gpu/drm-tungsten/ffb_context.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/ffb_context.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,582 @@ ++/* $Id$ ++ * ffb_context.c: Creator/Creator3D DRI/DRM context switching. ++ * ++ * Copyright (C) 2000 David S. Miller (davem@redhat.com) ++ * ++ * Almost entirely stolen from tdfx_context.c, see there ++ * for authors. ++ */ ++ ++#include ++#include ++ ++#include "drmP.h" ++#include "ffb_drv.h" ++ ++static int ffb_alloc_queue(struct drm_device * dev, int is_2d_only) { ++ ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; ++ int i; ++ ++ for (i = 0; i < FFB_MAX_CTXS; i++) { ++ if (fpriv->hw_state[i] == NULL) ++ break; ++ } ++ if (i == FFB_MAX_CTXS) ++ return -1; ++ ++ fpriv->hw_state[i] = kmalloc(sizeof(struct ffb_hw_context), GFP_KERNEL); ++ if (fpriv->hw_state[i] == NULL) ++ return -1; ++ ++ fpriv->hw_state[i]->is_2d_only = is_2d_only; ++ ++ /* Plus one because 0 is the special DRM_KERNEL_CONTEXT. */ ++ return i + 1; ++} ++ ++static void ffb_save_context(ffb_dev_priv_t * fpriv, int idx) ++{ ++ ffb_fbcPtr ffb = fpriv->regs; ++ struct ffb_hw_context *ctx; ++ int i; ++ ++ ctx = fpriv->hw_state[idx - 1]; ++ if (idx == 0 || ctx == NULL) ++ return; ++ ++ if (ctx->is_2d_only) { ++ /* 2D applications only care about certain pieces ++ * of state. 
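++		 * That state amounts to the drawop, the clip/ROP/compare/
++		 * mask registers, the font and area-pattern registers, and
++		 * ucsr; the 3D-only registers are skipped on this path.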
++ */ ++ ctx->drawop = upa_readl(&ffb->drawop); ++ ctx->ppc = upa_readl(&ffb->ppc); ++ ctx->wid = upa_readl(&ffb->wid); ++ ctx->fg = upa_readl(&ffb->fg); ++ ctx->bg = upa_readl(&ffb->bg); ++ ctx->xclip = upa_readl(&ffb->xclip); ++ ctx->fbc = upa_readl(&ffb->fbc); ++ ctx->rop = upa_readl(&ffb->rop); ++ ctx->cmp = upa_readl(&ffb->cmp); ++ ctx->matchab = upa_readl(&ffb->matchab); ++ ctx->magnab = upa_readl(&ffb->magnab); ++ ctx->pmask = upa_readl(&ffb->pmask); ++ ctx->xpmask = upa_readl(&ffb->xpmask); ++ ctx->lpat = upa_readl(&ffb->lpat); ++ ctx->fontxy = upa_readl(&ffb->fontxy); ++ ctx->fontw = upa_readl(&ffb->fontw); ++ ctx->fontinc = upa_readl(&ffb->fontinc); ++ ++ /* stencil/stencilctl only exists on FFB2+ and later ++ * due to the introduction of 3DRAM-III. ++ */ ++ if (fpriv->ffb_type == ffb2_vertical_plus || ++ fpriv->ffb_type == ffb2_horizontal_plus) { ++ ctx->stencil = upa_readl(&ffb->stencil); ++ ctx->stencilctl = upa_readl(&ffb->stencilctl); ++ } ++ ++ for (i = 0; i < 32; i++) ++ ctx->area_pattern[i] = upa_readl(&ffb->pattern[i]); ++ ctx->ucsr = upa_readl(&ffb->ucsr); ++ return; ++ } ++ ++ /* Fetch drawop. */ ++ ctx->drawop = upa_readl(&ffb->drawop); ++ ++ /* If we were saving the vertex registers, this is where ++ * we would do it. We would save 32 32-bit words starting ++ * at ffb->suvtx. ++ */ ++ ++ /* Capture rendering attributes. */ ++ ++ ctx->ppc = upa_readl(&ffb->ppc); /* Pixel Processor Control */ ++ ctx->wid = upa_readl(&ffb->wid); /* Current WID */ ++ ctx->fg = upa_readl(&ffb->fg); /* Constant FG color */ ++ ctx->bg = upa_readl(&ffb->bg); /* Constant BG color */ ++ ctx->consty = upa_readl(&ffb->consty); /* Constant Y */ ++ ctx->constz = upa_readl(&ffb->constz); /* Constant Z */ ++ ctx->xclip = upa_readl(&ffb->xclip); /* X plane clip */ ++ ctx->dcss = upa_readl(&ffb->dcss); /* Depth Cue Scale Slope */ ++ ctx->vclipmin = upa_readl(&ffb->vclipmin); /* Primary XY clip, minimum */ ++ ctx->vclipmax = upa_readl(&ffb->vclipmax); /* Primary XY clip, maximum */ ++ ctx->vclipzmin = upa_readl(&ffb->vclipzmin); /* Primary Z clip, minimum */ ++ ctx->vclipzmax = upa_readl(&ffb->vclipzmax); /* Primary Z clip, maximum */ ++ ctx->dcsf = upa_readl(&ffb->dcsf); /* Depth Cue Scale Front Bound */ ++ ctx->dcsb = upa_readl(&ffb->dcsb); /* Depth Cue Scale Back Bound */ ++ ctx->dczf = upa_readl(&ffb->dczf); /* Depth Cue Scale Z Front */ ++ ctx->dczb = upa_readl(&ffb->dczb); /* Depth Cue Scale Z Back */ ++ ctx->blendc = upa_readl(&ffb->blendc); /* Alpha Blend Control */ ++ ctx->blendc1 = upa_readl(&ffb->blendc1); /* Alpha Blend Color 1 */ ++ ctx->blendc2 = upa_readl(&ffb->blendc2); /* Alpha Blend Color 2 */ ++ ctx->fbc = upa_readl(&ffb->fbc); /* Frame Buffer Control */ ++ ctx->rop = upa_readl(&ffb->rop); /* Raster Operation */ ++ ctx->cmp = upa_readl(&ffb->cmp); /* Compare Controls */ ++ ctx->matchab = upa_readl(&ffb->matchab); /* Buffer A/B Match Ops */ ++ ctx->matchc = upa_readl(&ffb->matchc); /* Buffer C Match Ops */ ++ ctx->magnab = upa_readl(&ffb->magnab); /* Buffer A/B Magnitude Ops */ ++ ctx->magnc = upa_readl(&ffb->magnc); /* Buffer C Magnitude Ops */ ++ ctx->pmask = upa_readl(&ffb->pmask); /* RGB Plane Mask */ ++ ctx->xpmask = upa_readl(&ffb->xpmask); /* X Plane Mask */ ++ ctx->ypmask = upa_readl(&ffb->ypmask); /* Y Plane Mask */ ++ ctx->zpmask = upa_readl(&ffb->zpmask); /* Z Plane Mask */ ++ ++ /* Auxiliary Clips. 
*/
++	ctx->auxclip0min = upa_readl(&ffb->auxclip[0].min);
++	ctx->auxclip0max = upa_readl(&ffb->auxclip[0].max);
++	ctx->auxclip1min = upa_readl(&ffb->auxclip[1].min);
++	ctx->auxclip1max = upa_readl(&ffb->auxclip[1].max);
++	ctx->auxclip2min = upa_readl(&ffb->auxclip[2].min);
++	ctx->auxclip2max = upa_readl(&ffb->auxclip[2].max);
++	ctx->auxclip3min = upa_readl(&ffb->auxclip[3].min);
++	ctx->auxclip3max = upa_readl(&ffb->auxclip[3].max);
++
++	ctx->lpat = upa_readl(&ffb->lpat);	/* Line Pattern */
++	ctx->fontxy = upa_readl(&ffb->fontxy);	/* XY Font Coordinate */
++	ctx->fontw = upa_readl(&ffb->fontw);	/* Font Width */
++	ctx->fontinc = upa_readl(&ffb->fontinc);	/* Font X/Y Increment */
++
++	/* These registers/features only exist on FFB2 and later chips. */
++	if (fpriv->ffb_type >= ffb2_prototype) {
++		ctx->dcss1 = upa_readl(&ffb->dcss1);	/* Depth Cue Scale Slope 1 */
++		ctx->dcss2 = upa_readl(&ffb->dcss2);	/* Depth Cue Scale Slope 2 */
++		ctx->dcss3 = upa_readl(&ffb->dcss3);	/* Depth Cue Scale Slope 3 */
++		ctx->dcs2 = upa_readl(&ffb->dcs2);	/* Depth Cue Scale 2 */
++		ctx->dcs3 = upa_readl(&ffb->dcs3);	/* Depth Cue Scale 3 */
++		ctx->dcs4 = upa_readl(&ffb->dcs4);	/* Depth Cue Scale 4 */
++		ctx->dcd2 = upa_readl(&ffb->dcd2);	/* Depth Cue Depth 2 */
++		ctx->dcd3 = upa_readl(&ffb->dcd3);	/* Depth Cue Depth 3 */
++		ctx->dcd4 = upa_readl(&ffb->dcd4);	/* Depth Cue Depth 4 */
++
++		/* And stencil/stencilctl only exists on FFB2+ and later
++		 * due to the introduction of 3DRAM-III.
++		 */
++		if (fpriv->ffb_type == ffb2_vertical_plus ||
++		    fpriv->ffb_type == ffb2_horizontal_plus) {
++			ctx->stencil = upa_readl(&ffb->stencil);
++			ctx->stencilctl = upa_readl(&ffb->stencilctl);
++		}
++	}
++
++	/* Save the 32x32 area pattern. */
++	for (i = 0; i < 32; i++)
++		ctx->area_pattern[i] = upa_readl(&ffb->pattern[i]);
++
++	/* Finally, stash away the User Control/Status Register. */
++	ctx->ucsr = upa_readl(&ffb->ucsr);
++}
++
++static void ffb_restore_context(ffb_dev_priv_t * fpriv, int old, int idx)
++{
++	ffb_fbcPtr ffb = fpriv->regs;
++	struct ffb_hw_context *ctx;
++	int i;
++
++	ctx = fpriv->hw_state[idx - 1];
++	if (idx == 0 || ctx == NULL)
++		return;
++
++	if (ctx->is_2d_only) {
++		/* 2D applications only care about certain pieces
++		 * of state.
++		 */
++		upa_writel(ctx->drawop, &ffb->drawop);
++
++		/* If we were restoring the vertex registers, this is where
++		 * we would do it. We would restore 32 32-bit words starting
++		 * at ffb->suvtx.
++		 */
++
++		upa_writel(ctx->ppc, &ffb->ppc);
++		upa_writel(ctx->wid, &ffb->wid);
++		upa_writel(ctx->fg, &ffb->fg);
++		upa_writel(ctx->bg, &ffb->bg);
++		upa_writel(ctx->xclip, &ffb->xclip);
++		upa_writel(ctx->fbc, &ffb->fbc);
++		upa_writel(ctx->rop, &ffb->rop);
++		upa_writel(ctx->cmp, &ffb->cmp);
++		upa_writel(ctx->matchab, &ffb->matchab);
++		upa_writel(ctx->magnab, &ffb->magnab);
++		upa_writel(ctx->pmask, &ffb->pmask);
++		upa_writel(ctx->xpmask, &ffb->xpmask);
++		upa_writel(ctx->lpat, &ffb->lpat);
++		upa_writel(ctx->fontxy, &ffb->fontxy);
++		upa_writel(ctx->fontw, &ffb->fontw);
++		upa_writel(ctx->fontinc, &ffb->fontinc);
++
++		/* stencil/stencilctl only exists on FFB2+ and later
++		 * due to the introduction of 3DRAM-III.
++ */ ++ if (fpriv->ffb_type == ffb2_vertical_plus || ++ fpriv->ffb_type == ffb2_horizontal_plus) { ++ upa_writel(ctx->stencil, &ffb->stencil); ++ upa_writel(ctx->stencilctl, &ffb->stencilctl); ++ upa_writel(0x80000000, &ffb->fbc); ++ upa_writel((ctx->stencilctl | 0x80000), ++ &ffb->rawstencilctl); ++ upa_writel(ctx->fbc, &ffb->fbc); ++ } ++ ++ for (i = 0; i < 32; i++) ++ upa_writel(ctx->area_pattern[i], &ffb->pattern[i]); ++ upa_writel((ctx->ucsr & 0xf0000), &ffb->ucsr); ++ return; ++ } ++ ++ /* Restore drawop. */ ++ upa_writel(ctx->drawop, &ffb->drawop); ++ ++ /* If we were restoring the vertex registers, this is where ++ * we would do it. We would restore 32 32-bit words starting ++ * at ffb->suvtx. ++ */ ++ ++ /* Restore rendering attributes. */ ++ ++ upa_writel(ctx->ppc, &ffb->ppc); /* Pixel Processor Control */ ++ upa_writel(ctx->wid, &ffb->wid); /* Current WID */ ++ upa_writel(ctx->fg, &ffb->fg); /* Constant FG color */ ++ upa_writel(ctx->bg, &ffb->bg); /* Constant BG color */ ++ upa_writel(ctx->consty, &ffb->consty); /* Constant Y */ ++ upa_writel(ctx->constz, &ffb->constz); /* Constant Z */ ++ upa_writel(ctx->xclip, &ffb->xclip); /* X plane clip */ ++ upa_writel(ctx->dcss, &ffb->dcss); /* Depth Cue Scale Slope */ ++ upa_writel(ctx->vclipmin, &ffb->vclipmin); /* Primary XY clip, minimum */ ++ upa_writel(ctx->vclipmax, &ffb->vclipmax); /* Primary XY clip, maximum */ ++ upa_writel(ctx->vclipzmin, &ffb->vclipzmin); /* Primary Z clip, minimum */ ++ upa_writel(ctx->vclipzmax, &ffb->vclipzmax); /* Primary Z clip, maximum */ ++ upa_writel(ctx->dcsf, &ffb->dcsf); /* Depth Cue Scale Front Bound */ ++ upa_writel(ctx->dcsb, &ffb->dcsb); /* Depth Cue Scale Back Bound */ ++ upa_writel(ctx->dczf, &ffb->dczf); /* Depth Cue Scale Z Front */ ++ upa_writel(ctx->dczb, &ffb->dczb); /* Depth Cue Scale Z Back */ ++ upa_writel(ctx->blendc, &ffb->blendc); /* Alpha Blend Control */ ++ upa_writel(ctx->blendc1, &ffb->blendc1); /* Alpha Blend Color 1 */ ++ upa_writel(ctx->blendc2, &ffb->blendc2); /* Alpha Blend Color 2 */ ++ upa_writel(ctx->fbc, &ffb->fbc); /* Frame Buffer Control */ ++ upa_writel(ctx->rop, &ffb->rop); /* Raster Operation */ ++ upa_writel(ctx->cmp, &ffb->cmp); /* Compare Controls */ ++ upa_writel(ctx->matchab, &ffb->matchab); /* Buffer A/B Match Ops */ ++ upa_writel(ctx->matchc, &ffb->matchc); /* Buffer C Match Ops */ ++ upa_writel(ctx->magnab, &ffb->magnab); /* Buffer A/B Magnitude Ops */ ++ upa_writel(ctx->magnc, &ffb->magnc); /* Buffer C Magnitude Ops */ ++ upa_writel(ctx->pmask, &ffb->pmask); /* RGB Plane Mask */ ++ upa_writel(ctx->xpmask, &ffb->xpmask); /* X Plane Mask */ ++ upa_writel(ctx->ypmask, &ffb->ypmask); /* Y Plane Mask */ ++ upa_writel(ctx->zpmask, &ffb->zpmask); /* Z Plane Mask */ ++ ++ /* Auxiliary Clips. */ ++ upa_writel(ctx->auxclip0min, &ffb->auxclip[0].min); ++ upa_writel(ctx->auxclip0max, &ffb->auxclip[0].max); ++ upa_writel(ctx->auxclip1min, &ffb->auxclip[1].min); ++ upa_writel(ctx->auxclip1max, &ffb->auxclip[1].max); ++ upa_writel(ctx->auxclip2min, &ffb->auxclip[2].min); ++ upa_writel(ctx->auxclip2max, &ffb->auxclip[2].max); ++ upa_writel(ctx->auxclip3min, &ffb->auxclip[3].min); ++ upa_writel(ctx->auxclip3max, &ffb->auxclip[3].max); ++ ++ upa_writel(ctx->lpat, &ffb->lpat); /* Line Pattern */ ++ upa_writel(ctx->fontxy, &ffb->fontxy); /* XY Font Coordinate */ ++ upa_writel(ctx->fontw, &ffb->fontw); /* Font Width */ ++ upa_writel(ctx->fontinc, &ffb->fontinc); /* Font X/Y Increment */ ++ ++ /* These registers/features only exist on FFB2 and later chips. 
*/
++	if (fpriv->ffb_type >= ffb2_prototype) {
++		upa_writel(ctx->dcss1, &ffb->dcss1);	/* Depth Cue Scale Slope 1 */
++		upa_writel(ctx->dcss2, &ffb->dcss2);	/* Depth Cue Scale Slope 2 */
++		upa_writel(ctx->dcss3, &ffb->dcss3);	/* Depth Cue Scale Slope 3 */
++		upa_writel(ctx->dcs2, &ffb->dcs2);	/* Depth Cue Scale 2 */
++		upa_writel(ctx->dcs3, &ffb->dcs3);	/* Depth Cue Scale 3 */
++		upa_writel(ctx->dcs4, &ffb->dcs4);	/* Depth Cue Scale 4 */
++		upa_writel(ctx->dcd2, &ffb->dcd2);	/* Depth Cue Depth 2 */
++		upa_writel(ctx->dcd3, &ffb->dcd3);	/* Depth Cue Depth 3 */
++		upa_writel(ctx->dcd4, &ffb->dcd4);	/* Depth Cue Depth 4 */
++
++		/* And stencil/stencilctl only exists on FFB2+ and later
++		 * due to the introduction of 3DRAM-III.
++		 */
++		if (fpriv->ffb_type == ffb2_vertical_plus ||
++		    fpriv->ffb_type == ffb2_horizontal_plus) {
++			/* Unfortunately, there is a hardware bug on
++			 * the FFB2+ chips which prevents a normal write
++			 * to the stencil control register from working
++			 * as it should.
++			 *
++			 * The state controlled by the FFB stencilctl register
++			 * really gets transferred to the per-buffer instances
++			 * of the stencilctl register in the 3DRAM chips.
++			 *
++			 * The bug is that FFB does not update buffer C correctly,
++			 * so we have to do it by hand for them.
++			 */
++
++			/* This will update buffers A and B. */
++			upa_writel(ctx->stencil, &ffb->stencil);
++			upa_writel(ctx->stencilctl, &ffb->stencilctl);
++
++			/* Force FFB to use buffer C 3dram regs. */
++			upa_writel(0x80000000, &ffb->fbc);
++			upa_writel((ctx->stencilctl | 0x80000),
++				   &ffb->rawstencilctl);
++
++			/* Now restore the correct FBC controls. */
++			upa_writel(ctx->fbc, &ffb->fbc);
++		}
++	}
++
++	/* Restore the 32x32 area pattern. */
++	for (i = 0; i < 32; i++)
++		upa_writel(ctx->area_pattern[i], &ffb->pattern[i]);
++
++	/* Finally, restore the User Control/Status Register.
++	 * The only state we really preserve here is the picking
++	 * control.
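++	 * Hence the 0xf0000 mask below: it keeps only those bits and
++	 * drops status bits such as FFB_UCSR_FB_BUSY and
++	 * FFB_UCSR_RP_BUSY.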
++ */ ++ upa_writel((ctx->ucsr & 0xf0000), &ffb->ucsr); ++} ++ ++#define FFB_UCSR_FB_BUSY 0x01000000 ++#define FFB_UCSR_RP_BUSY 0x02000000 ++#define FFB_UCSR_ALL_BUSY (FFB_UCSR_RP_BUSY|FFB_UCSR_FB_BUSY) ++ ++static void FFBWait(ffb_fbcPtr ffb) ++{ ++ int limit = 100000; ++ ++ do { ++ u32 regval = upa_readl(&ffb->ucsr); ++ ++ if ((regval & FFB_UCSR_ALL_BUSY) == 0) ++ break; ++ } while (--limit); ++} ++ ++int ffb_context_switch(struct drm_device * dev, int old, int new) { ++ ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; ++ ++#if DRM_DMA_HISTOGRAM ++ dev->ctx_start = get_cycles(); ++#endif ++ ++ DRM_DEBUG("Context switch from %d to %d\n", old, new); ++ ++ if (new == dev->last_context || dev->last_context == 0) { ++ dev->last_context = new; ++ return 0; ++ } ++ ++ FFBWait(fpriv->regs); ++ ffb_save_context(fpriv, old); ++ ffb_restore_context(fpriv, old, new); ++ FFBWait(fpriv->regs); ++ ++ dev->last_context = new; ++ ++ return 0; ++} ++ ++int ffb_resctx(struct inode * inode, struct file * filp, unsigned int cmd, ++ unsigned long arg) { ++ drm_ctx_res_t res; ++ drm_ctx_t ctx; ++ int i; ++ ++ DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); ++ if (copy_from_user(&res, (drm_ctx_res_t __user *) arg, sizeof(res))) ++ return -EFAULT; ++ if (res.count >= DRM_RESERVED_CONTEXTS) { ++ memset(&ctx, 0, sizeof(ctx)); ++ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ++ ctx.handle = i; ++ if (copy_to_user(&res.contexts[i], &i, sizeof(i))) ++ return -EFAULT; ++ } ++ } ++ res.count = DRM_RESERVED_CONTEXTS; ++ if (copy_to_user((drm_ctx_res_t __user *) arg, &res, sizeof(res))) ++ return -EFAULT; ++ return 0; ++} ++ ++int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd, ++ unsigned long arg) { ++ drm_file_t *priv = filp->private_data; ++ struct drm_device *dev = priv->dev; ++ drm_ctx_t ctx; ++ int idx; ++ ++ if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) ++ return -EFAULT; ++ idx = ffb_alloc_queue(dev, (ctx.flags & _DRM_CONTEXT_2DONLY)); ++ if (idx < 0) ++ return -ENFILE; ++ ++ DRM_DEBUG("%d\n", ctx.handle); ++ ctx.handle = idx; ++ if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx))) ++ return -EFAULT; ++ return 0; ++} ++ ++int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd, ++ unsigned long arg) { ++ drm_file_t *priv = filp->private_data; ++ struct drm_device *dev = priv->dev; ++ ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; ++ struct ffb_hw_context *hwctx; ++ drm_ctx_t ctx; ++ int idx; ++ ++ if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) ++ return -EFAULT; ++ ++ idx = ctx.handle; ++ if (idx <= 0 || idx >= FFB_MAX_CTXS) ++ return -EINVAL; ++ ++ hwctx = fpriv->hw_state[idx - 1]; ++ if (hwctx == NULL) ++ return -EINVAL; ++ ++ if ((ctx.flags & _DRM_CONTEXT_2DONLY) == 0) ++ hwctx->is_2d_only = 0; ++ else ++ hwctx->is_2d_only = 1; ++ ++ return 0; ++} ++ ++int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd, ++ unsigned long arg) { ++ drm_file_t *priv = filp->private_data; ++ struct drm_device *dev = priv->dev; ++ ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; ++ struct ffb_hw_context *hwctx; ++ drm_ctx_t ctx; ++ int idx; ++ ++ if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) ++ return -EFAULT; ++ ++ idx = ctx.handle; ++ if (idx <= 0 || idx >= FFB_MAX_CTXS) ++ return -EINVAL; ++ ++ hwctx = fpriv->hw_state[idx - 1]; ++ if (hwctx == NULL) ++ return -EINVAL; ++ ++ if (hwctx->is_2d_only != 0) ++ ctx.flags = _DRM_CONTEXT_2DONLY; ++ else ++ ctx.flags = 0; ++ ++ 
if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx)))
++		return -EFAULT;
++
++	return 0;
++}
++
++int ffb_switchctx(struct inode * inode, struct file * filp, unsigned int cmd,
++		  unsigned long arg) {
++	drm_file_t *priv = filp->private_data;
++	struct drm_device *dev = priv->dev;
++	drm_ctx_t ctx;
++
++	if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++		return -EFAULT;
++	DRM_DEBUG("%d\n", ctx.handle);
++	return ffb_context_switch(dev, dev->last_context, ctx.handle);
++}
++
++int ffb_newctx(struct inode * inode, struct file * filp, unsigned int cmd,
++	       unsigned long arg) {
++	drm_ctx_t ctx;
++
++	if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++		return -EFAULT;
++	DRM_DEBUG("%d\n", ctx.handle);
++
++	return 0;
++}
++
++int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd,
++	      unsigned long arg) {
++	drm_ctx_t ctx;
++	drm_file_t *priv = filp->private_data;
++	struct drm_device *dev = priv->dev;
++	ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++	int idx;
++
++	if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++		return -EFAULT;
++	DRM_DEBUG("%d\n", ctx.handle);
++
++	idx = ctx.handle - 1;
++	if (idx < 0 || idx >= FFB_MAX_CTXS)
++		return -EINVAL;
++
++	if (fpriv->hw_state[idx] != NULL) {
++		kfree(fpriv->hw_state[idx]);
++		fpriv->hw_state[idx] = NULL;
++	}
++	return 0;
++}
++
++static void ffb_driver_reclaim_buffers_locked(struct drm_device * dev)
++{
++	ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++	int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
++	int idx;
++
++	idx = context - 1;
++	if (fpriv &&
++	    context != DRM_KERNEL_CONTEXT && fpriv->hw_state[idx] != NULL) {
++		kfree(fpriv->hw_state[idx]);
++		fpriv->hw_state[idx] = NULL;
++	}
++}
++
++static void ffb_driver_lastclose(struct drm_device * dev)
++{
++	if (dev->dev_private)
++		kfree(dev->dev_private);
++}
++
++static void ffb_driver_unload(struct drm_device * dev)
++{
++	if (ffb_position != NULL)
++		kfree(ffb_position);
++}
++
++static int ffb_driver_kernel_context_switch_unlock(struct drm_device *dev)
++{
++	dev->lock.filp = 0;
++	{
++		__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
++		unsigned int old, new, prev, ctx;
++
++		/* Release the lock by storing back the bare context
++		 * number, i.e. with the held/contended flag bits cleared.
++		 */
++		ctx = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
++		do {
++			old = *plock;
++			new = ctx;
++			prev = cmpxchg(plock, old, new);
++		} while (prev != old);
++	}
++	wake_up_interruptible(&dev->lock.lock_queue);
++
++	return 0;
++}
++
++unsigned long ffb_driver_get_map_ofs(drm_map_t * map)
++{
++	return (map->offset & 0xffffffff);
++}
++
++unsigned long ffb_driver_get_reg_ofs(struct drm_device * dev)
++{
++	ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *) dev->dev_private;
++
++	if (ffb_priv)
++		return ffb_priv->card_phys_base;
++
++	return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/ffb_drv.c git-nokia/drivers/gpu/drm-tungsten/ffb_drv.c
+--- git/drivers/gpu/drm-tungsten/ffb_drv.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/ffb_drv.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,329 @@
++/* $Id$
++ * ffb_drv.c: Creator/Creator3D direct rendering driver.
++ *
++ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
++ */
++
++#include
++#include
++#include
++#include
++#include
++
++#include "drmP.h"
++#include "ffb_drv.h"
++
++#define DRIVER_AUTHOR		"David S.
Miller" ++ ++#define DRIVER_NAME "ffb" ++#define DRIVER_DESC "Creator/Creator3D" ++#define DRIVER_DATE "20000517" ++ ++#define DRIVER_MAJOR 0 ++#define DRIVER_MINOR 0 ++#define DRIVER_PATCHLEVEL 1 ++ ++typedef struct _ffb_position_t { ++ int node; ++ int root; ++} ffb_position_t; ++ ++static ffb_position_t *ffb_position; ++ ++static void get_ffb_type(ffb_dev_priv_t *ffb_priv, int instance) ++{ ++ volatile unsigned char *strap_bits; ++ unsigned char val; ++ ++ strap_bits = (volatile unsigned char *) ++ (ffb_priv->card_phys_base + 0x00200000UL); ++ ++ /* Don't ask, you have to read the value twice for whatever ++ * reason to get correct contents. ++ */ ++ val = upa_readb(strap_bits); ++ val = upa_readb(strap_bits); ++ switch (val & 0x78) { ++ case (0x0 << 5) | (0x0 << 3): ++ ffb_priv->ffb_type = ffb1_prototype; ++ printk("ffb%d: Detected FFB1 pre-FCS prototype\n", instance); ++ break; ++ case (0x0 << 5) | (0x1 << 3): ++ ffb_priv->ffb_type = ffb1_standard; ++ printk("ffb%d: Detected FFB1\n", instance); ++ break; ++ case (0x0 << 5) | (0x3 << 3): ++ ffb_priv->ffb_type = ffb1_speedsort; ++ printk("ffb%d: Detected FFB1-SpeedSort\n", instance); ++ break; ++ case (0x1 << 5) | (0x0 << 3): ++ ffb_priv->ffb_type = ffb2_prototype; ++ printk("ffb%d: Detected FFB2/vertical pre-FCS prototype\n", instance); ++ break; ++ case (0x1 << 5) | (0x1 << 3): ++ ffb_priv->ffb_type = ffb2_vertical; ++ printk("ffb%d: Detected FFB2/vertical\n", instance); ++ break; ++ case (0x1 << 5) | (0x2 << 3): ++ ffb_priv->ffb_type = ffb2_vertical_plus; ++ printk("ffb%d: Detected FFB2+/vertical\n", instance); ++ break; ++ case (0x2 << 5) | (0x0 << 3): ++ ffb_priv->ffb_type = ffb2_horizontal; ++ printk("ffb%d: Detected FFB2/horizontal\n", instance); ++ break; ++ case (0x2 << 5) | (0x2 << 3): ++ ffb_priv->ffb_type = ffb2_horizontal; ++ printk("ffb%d: Detected FFB2+/horizontal\n", instance); ++ break; ++ default: ++ ffb_priv->ffb_type = ffb2_vertical; ++ printk("ffb%d: Unknown boardID[%08x], assuming FFB2\n", instance, val); ++ break; ++ }; ++} ++ ++static void ffb_apply_upa_parent_ranges(int parent, ++ struct linux_prom64_registers *regs) ++{ ++ struct linux_prom64_ranges ranges[PROMREG_MAX]; ++ char name[128]; ++ int len, i; ++ ++ prom_getproperty(parent, "name", name, sizeof(name)); ++ if (strcmp(name, "upa") != 0) ++ return; ++ ++ len = prom_getproperty(parent, "ranges", (void *) ranges, sizeof(ranges)); ++ if (len <= 0) ++ return; ++ ++ len /= sizeof(struct linux_prom64_ranges); ++ for (i = 0; i < len; i++) { ++ struct linux_prom64_ranges *rng = &ranges[i]; ++ u64 phys_addr = regs->phys_addr; ++ ++ if (phys_addr >= rng->ot_child_base && ++ phys_addr < (rng->ot_child_base + rng->or_size)) { ++ regs->phys_addr -= rng->ot_child_base; ++ regs->phys_addr += rng->ot_parent_base; ++ return; ++ } ++ } ++ ++ return; ++} ++ ++static int ffb_init_one(struct drm_device *dev, int prom_node, int parent_node, ++ int instance) ++{ ++ struct linux_prom64_registers regs[2*PROMREG_MAX]; ++ ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *)dev->dev_private; ++ int i; ++ ++ ffb_priv->prom_node = prom_node; ++ if (prom_getproperty(ffb_priv->prom_node, "reg", ++ (void *)regs, sizeof(regs)) <= 0) { ++ return -EINVAL; ++ } ++ ffb_apply_upa_parent_ranges(parent_node, ®s[0]); ++ ffb_priv->card_phys_base = regs[0].phys_addr; ++ ffb_priv->regs = (ffb_fbcPtr) ++ (regs[0].phys_addr + 0x00600000UL); ++ get_ffb_type(ffb_priv, instance); ++ for (i = 0; i < FFB_MAX_CTXS; i++) ++ ffb_priv->hw_state[i] = NULL; ++ ++ return 0; ++} ++ ++static int __init 
ffb_count_siblings(int root)
++{
++	int node, child, count = 0;
++
++	child = prom_getchild(root);
++	for (node = prom_searchsiblings(child, "SUNW,ffb"); node;
++	     node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb"))
++		count++;
++
++	return count;
++}
++
++static int __init ffb_scan_siblings(int root, int instance)
++{
++	int node, child;
++
++	child = prom_getchild(root);
++	for (node = prom_searchsiblings(child, "SUNW,ffb"); node;
++	     node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb")) {
++		ffb_position[instance].node = node;
++		ffb_position[instance].root = root;
++		instance++;
++	}
++
++	return instance;
++}
++
++static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
++{
++	drm_file_t *priv = filp->private_data;
++	struct drm_device *dev;
++	drm_map_list_t *r_list;
++	struct list_head *list;
++	drm_map_t *map;
++
++	if (!priv || (dev = priv->dev) == NULL)
++		return NULL;
++
++	list_for_each(list, &dev->maplist->head) {
++		unsigned long uoff;
++
++		r_list = (drm_map_list_t *)list;
++		map = r_list->map;
++		if (!map)
++			continue;
++		uoff = (map->offset & 0xffffffff);
++		if (uoff == off)
++			return map;
++	}
++
++	return NULL;
++}
++
++unsigned long ffb_get_unmapped_area(struct file *filp,
++				    unsigned long hint,
++				    unsigned long len,
++				    unsigned long pgoff,
++				    unsigned long flags)
++{
++	drm_map_t *map = ffb_find_map(filp, pgoff << PAGE_SHIFT);
++	unsigned long addr = -ENOMEM;
++
++	if (!map)
++		return get_unmapped_area(NULL, hint, len, pgoff, flags);
++
++	if (map->type == _DRM_FRAME_BUFFER ||
++	    map->type == _DRM_REGISTERS) {
++#ifdef HAVE_ARCH_FB_UNMAPPED_AREA
++		addr = get_fb_unmapped_area(filp, hint, len, pgoff, flags);
++#else
++		addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
++#endif
++	} else if (map->type == _DRM_SHM && SHMLBA > PAGE_SIZE) {
++		unsigned long slack = SHMLBA - PAGE_SIZE;
++
++		addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags);
++		if (!(addr & ~PAGE_MASK)) {
++			unsigned long kvirt = (unsigned long) map->handle;
++
++			if ((kvirt & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
++				unsigned long koff, aoff;
++
++				koff = kvirt & (SHMLBA - 1);
++				aoff = addr & (SHMLBA - 1);
++				if (koff < aoff)
++					koff += SHMLBA;
++
++				addr += (koff - aoff);
++			}
++		}
++	} else {
++		addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
++	}
++
++	return addr;
++}
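++
++/* Note on the _DRM_SHM case above: asking for SHMLBA worth of slack and
++ * then shifting by (koff - aoff) places the user mapping on the same cache
++ * colour (address modulo SHMLBA) as the kernel virtual address of the map,
++ * which is what UltraSPARC's virtually-indexed D-cache needs to avoid
++ * aliasing between the two mappings.
++ */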
++
++/* This function must be here since it references drm_numdevs,
++ * which drm_drv.h declares.
++ */
++static int ffb_driver_firstopen(struct drm_device *dev)
++{
++	ffb_dev_priv_t *ffb_priv;
++	struct drm_device *temp_dev;
++	int ret = 0;
++	int i;
++
++	/* Check for the case where no device was found. */
++	if (ffb_position == NULL)
++		return -ENODEV;
++
++	/* Find our instance number by finding our device in dev structure */
++	for (i = 0; i < drm_numdevs; i++) {
++		temp_dev = &(drm_device[i]);
++		if (temp_dev == dev)
++			break;
++	}
++
++	if (i == drm_numdevs)
++		return -ENODEV;
++
++	ffb_priv = kmalloc(sizeof(ffb_dev_priv_t), GFP_KERNEL);
++	if (!ffb_priv)
++		return -ENOMEM;
++	memset(ffb_priv, 0, sizeof(*ffb_priv));
++	dev->dev_private = ffb_priv;
++
++	ret = ffb_init_one(dev,
++			   ffb_position[i].node,
++			   ffb_position[i].root,
++			   i);
++	return ret;
++}
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++	ffb_PCI_IDS
++};
++
++static struct drm_driver ffb_driver = {
++	.release = ffb_driver_reclaim_buffers_locked,
++	.firstopen = ffb_driver_firstopen,
++	.lastclose = ffb_driver_lastclose,
++	.unload = ffb_driver_unload,
++	.kernel_context_switch = ffb_context_switch,
++	.kernel_context_switch_unlock = ffb_driver_kernel_context_switch_unlock,
++	.get_map_ofs = ffb_driver_get_map_ofs,
++	.get_reg_ofs = ffb_driver_get_reg_ofs,
++	.reclaim_buffers = drm_core_reclaim_buffers,
++	.fops = {
++		.owner = THIS_MODULE,
++		.open = drm_open,
++		.release = drm_release,
++		.ioctl = drm_ioctl,
++		.mmap = drm_mmap,
++		.fasync = drm_fasync,
++		.poll = drm_poll,
++		.get_unmapped_area = ffb_get_unmapped_area,
++	},
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++	return drm_probe(pdev, ent, &ffb_driver);
++}
++
++static struct pci_driver pci_driver = {
++	.name = DRIVER_NAME,
++	.id_table = pciidlist,
++	.probe = probe,
++	.remove = __devexit_p(drm_cleanup_pci),
++};
++
++static int __init ffb_init(void)
++{
++	return drm_init(&pci_driver, pciidlist, &ffb_driver);
++}
++
++static void __exit ffb_exit(void)
++{
++	drm_exit(&pci_driver);
++}
++
++module_init(ffb_init);
++module_exit(ffb_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/ffb_drv.h git-nokia/drivers/gpu/drm-tungsten/ffb_drv.h
+--- git/drivers/gpu/drm-tungsten/ffb_drv.h	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/ffb_drv.h	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,284 @@
++/* $Id$
++ * ffb_drv.h: Creator/Creator3D direct rendering driver.
++ *
++ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
++ */
++
++/* Auxiliary clips. */
++typedef struct {
++	volatile unsigned int min;
++	volatile unsigned int max;
++} ffb_auxclip, *ffb_auxclipPtr;
++
++/* FFB register set. */
++typedef struct _ffb_fbc {
++	/* Next vertex registers, on the right we list which drawops
++	 * use said register and the logical name the register has in
++	 * that context. 
++ */ /* DESCRIPTION DRAWOP(NAME) */ ++/*0x00*/unsigned int pad1[3]; /* Reserved */ ++/*0x0c*/volatile unsigned int alpha; /* ALPHA Transparency */ ++/*0x10*/volatile unsigned int red; /* RED */ ++/*0x14*/volatile unsigned int green; /* GREEN */ ++/*0x18*/volatile unsigned int blue; /* BLUE */ ++/*0x1c*/volatile unsigned int z; /* DEPTH */ ++/*0x20*/volatile unsigned int y; /* Y triangle(DOYF) */ ++ /* aadot(DYF) */ ++ /* ddline(DYF) */ ++ /* aaline(DYF) */ ++/*0x24*/volatile unsigned int x; /* X triangle(DOXF) */ ++ /* aadot(DXF) */ ++ /* ddline(DXF) */ ++ /* aaline(DXF) */ ++/*0x28*/unsigned int pad2[2]; /* Reserved */ ++/*0x30*/volatile unsigned int ryf; /* Y (alias to DOYF) ddline(RYF) */ ++ /* aaline(RYF) */ ++ /* triangle(RYF) */ ++/*0x34*/volatile unsigned int rxf; /* X ddline(RXF) */ ++ /* aaline(RXF) */ ++ /* triangle(RXF) */ ++/*0x38*/unsigned int pad3[2]; /* Reserved */ ++/*0x40*/volatile unsigned int dmyf; /* Y (alias to DOYF) triangle(DMYF) */ ++/*0x44*/volatile unsigned int dmxf; /* X triangle(DMXF) */ ++/*0x48*/unsigned int pad4[2]; /* Reserved */ ++/*0x50*/volatile unsigned int ebyi; /* Y (alias to RYI) polygon(EBYI) */ ++/*0x54*/volatile unsigned int ebxi; /* X polygon(EBXI) */ ++/*0x58*/unsigned int pad5[2]; /* Reserved */ ++/*0x60*/volatile unsigned int by; /* Y brline(RYI) */ ++ /* fastfill(OP) */ ++ /* polygon(YI) */ ++ /* rectangle(YI) */ ++ /* bcopy(SRCY) */ ++ /* vscroll(SRCY) */ ++/*0x64*/volatile unsigned int bx; /* X brline(RXI) */ ++ /* polygon(XI) */ ++ /* rectangle(XI) */ ++ /* bcopy(SRCX) */ ++ /* vscroll(SRCX) */ ++ /* fastfill(GO) */ ++/*0x68*/volatile unsigned int dy; /* destination Y fastfill(DSTY) */ ++ /* bcopy(DSRY) */ ++ /* vscroll(DSRY) */ ++/*0x6c*/volatile unsigned int dx; /* destination X fastfill(DSTX) */ ++ /* bcopy(DSTX) */ ++ /* vscroll(DSTX) */ ++/*0x70*/volatile unsigned int bh; /* Y (alias to RYI) brline(DYI) */ ++ /* dot(DYI) */ ++ /* polygon(ETYI) */ ++ /* Height fastfill(H) */ ++ /* bcopy(H) */ ++ /* vscroll(H) */ ++ /* Y count fastfill(NY) */ ++/*0x74*/volatile unsigned int bw; /* X dot(DXI) */ ++ /* brline(DXI) */ ++ /* polygon(ETXI) */ ++ /* fastfill(W) */ ++ /* bcopy(W) */ ++ /* vscroll(W) */ ++ /* fastfill(NX) */ ++/*0x78*/unsigned int pad6[2]; /* Reserved */ ++/*0x80*/unsigned int pad7[32]; /* Reserved */ ++ ++ /* Setup Unit's vertex state register */ ++/*100*/ volatile unsigned int suvtx; ++/*104*/ unsigned int pad8[63]; /* Reserved */ ++ ++ /* Frame Buffer Control Registers */ ++/*200*/ volatile unsigned int ppc; /* Pixel Processor Control */ ++/*204*/ volatile unsigned int wid; /* Current WID */ ++/*208*/ volatile unsigned int fg; /* FG data */ ++/*20c*/ volatile unsigned int bg; /* BG data */ ++/*210*/ volatile unsigned int consty; /* Constant Y */ ++/*214*/ volatile unsigned int constz; /* Constant Z */ ++/*218*/ volatile unsigned int xclip; /* X Clip */ ++/*21c*/ volatile unsigned int dcss; /* Depth Cue Scale Slope */ ++/*220*/ volatile unsigned int vclipmin; /* Viewclip XY Min Bounds */ ++/*224*/ volatile unsigned int vclipmax; /* Viewclip XY Max Bounds */ ++/*228*/ volatile unsigned int vclipzmin; /* Viewclip Z Min Bounds */ ++/*22c*/ volatile unsigned int vclipzmax; /* Viewclip Z Max Bounds */ ++/*230*/ volatile unsigned int dcsf; /* Depth Cue Scale Front Bound */ ++/*234*/ volatile unsigned int dcsb; /* Depth Cue Scale Back Bound */ ++/*238*/ volatile unsigned int dczf; /* Depth Cue Z Front */ ++/*23c*/ volatile unsigned int dczb; /* Depth Cue Z Back */ ++/*240*/ unsigned int pad9; /* Reserved */ ++/*244*/ volatile 
unsigned int blendc; /* Alpha Blend Control */ ++/*248*/ volatile unsigned int blendc1; /* Alpha Blend Color 1 */ ++/*24c*/ volatile unsigned int blendc2; /* Alpha Blend Color 2 */ ++/*250*/ volatile unsigned int fbramitc; /* FB RAM Interleave Test Control */ ++/*254*/ volatile unsigned int fbc; /* Frame Buffer Control */ ++/*258*/ volatile unsigned int rop; /* Raster OPeration */ ++/*25c*/ volatile unsigned int cmp; /* Frame Buffer Compare */ ++/*260*/ volatile unsigned int matchab; /* Buffer AB Match Mask */ ++/*264*/ volatile unsigned int matchc; /* Buffer C(YZ) Match Mask */ ++/*268*/ volatile unsigned int magnab; /* Buffer AB Magnitude Mask */ ++/*26c*/ volatile unsigned int magnc; /* Buffer C(YZ) Magnitude Mask */ ++/*270*/ volatile unsigned int fbcfg0; /* Frame Buffer Config 0 */ ++/*274*/ volatile unsigned int fbcfg1; /* Frame Buffer Config 1 */ ++/*278*/ volatile unsigned int fbcfg2; /* Frame Buffer Config 2 */ ++/*27c*/ volatile unsigned int fbcfg3; /* Frame Buffer Config 3 */ ++/*280*/ volatile unsigned int ppcfg; /* Pixel Processor Config */ ++/*284*/ volatile unsigned int pick; /* Picking Control */ ++/*288*/ volatile unsigned int fillmode; /* FillMode */ ++/*28c*/ volatile unsigned int fbramwac; /* FB RAM Write Address Control */ ++/*290*/ volatile unsigned int pmask; /* RGB PlaneMask */ ++/*294*/ volatile unsigned int xpmask; /* X PlaneMask */ ++/*298*/ volatile unsigned int ypmask; /* Y PlaneMask */ ++/*29c*/ volatile unsigned int zpmask; /* Z PlaneMask */ ++/*2a0*/ ffb_auxclip auxclip[4]; /* Auxilliary Viewport Clip */ ++ ++ /* New 3dRAM III support regs */ ++/*2c0*/ volatile unsigned int rawblend2; ++/*2c4*/ volatile unsigned int rawpreblend; ++/*2c8*/ volatile unsigned int rawstencil; ++/*2cc*/ volatile unsigned int rawstencilctl; ++/*2d0*/ volatile unsigned int threedram1; ++/*2d4*/ volatile unsigned int threedram2; ++/*2d8*/ volatile unsigned int passin; ++/*2dc*/ volatile unsigned int rawclrdepth; ++/*2e0*/ volatile unsigned int rawpmask; ++/*2e4*/ volatile unsigned int rawcsrc; ++/*2e8*/ volatile unsigned int rawmatch; ++/*2ec*/ volatile unsigned int rawmagn; ++/*2f0*/ volatile unsigned int rawropblend; ++/*2f4*/ volatile unsigned int rawcmp; ++/*2f8*/ volatile unsigned int rawwac; ++/*2fc*/ volatile unsigned int fbramid; ++ ++/*300*/ volatile unsigned int drawop; /* Draw OPeration */ ++/*304*/ unsigned int pad10[2]; /* Reserved */ ++/*30c*/ volatile unsigned int lpat; /* Line Pattern control */ ++/*310*/ unsigned int pad11; /* Reserved */ ++/*314*/ volatile unsigned int fontxy; /* XY Font coordinate */ ++/*318*/ volatile unsigned int fontw; /* Font Width */ ++/*31c*/ volatile unsigned int fontinc; /* Font Increment */ ++/*320*/ volatile unsigned int font; /* Font bits */ ++/*324*/ unsigned int pad12[3]; /* Reserved */ ++/*330*/ volatile unsigned int blend2; ++/*334*/ volatile unsigned int preblend; ++/*338*/ volatile unsigned int stencil; ++/*33c*/ volatile unsigned int stencilctl; ++ ++/*340*/ unsigned int pad13[4]; /* Reserved */ ++/*350*/ volatile unsigned int dcss1; /* Depth Cue Scale Slope 1 */ ++/*354*/ volatile unsigned int dcss2; /* Depth Cue Scale Slope 2 */ ++/*358*/ volatile unsigned int dcss3; /* Depth Cue Scale Slope 3 */ ++/*35c*/ volatile unsigned int widpmask; ++/*360*/ volatile unsigned int dcs2; ++/*364*/ volatile unsigned int dcs3; ++/*368*/ volatile unsigned int dcs4; ++/*36c*/ unsigned int pad14; /* Reserved */ ++/*370*/ volatile unsigned int dcd2; ++/*374*/ volatile unsigned int dcd3; ++/*378*/ volatile unsigned int dcd4; ++/*37c*/ unsigned int 
pad15; /* Reserved */ ++/*380*/ volatile unsigned int pattern[32]; /* area Pattern */ ++/*400*/ unsigned int pad16[8]; /* Reserved */ ++/*420*/ volatile unsigned int reset; /* chip RESET */ ++/*424*/ unsigned int pad17[247]; /* Reserved */ ++/*800*/ volatile unsigned int devid; /* Device ID */ ++/*804*/ unsigned int pad18[63]; /* Reserved */ ++/*900*/ volatile unsigned int ucsr; /* User Control & Status Register */ ++/*904*/ unsigned int pad19[31]; /* Reserved */ ++/*980*/ volatile unsigned int mer; /* Mode Enable Register */ ++/*984*/ unsigned int pad20[1439]; /* Reserved */ ++} ffb_fbc, *ffb_fbcPtr; ++ ++struct ffb_hw_context { ++ int is_2d_only; ++ ++ unsigned int ppc; ++ unsigned int wid; ++ unsigned int fg; ++ unsigned int bg; ++ unsigned int consty; ++ unsigned int constz; ++ unsigned int xclip; ++ unsigned int dcss; ++ unsigned int vclipmin; ++ unsigned int vclipmax; ++ unsigned int vclipzmin; ++ unsigned int vclipzmax; ++ unsigned int dcsf; ++ unsigned int dcsb; ++ unsigned int dczf; ++ unsigned int dczb; ++ unsigned int blendc; ++ unsigned int blendc1; ++ unsigned int blendc2; ++ unsigned int fbc; ++ unsigned int rop; ++ unsigned int cmp; ++ unsigned int matchab; ++ unsigned int matchc; ++ unsigned int magnab; ++ unsigned int magnc; ++ unsigned int pmask; ++ unsigned int xpmask; ++ unsigned int ypmask; ++ unsigned int zpmask; ++ unsigned int auxclip0min; ++ unsigned int auxclip0max; ++ unsigned int auxclip1min; ++ unsigned int auxclip1max; ++ unsigned int auxclip2min; ++ unsigned int auxclip2max; ++ unsigned int auxclip3min; ++ unsigned int auxclip3max; ++ unsigned int drawop; ++ unsigned int lpat; ++ unsigned int fontxy; ++ unsigned int fontw; ++ unsigned int fontinc; ++ unsigned int area_pattern[32]; ++ unsigned int ucsr; ++ unsigned int stencil; ++ unsigned int stencilctl; ++ unsigned int dcss1; ++ unsigned int dcss2; ++ unsigned int dcss3; ++ unsigned int dcs2; ++ unsigned int dcs3; ++ unsigned int dcs4; ++ unsigned int dcd2; ++ unsigned int dcd3; ++ unsigned int dcd4; ++ unsigned int mer; ++}; ++ ++#define FFB_MAX_CTXS 32 ++ ++enum ffb_chip_type { ++ ffb1_prototype = 0, /* Early pre-FCS FFB */ ++ ffb1_standard, /* First FCS FFB, 100Mhz UPA, 66MHz gclk */ ++ ffb1_speedsort, /* Second FCS FFB, 100Mhz UPA, 75MHz gclk */ ++ ffb2_prototype, /* Early pre-FCS vertical FFB2 */ ++ ffb2_vertical, /* First FCS FFB2/vertical, 100Mhz UPA, 100MHZ gclk, ++ 75(SingleBuffer)/83(DoubleBuffer) MHz fclk */ ++ ffb2_vertical_plus, /* Second FCS FFB2/vertical, same timings */ ++ ffb2_horizontal, /* First FCS FFB2/horizontal, same timings as FFB2/vert */ ++ ffb2_horizontal_plus, /* Second FCS FFB2/horizontal, same timings */ ++ afb_m3, /* FCS Elite3D, 3 float chips */ ++ afb_m6 /* FCS Elite3D, 6 float chips */ ++}; ++ ++typedef struct ffb_dev_priv { ++ /* Misc software state. */ ++ int prom_node; ++ enum ffb_chip_type ffb_type; ++ u64 card_phys_base; ++ struct miscdevice miscdev; ++ ++ /* Controller registers. */ ++ ffb_fbcPtr regs; ++ ++ /* Context table. 
*/
++	struct ffb_hw_context *hw_state[FFB_MAX_CTXS];
++} ffb_dev_priv_t;
++
++extern unsigned long ffb_get_unmapped_area(struct file *filp,
++					   unsigned long hint,
++					   unsigned long len,
++					   unsigned long pgoff,
++					   unsigned long flags);
++extern unsigned long ffb_driver_get_map_ofs(drm_map_t *map);
++extern unsigned long ffb_driver_get_reg_ofs(struct drm_device *dev);
+diff -Nurd git/drivers/gpu/drm-tungsten/i810_dma.c git-nokia/drivers/gpu/drm-tungsten/i810_dma.c
+--- git/drivers/gpu/drm-tungsten/i810_dma.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i810_dma.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1301 @@
++/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Rickard E. 
(Rik) Faith
++ *	    Jeff Hartmann
++ *	    Keith Whitwell
++ *
++ */
++
++/* XXX: the three header names below were lost from the patch as received;
++ * <linux/interrupt.h>, <linux/delay.h> and <linux/pagemap.h> are assumed
++ * from the task-queue comment and the udelay()/page helpers this file
++ * uses. */
++#include <linux/interrupt.h>	/* For task queue support */
++#include <linux/delay.h>
++#include <linux/pagemap.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "i810_drm.h"
++#include "i810_drv.h"
++
++#define I810_BUF_FREE		2
++#define I810_BUF_CLIENT		1
++#define I810_BUF_HARDWARE	0
++
++#define I810_BUF_UNMAPPED	0
++#define I810_BUF_MAPPED		1
++
++static inline void i810_print_status_page(struct drm_device * dev)
++{
++	struct drm_device_dma *dma = dev->dma;
++	drm_i810_private_t *dev_priv = dev->dev_private;
++	u32 *temp = dev_priv->hw_status_page;
++	int i;
++
++	DRM_DEBUG("hw_status: Interrupt Status : %x\n", temp[0]);
++	DRM_DEBUG("hw_status: LpRing Head ptr : %x\n", temp[1]);
++	DRM_DEBUG("hw_status: IRing Head ptr : %x\n", temp[2]);
++	DRM_DEBUG("hw_status: Reserved : %x\n", temp[3]);
++	DRM_DEBUG("hw_status: Last Render: %x\n", temp[4]);
++	DRM_DEBUG("hw_status: Driver Counter : %d\n", temp[5]);
++	for (i = 6; i < dma->buf_count + 6; i++) {
++		DRM_DEBUG("buffer status idx : %d used: %d\n", i - 6, temp[i]);
++	}
++}
++
++static struct drm_buf *i810_freelist_get(struct drm_device * dev)
++{
++	struct drm_device_dma *dma = dev->dma;
++	int i;
++	int used;
++
++	/* Linear search might not be the best solution */
++
++	for (i = 0; i < dma->buf_count; i++) {
++		struct drm_buf *buf = dma->buflist[i];
++		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++		/* In use is already a pointer */
++		used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
++			       I810_BUF_CLIENT);
++		if (used == I810_BUF_FREE) {
++			return buf;
++		}
++	}
++	return NULL;
++}
++
++/* This should only be called if the buffer is not sent to the hardware
++ * yet, the hardware updates in use for us once it's on the ring buffer.
++ */
++
++static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf)
++{
++	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++	int used;
++
++	/* In use is already a pointer */
++	used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
++	if (used != I810_BUF_CLIENT) {
++		DRM_ERROR("Freeing buffer that's not in use: %d\n", buf->idx);
++		return -EINVAL;
++	}
++
++	return 0;
++}
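++
++/* The per-buffer in_use word lives in the hardware status page and acts as
++ * a small state machine: FREE -> CLIENT (i810_freelist_get above), CLIENT ->
++ * HARDWARE when a batch is dispatched, and back to FREE either here or by
++ * the ring itself via CMD_STORE_DWORD_IDX; every transition is a cmpxchg()
++ * on that word.
++ */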
++
++static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
++{
++	struct drm_file *priv = filp->private_data;
++	struct drm_device *dev;
++	drm_i810_private_t *dev_priv;
++	struct drm_buf *buf;
++	drm_i810_buf_priv_t *buf_priv;
++
++	lock_kernel();
++	dev = priv->minor->dev;
++	dev_priv = dev->dev_private;
++	buf = dev_priv->mmap_buffer;
++	buf_priv = buf->dev_private;
++
++	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
++	vma->vm_file = filp;
++
++	buf_priv->currently_mapped = I810_BUF_MAPPED;
++	unlock_kernel();
++
++	if (io_remap_pfn_range(vma, vma->vm_start,
++			       vma->vm_pgoff,
++			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
++		return -EAGAIN;
++	return 0;
++}
++
++static const struct file_operations i810_buffer_fops = {
++	.open = drm_open,
++	.release = drm_release,
++	.ioctl = drm_ioctl,
++	.mmap = i810_mmap_buffers,
++	.fasync = drm_fasync,
++};
++
++static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
++{
++	struct drm_device *dev = file_priv->minor->dev;
++	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++	drm_i810_private_t *dev_priv = dev->dev_private;
++	const struct file_operations *old_fops;
++	int retcode = 0;
++
++	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
++		return -EINVAL;
++
++	down_write(&current->mm->mmap_sem);
++	old_fops = file_priv->filp->f_op;
++	file_priv->filp->f_op = &i810_buffer_fops;
++	dev_priv->mmap_buffer = buf;
++	buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
++					    PROT_READ | PROT_WRITE,
++					    MAP_SHARED, buf->bus_address);
++	dev_priv->mmap_buffer = NULL;
++	file_priv->filp->f_op = old_fops;
++	if (IS_ERR(buf_priv->virtual)) {
++		/* Real error */
++		DRM_ERROR("mmap error\n");
++		retcode = PTR_ERR(buf_priv->virtual);
++		buf_priv->virtual = NULL;
++	}
++	up_write(&current->mm->mmap_sem);
++
++	return retcode;
++}
++
++static int i810_unmap_buffer(struct drm_buf * buf)
++{
++	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++	int retcode = 0;
++
++	if (buf_priv->currently_mapped != I810_BUF_MAPPED)
++		return -EINVAL;
++
++	down_write(&current->mm->mmap_sem);
++	retcode = do_munmap(current->mm,
++			    (unsigned long)buf_priv->virtual,
++			    (size_t) buf->total);
++	up_write(&current->mm->mmap_sem);
++
++	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
++	buf_priv->virtual = NULL;
++
++	return retcode;
++}
++
++static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
++			       struct drm_file *file_priv)
++{
++	struct drm_buf *buf;
++	drm_i810_buf_priv_t *buf_priv;
++	int retcode = 0;
++
++	buf = i810_freelist_get(dev);
++	if (!buf) {
++		retcode = -ENOMEM;
++		DRM_DEBUG("retcode=%d\n", retcode);
++		return retcode;
++	}
++
++	retcode = i810_map_buffer(buf, file_priv);
++	if (retcode) {
++		i810_freelist_put(dev, buf);
++		DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
++		return retcode;
++	}
++	buf->file_priv = file_priv;
++	buf_priv = buf->dev_private;
++	d->granted = 1;
++	d->request_idx = buf->idx;
++	d->request_size = buf->total;
++	d->virtual = buf_priv->virtual;
++
++	return retcode;
++}
++
++static int i810_dma_cleanup(struct drm_device * dev)
++{
++	struct drm_device_dma *dma = dev->dma;
++
++	/* Make sure interrupts are disabled here because the uninstall ioctl
++	 * may not have been called from userspace and after dev_private
++	 * is freed, it's too late. 
++ */ ++ if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled) ++ drm_irq_uninstall(dev); ++ ++ if (dev->dev_private) { ++ int i; ++ drm_i810_private_t *dev_priv = ++ (drm_i810_private_t *) dev->dev_private; ++ ++ if (dev_priv->ring.virtual_start) { ++ drm_core_ioremapfree(&dev_priv->ring.map, dev); ++ } ++ if (dev_priv->hw_status_page) { ++ pci_free_consistent(dev->pdev, PAGE_SIZE, ++ dev_priv->hw_status_page, ++ dev_priv->dma_status_page); ++ /* Need to rewrite hardware status page */ ++ I810_WRITE(0x02080, 0x1ffff000); ++ } ++ drm_free(dev->dev_private, sizeof(drm_i810_private_t), ++ DRM_MEM_DRIVER); ++ dev->dev_private = NULL; ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ ++ if (buf_priv->kernel_virtual && buf->total) ++ drm_core_ioremapfree(&buf_priv->map, dev); ++ } ++ } ++ return 0; ++} ++ ++static int i810_wait_ring(struct drm_device * dev, int n) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ drm_i810_ring_buffer_t *ring = &(dev_priv->ring); ++ int iters = 0; ++ unsigned long end; ++ unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; ++ ++ end = jiffies + (HZ * 3); ++ while (ring->space < n) { ++ ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; ++ ring->space = ring->head - (ring->tail + 8); ++ if (ring->space < 0) ++ ring->space += ring->Size; ++ ++ if (ring->head != last_head) { ++ end = jiffies + (HZ * 3); ++ last_head = ring->head; ++ } ++ ++ iters++; ++ if (time_before(end, jiffies)) { ++ DRM_ERROR("space: %d wanted %d\n", ring->space, n); ++ DRM_ERROR("lockup\n"); ++ goto out_wait_ring; ++ } ++ udelay(1); ++ } ++ ++ out_wait_ring: ++ return iters; ++} ++ ++static void i810_kernel_lost_context(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ drm_i810_ring_buffer_t *ring = &(dev_priv->ring); ++ ++ ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; ++ ring->tail = I810_READ(LP_RING + RING_TAIL); ++ ring->space = ring->head - (ring->tail + 8); ++ if (ring->space < 0) ++ ring->space += ring->Size; ++} ++ ++static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int my_idx = 24; ++ u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx); ++ int i; ++ ++ if (dma->buf_count > 1019) { ++ /* Not enough space in the status page for the freelist */ ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ ++ buf_priv->in_use = hw_status++; ++ buf_priv->my_use_idx = my_idx; ++ my_idx += 4; ++ ++ *buf_priv->in_use = I810_BUF_FREE; ++ ++ buf_priv->map.offset = buf->bus_address; ++ buf_priv->map.size = buf->total; ++ buf_priv->map.type = _DRM_AGP; ++ buf_priv->map.flags = 0; ++ buf_priv->map.mtrr = 0; ++ ++ drm_core_ioremap(&buf_priv->map, dev); ++ buf_priv->kernel_virtual = buf_priv->map.handle; ++ ++ } ++ return 0; ++} ++ ++static int i810_dma_initialize(struct drm_device * dev, ++ drm_i810_private_t * dev_priv, ++ drm_i810_init_t * init) ++{ ++ struct drm_map_list *r_list; ++ memset(dev_priv, 0, sizeof(drm_i810_private_t)); ++ ++ list_for_each_entry(r_list, &dev->maplist, head) { ++ if (r_list->map && ++ r_list->map->type == _DRM_SHM && ++ r_list->map->flags & _DRM_CONTAINS_LOCK) { ++ dev_priv->sarea_map = r_list->map; ++ break; ++ } ++ } ++ if (!dev_priv->sarea_map) { ++ dev->dev_private = (void *)dev_priv; 
++ i810_dma_cleanup(dev); ++ DRM_ERROR("can not find sarea!\n"); ++ return -EINVAL; ++ } ++ dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); ++ if (!dev_priv->mmio_map) { ++ dev->dev_private = (void *)dev_priv; ++ i810_dma_cleanup(dev); ++ DRM_ERROR("can not find mmio map!\n"); ++ return -EINVAL; ++ } ++ dev->agp_buffer_token = init->buffers_offset; ++ dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); ++ if (!dev->agp_buffer_map) { ++ dev->dev_private = (void *)dev_priv; ++ i810_dma_cleanup(dev); ++ DRM_ERROR("can not find dma buffer map!\n"); ++ return -EINVAL; ++ } ++ ++ dev_priv->sarea_priv = (drm_i810_sarea_t *) ++ ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset); ++ ++ dev_priv->ring.Start = init->ring_start; ++ dev_priv->ring.End = init->ring_end; ++ dev_priv->ring.Size = init->ring_size; ++ ++ dev_priv->ring.map.offset = dev->agp->base + init->ring_start; ++ dev_priv->ring.map.size = init->ring_size; ++ dev_priv->ring.map.type = _DRM_AGP; ++ dev_priv->ring.map.flags = 0; ++ dev_priv->ring.map.mtrr = 0; ++ ++ drm_core_ioremap(&dev_priv->ring.map, dev); ++ ++ if (dev_priv->ring.map.handle == NULL) { ++ dev->dev_private = (void *)dev_priv; ++ i810_dma_cleanup(dev); ++ DRM_ERROR("can not ioremap virtual address for" ++ " ring buffer\n"); ++ return -ENOMEM; ++ } ++ ++ dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ++ ++ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; ++ ++ dev_priv->w = init->w; ++ dev_priv->h = init->h; ++ dev_priv->pitch = init->pitch; ++ dev_priv->back_offset = init->back_offset; ++ dev_priv->depth_offset = init->depth_offset; ++ dev_priv->front_offset = init->front_offset; ++ ++ dev_priv->overlay_offset = init->overlay_offset; ++ dev_priv->overlay_physical = init->overlay_physical; ++ ++ dev_priv->front_di1 = init->front_offset | init->pitch_bits; ++ dev_priv->back_di1 = init->back_offset | init->pitch_bits; ++ dev_priv->zi1 = init->depth_offset | init->pitch_bits; ++ ++ /* Program Hardware Status Page */ ++ dev_priv->hw_status_page = ++ pci_alloc_consistent(dev->pdev, PAGE_SIZE, ++ &dev_priv->dma_status_page); ++ if (!dev_priv->hw_status_page) { ++ dev->dev_private = (void *)dev_priv; ++ i810_dma_cleanup(dev); ++ DRM_ERROR("Can not allocate hardware status page\n"); ++ return -ENOMEM; ++ } ++ memset(dev_priv->hw_status_page, 0, PAGE_SIZE); ++ DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); ++ ++ I810_WRITE(0x02080, dev_priv->dma_status_page); ++ DRM_DEBUG("Enabled hardware status page\n"); ++ ++ /* Now we need to init our freelist */ ++ if (i810_freelist_init(dev, dev_priv) != 0) { ++ dev->dev_private = (void *)dev_priv; ++ i810_dma_cleanup(dev); ++ DRM_ERROR("Not enough space in the status page for" ++ " the freelist\n"); ++ return -ENOMEM; ++ } ++ dev->dev_private = (void *)dev_priv; ++ ++ return 0; ++} ++ ++static int i810_dma_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv; ++ drm_i810_init_t *init = data; ++ int retcode = 0; ++ ++ switch (init->func) { ++ case I810_INIT_DMA_1_4: ++ DRM_INFO("Using v1.4 init.\n"); ++ dev_priv = drm_alloc(sizeof(drm_i810_private_t), ++ DRM_MEM_DRIVER); ++ if (dev_priv == NULL) ++ return -ENOMEM; ++ retcode = i810_dma_initialize(dev, dev_priv, init); ++ break; ++ ++ case I810_CLEANUP_DMA: ++ DRM_INFO("DMA Cleanup\n"); ++ retcode = i810_dma_cleanup(dev); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return retcode; ++} ++ ++/* Most efficient way to verify state for the i810 is as it is ++ * 
emitted. Non-conformant state is silently dropped. ++ * ++ * Use 'volatile' & local var tmp to force the emitted values to be ++ * identical to the verified ones. ++ */ ++static void i810EmitContextVerified(struct drm_device * dev, ++ volatile unsigned int *code) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ int i, j = 0; ++ unsigned int tmp; ++ RING_LOCALS; ++ ++ BEGIN_LP_RING(I810_CTX_SETUP_SIZE); ++ ++ OUT_RING(GFX_OP_COLOR_FACTOR); ++ OUT_RING(code[I810_CTXREG_CF1]); ++ ++ OUT_RING(GFX_OP_STIPPLE); ++ OUT_RING(code[I810_CTXREG_ST1]); ++ ++ for (i = 4; i < I810_CTX_SETUP_SIZE; i++) { ++ tmp = code[i]; ++ ++ if ((tmp & (7 << 29)) == (3 << 29) && ++ (tmp & (0x1f << 24)) < (0x1d << 24)) { ++ OUT_RING(tmp); ++ j++; ++ } else ++ printk("constext state dropped!!!\n"); ++ } ++ ++ if (j & 1) ++ OUT_RING(0); ++ ++ ADVANCE_LP_RING(); ++} ++ ++static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ int i, j = 0; ++ unsigned int tmp; ++ RING_LOCALS; ++ ++ BEGIN_LP_RING(I810_TEX_SETUP_SIZE); ++ ++ OUT_RING(GFX_OP_MAP_INFO); ++ OUT_RING(code[I810_TEXREG_MI1]); ++ OUT_RING(code[I810_TEXREG_MI2]); ++ OUT_RING(code[I810_TEXREG_MI3]); ++ ++ for (i = 4; i < I810_TEX_SETUP_SIZE; i++) { ++ tmp = code[i]; ++ ++ if ((tmp & (7 << 29)) == (3 << 29) && ++ (tmp & (0x1f << 24)) < (0x1d << 24)) { ++ OUT_RING(tmp); ++ j++; ++ } else ++ printk("texture state dropped!!!\n"); ++ } ++ ++ if (j & 1) ++ OUT_RING(0); ++ ++ ADVANCE_LP_RING(); ++} ++ ++/* Need to do some additional checking when setting the dest buffer. ++ */ ++static void i810EmitDestVerified(struct drm_device * dev, ++ volatile unsigned int *code) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ unsigned int tmp; ++ RING_LOCALS; ++ ++ BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2); ++ ++ tmp = code[I810_DESTREG_DI1]; ++ if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) { ++ OUT_RING(CMD_OP_DESTBUFFER_INFO); ++ OUT_RING(tmp); ++ } else ++ DRM_DEBUG("bad di1 %x (allow %x or %x)\n", ++ tmp, dev_priv->front_di1, dev_priv->back_di1); ++ ++ /* invarient: ++ */ ++ OUT_RING(CMD_OP_Z_BUFFER_INFO); ++ OUT_RING(dev_priv->zi1); ++ ++ OUT_RING(GFX_OP_DESTBUFFER_VARS); ++ OUT_RING(code[I810_DESTREG_DV1]); ++ ++ OUT_RING(GFX_OP_DRAWRECT_INFO); ++ OUT_RING(code[I810_DESTREG_DR1]); ++ OUT_RING(code[I810_DESTREG_DR2]); ++ OUT_RING(code[I810_DESTREG_DR3]); ++ OUT_RING(code[I810_DESTREG_DR4]); ++ OUT_RING(0); ++ ++ ADVANCE_LP_RING(); ++} ++ ++static void i810EmitState(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ unsigned int dirty = sarea_priv->dirty; ++ ++ DRM_DEBUG("%x\n", dirty); ++ ++ if (dirty & I810_UPLOAD_BUFFERS) { ++ i810EmitDestVerified(dev, sarea_priv->BufferState); ++ sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS; ++ } ++ ++ if (dirty & I810_UPLOAD_CTX) { ++ i810EmitContextVerified(dev, sarea_priv->ContextState); ++ sarea_priv->dirty &= ~I810_UPLOAD_CTX; ++ } ++ ++ if (dirty & I810_UPLOAD_TEX0) { ++ i810EmitTexVerified(dev, sarea_priv->TexState[0]); ++ sarea_priv->dirty &= ~I810_UPLOAD_TEX0; ++ } ++ ++ if (dirty & I810_UPLOAD_TEX1) { ++ i810EmitTexVerified(dev, sarea_priv->TexState[1]); ++ sarea_priv->dirty &= ~I810_UPLOAD_TEX1; ++ } ++} ++ ++/* need to verify ++ */ ++static void i810_dma_dispatch_clear(struct drm_device * dev, int flags, ++ unsigned int clear_color, ++ unsigned int clear_zval) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ 
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int pitch = dev_priv->pitch; ++ int cpp = 2; ++ int i; ++ RING_LOCALS; ++ ++ if (dev_priv->current_page == 1) { ++ unsigned int tmp = flags; ++ ++ flags &= ~(I810_FRONT | I810_BACK); ++ if (tmp & I810_FRONT) ++ flags |= I810_BACK; ++ if (tmp & I810_BACK) ++ flags |= I810_FRONT; ++ } ++ ++ i810_kernel_lost_context(dev); ++ ++ if (nbox > I810_NR_SAREA_CLIPRECTS) ++ nbox = I810_NR_SAREA_CLIPRECTS; ++ ++ for (i = 0; i < nbox; i++, pbox++) { ++ unsigned int x = pbox->x1; ++ unsigned int y = pbox->y1; ++ unsigned int width = (pbox->x2 - x) * cpp; ++ unsigned int height = pbox->y2 - y; ++ unsigned int start = y * pitch + x * cpp; ++ ++ if (pbox->x1 > pbox->x2 || ++ pbox->y1 > pbox->y2 || ++ pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) ++ continue; ++ ++ if (flags & I810_FRONT) { ++ BEGIN_LP_RING(6); ++ OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3); ++ OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch); ++ OUT_RING((height << 16) | width); ++ OUT_RING(start); ++ OUT_RING(clear_color); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } ++ ++ if (flags & I810_BACK) { ++ BEGIN_LP_RING(6); ++ OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3); ++ OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch); ++ OUT_RING((height << 16) | width); ++ OUT_RING(dev_priv->back_offset + start); ++ OUT_RING(clear_color); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } ++ ++ if (flags & I810_DEPTH) { ++ BEGIN_LP_RING(6); ++ OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3); ++ OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch); ++ OUT_RING((height << 16) | width); ++ OUT_RING(dev_priv->depth_offset + start); ++ OUT_RING(clear_zval); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } ++ } ++} ++ ++static void i810_dma_dispatch_swap(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int pitch = dev_priv->pitch; ++ int cpp = 2; ++ int i; ++ RING_LOCALS; ++ ++ DRM_DEBUG("swapbuffers\n"); ++ ++ i810_kernel_lost_context(dev); ++ ++ if (nbox > I810_NR_SAREA_CLIPRECTS) ++ nbox = I810_NR_SAREA_CLIPRECTS; ++ ++ for (i = 0; i < nbox; i++, pbox++) { ++ unsigned int w = pbox->x2 - pbox->x1; ++ unsigned int h = pbox->y2 - pbox->y1; ++ unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch; ++ unsigned int start = dst; ++ ++ if (pbox->x1 > pbox->x2 || ++ pbox->y1 > pbox->y2 || ++ pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h) ++ continue; ++ ++ BEGIN_LP_RING(6); ++ OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4); ++ OUT_RING(pitch | (0xCC << 16)); ++ OUT_RING((h << 16) | (w * cpp)); ++ if (dev_priv->current_page == 0) ++ OUT_RING(dev_priv->front_offset + start); ++ else ++ OUT_RING(dev_priv->back_offset + start); ++ OUT_RING(pitch); ++ if (dev_priv->current_page == 0) ++ OUT_RING(dev_priv->back_offset + start); ++ else ++ OUT_RING(dev_priv->front_offset + start); ++ ADVANCE_LP_RING(); ++ } ++} ++ ++static void i810_dma_dispatch_vertex(struct drm_device * dev, ++ struct drm_buf * buf, int discard, int used) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_clip_rect *box = sarea_priv->boxes; ++ int nbox = sarea_priv->nbox; ++ unsigned long address = (unsigned long)buf->bus_address; ++ 
unsigned long start = address - dev->agp->base; ++ int i = 0; ++ RING_LOCALS; ++ ++ i810_kernel_lost_context(dev); ++ ++ if (nbox > I810_NR_SAREA_CLIPRECTS) ++ nbox = I810_NR_SAREA_CLIPRECTS; ++ ++ if (used > 4 * 1024) ++ used = 0; ++ ++ if (sarea_priv->dirty) ++ i810EmitState(dev); ++ ++ if (buf_priv->currently_mapped == I810_BUF_MAPPED) { ++ unsigned int prim = (sarea_priv->vertex_prim & PR_MASK); ++ ++ *(u32 *) buf_priv->kernel_virtual = ++ ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2))); ++ ++ if (used & 4) { ++ *(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0; ++ used += 4; ++ } ++ ++ i810_unmap_buffer(buf); ++ } ++ ++ if (used) { ++ do { ++ if (i < nbox) { ++ BEGIN_LP_RING(4); ++ OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR | ++ SC_ENABLE); ++ OUT_RING(GFX_OP_SCISSOR_INFO); ++ OUT_RING(box[i].x1 | (box[i].y1 << 16)); ++ OUT_RING((box[i].x2 - ++ 1) | ((box[i].y2 - 1) << 16)); ++ ADVANCE_LP_RING(); ++ } ++ ++ BEGIN_LP_RING(4); ++ OUT_RING(CMD_OP_BATCH_BUFFER); ++ OUT_RING(start | BB1_PROTECTED); ++ OUT_RING(start + used - 4); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ } while (++i < nbox); ++ } ++ ++ if (discard) { ++ dev_priv->counter++; ++ ++ (void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, ++ I810_BUF_HARDWARE); ++ ++ BEGIN_LP_RING(8); ++ OUT_RING(CMD_STORE_DWORD_IDX); ++ OUT_RING(20); ++ OUT_RING(dev_priv->counter); ++ OUT_RING(CMD_STORE_DWORD_IDX); ++ OUT_RING(buf_priv->my_use_idx); ++ OUT_RING(I810_BUF_FREE); ++ OUT_RING(CMD_REPORT_HEAD); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } ++} ++ ++static void i810_dma_dispatch_flip(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ int pitch = dev_priv->pitch; ++ RING_LOCALS; ++ ++ DRM_DEBUG("page=%d pfCurrentPage=%d\n", ++ dev_priv->current_page, ++ dev_priv->sarea_priv->pf_current_page); ++ ++ i810_kernel_lost_context(dev); ++ ++ BEGIN_LP_RING(2); ++ OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2); ++ /* On i815 at least ASYNC is buggy */ ++ /* pitch<<5 is from 11.2.8 p158, ++ its the pitch / 8 then left shifted 8, ++ so (pitch >> 3) << 8 */ ++ OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ ); ++ if (dev_priv->current_page == 0) { ++ OUT_RING(dev_priv->back_offset); ++ dev_priv->current_page = 1; ++ } else { ++ OUT_RING(dev_priv->front_offset); ++ dev_priv->current_page = 0; ++ } ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ BEGIN_LP_RING(2); ++ OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ /* Increment the frame counter. The client-side 3D driver must ++ * throttle the framerate by waiting for this value before ++ * performing the swapbuffer ioctl. 
++ */ ++ dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; ++ ++} ++ ++static void i810_dma_quiescent(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ i810_kernel_lost_context(dev); ++ ++ BEGIN_LP_RING(4); ++ OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); ++ OUT_RING(CMD_REPORT_HEAD); ++ OUT_RING(0); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ i810_wait_ring(dev, dev_priv->ring.Size - 8); ++} ++ ++static int i810_flush_queue(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ int i, ret = 0; ++ RING_LOCALS; ++ ++ i810_kernel_lost_context(dev); ++ ++ BEGIN_LP_RING(2); ++ OUT_RING(CMD_REPORT_HEAD); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ i810_wait_ring(dev, dev_priv->ring.Size - 8); ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ ++ int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE, ++ I810_BUF_FREE); ++ ++ if (used == I810_BUF_HARDWARE) ++ DRM_DEBUG("reclaimed from HARDWARE\n"); ++ if (used == I810_BUF_CLIENT) ++ DRM_DEBUG("still on client\n"); ++ } ++ ++ return ret; ++} ++ ++/* Must be called with the lock held */ ++static void i810_reclaim_buffers(struct drm_device *dev, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int i; ++ ++ if (!dma) ++ return; ++ if (!dev->dev_private) ++ return; ++ if (!dma->buflist) ++ return; ++ ++ i810_flush_queue(dev); ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ ++ if (buf->file_priv == file_priv && buf_priv) { ++ int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, ++ I810_BUF_FREE); ++ ++ if (used == I810_BUF_CLIENT) ++ DRM_DEBUG("reclaimed from client\n"); ++ if (buf_priv->currently_mapped == I810_BUF_MAPPED) ++ buf_priv->currently_mapped = I810_BUF_UNMAPPED; ++ } ++ } ++} ++ ++static int i810_flush_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ i810_flush_queue(dev); ++ return 0; ++} ++ ++static int i810_dma_vertex(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ u32 *hw_status = dev_priv->hw_status_page; ++ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) ++ dev_priv->sarea_priv; ++ drm_i810_vertex_t *vertex = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ DRM_DEBUG("idx %d used %d discard %d\n", ++ vertex->idx, vertex->used, vertex->discard); ++ ++ if (vertex->idx < 0 || vertex->idx > dma->buf_count) ++ return -EINVAL; ++ ++ i810_dma_dispatch_vertex(dev, ++ dma->buflist[vertex->idx], ++ vertex->discard, vertex->used); ++ ++ atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); ++ atomic_inc(&dev->counts[_DRM_STAT_DMA]); ++ sarea_priv->last_enqueue = dev_priv->counter - 1; ++ sarea_priv->last_dispatch = (int)hw_status[5]; ++ ++ return 0; ++} ++ ++static int i810_clear_bufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_clear_t *clear = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* GH: Someone's doing nasty things... 
*/ ++ if (!dev->dev_private) { ++ return -EINVAL; ++ } ++ ++ i810_dma_dispatch_clear(dev, clear->flags, ++ clear->clear_color, clear->clear_depth); ++ return 0; ++} ++ ++static int i810_swap_bufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ i810_dma_dispatch_swap(dev); ++ return 0; ++} ++ ++static int i810_getage(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ u32 *hw_status = dev_priv->hw_status_page; ++ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) ++ dev_priv->sarea_priv; ++ ++ sarea_priv->last_dispatch = (int)hw_status[5]; ++ return 0; ++} ++ ++static int i810_getbuf(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ int retcode = 0; ++ drm_i810_dma_t *d = data; ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ u32 *hw_status = dev_priv->hw_status_page; ++ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) ++ dev_priv->sarea_priv; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ d->granted = 0; ++ ++ retcode = i810_dma_get_buffer(dev, d, file_priv); ++ ++ DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", ++ current->pid, retcode, d->granted); ++ ++ sarea_priv->last_dispatch = (int)hw_status[5]; ++ ++ return retcode; ++} ++ ++static int i810_copybuf(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ /* Never copy - 2.4.x doesn't need it */ ++ return 0; ++} ++ ++static int i810_docopy(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ /* Never copy - 2.4.x doesn't need it */ ++ return 0; ++} ++ ++static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used, ++ unsigned int last_render) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ drm_i810_buf_priv_t *buf_priv = buf->dev_private; ++ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ unsigned long address = (unsigned long)buf->bus_address; ++ unsigned long start = address - dev->agp->base; ++ int u; ++ RING_LOCALS; ++ ++ i810_kernel_lost_context(dev); ++ ++ u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE); ++ if (u != I810_BUF_CLIENT) { ++ DRM_DEBUG("MC found buffer that isn't mine!\n"); ++ } ++ ++ if (used > 4 * 1024) ++ used = 0; ++ ++ sarea_priv->dirty = 0x7f; ++ ++ DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used); ++ ++ dev_priv->counter++; ++ DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter); ++ DRM_DEBUG("start : %lx\n", start); ++ DRM_DEBUG("used : %d\n", used); ++ DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4); ++ ++ if (buf_priv->currently_mapped == I810_BUF_MAPPED) { ++ if (used & 4) { ++ *(u32 *) ((char *) buf_priv->virtual + used) = 0; ++ used += 4; ++ } ++ ++ i810_unmap_buffer(buf); ++ } ++ BEGIN_LP_RING(4); ++ OUT_RING(CMD_OP_BATCH_BUFFER); ++ OUT_RING(start | BB1_PROTECTED); ++ OUT_RING(start + used - 4); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ BEGIN_LP_RING(8); ++ OUT_RING(CMD_STORE_DWORD_IDX); ++ OUT_RING(buf_priv->my_use_idx); ++ OUT_RING(I810_BUF_FREE); ++ OUT_RING(0); ++ ++ OUT_RING(CMD_STORE_DWORD_IDX); ++ OUT_RING(16); ++ OUT_RING(last_render); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++} ++ ++static int i810_dma_mc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ u32 *hw_status = 
dev_priv->hw_status_page; ++ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) ++ dev_priv->sarea_priv; ++ drm_i810_mc_t *mc = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (mc->idx >= dma->buf_count || mc->idx < 0) ++ return -EINVAL; ++ ++ i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, ++ mc->last_render); ++ ++ atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); ++ atomic_inc(&dev->counts[_DRM_STAT_DMA]); ++ sarea_priv->last_enqueue = dev_priv->counter - 1; ++ sarea_priv->last_dispatch = (int)hw_status[5]; ++ ++ return 0; ++} ++ ++static int i810_rstatus(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ ++ return (int)(((u32 *) (dev_priv->hw_status_page))[4]); ++} ++ ++static int i810_ov0_info(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ drm_i810_overlay_t *ov = data; ++ ++ ov->offset = dev_priv->overlay_offset; ++ ov->physical = dev_priv->overlay_physical; ++ ++ return 0; ++} ++ ++static int i810_fstatus(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ return I810_READ(0x30008); ++} ++ ++static int i810_ov0_flip(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ //Tell the overlay to update ++ I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000); ++ ++ return 0; ++} ++ ++/* Not sure why this isn't set all the time: ++ */ ++static void i810_do_init_pageflip(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ dev_priv->page_flipping = 1; ++ dev_priv->current_page = 0; ++ dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; ++} ++ ++static int i810_do_cleanup_pageflip(struct drm_device * dev) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ if (dev_priv->current_page != 0) ++ i810_dma_dispatch_flip(dev); ++ ++ dev_priv->page_flipping = 0; ++ return 0; ++} ++ ++static int i810_flip_bufs(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv->page_flipping) ++ i810_do_init_pageflip(dev); ++ ++ i810_dma_dispatch_flip(dev); ++ return 0; ++} ++ ++int i810_driver_load(struct drm_device *dev, unsigned long flags) ++{ ++ /* i810 has 4 more counters */ ++ dev->counters += 4; ++ dev->types[6] = _DRM_STAT_IRQ; ++ dev->types[7] = _DRM_STAT_PRIMARY; ++ dev->types[8] = _DRM_STAT_SECONDARY; ++ dev->types[9] = _DRM_STAT_DMA; ++ ++ return 0; ++} ++ ++void i810_driver_lastclose(struct drm_device * dev) ++{ ++ i810_dma_cleanup(dev); ++} ++ ++void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) ++{ ++ if (dev->dev_private) { ++ drm_i810_private_t *dev_priv = dev->dev_private; ++ if (dev_priv->page_flipping) { ++ i810_do_cleanup_pageflip(dev); ++ } ++ } ++} ++ ++void i810_driver_reclaim_buffers_locked(struct drm_device * dev, ++ struct drm_file *file_priv) ++{ ++ i810_reclaim_buffers(dev, file_priv); ++} ++ ++int i810_driver_dma_quiescent(struct drm_device * dev) ++{ ++ i810_dma_quiescent(dev); ++ return 0; ++} 
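++
++/* Dispatch table for the i810-specific ioctls.  Entries marked DRM_AUTH are
++ * open to any authenticated DRM client; the DMA-init and MC submission
++ * paths additionally carry DRM_MASTER|DRM_ROOT_ONLY, restricting them to
++ * the privileged master (normally the X server).
++ */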
++
++struct drm_ioctl_desc i810_ioctls[] = {
++	DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++	DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++	DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH)
++};
++
++int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
++
++/**
++ * Determine if the device really is AGP or not.
++ *
++ * All Intel graphics chipsets are treated as AGP, even if they are really
++ * PCI-e.
++ *
++ * \param dev The device to be tested.
++ *
++ * \returns
++ * A value of 1 is always returned to indicate every i810 is AGP.
++ */
++int i810_driver_device_is_agp(struct drm_device * dev)
++{
++	return 1;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i810_drm.h git-nokia/drivers/gpu/drm-tungsten/i810_drm.h
+--- git/drivers/gpu/drm-tungsten/i810_drm.h	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i810_drm.h	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,263 @@
++#ifndef _I810_DRM_H_
++#define _I810_DRM_H_
++
++/* WARNING: These defines must be the same as what the Xserver uses.
++ * if you change them, you must change the defines in the Xserver. 
++ */
++
++#ifndef _I810_DEFINES_
++#define _I810_DEFINES_
++
++#define I810_DMA_BUF_ORDER	12
++#define I810_DMA_BUF_SZ		(1 << I810_DMA_BUF_ORDER)
++ *	    Jeff Hartmann
++ *	    Gareth Hughes
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i810_drm.h"
++#include "i810_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++	i810_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++	.driver_features =
++	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
++	    DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
++	.dev_priv_size = sizeof(drm_i810_buf_priv_t),
++	.load = i810_driver_load,
++	.lastclose = i810_driver_lastclose,
++	.preclose = i810_driver_preclose,
++	.device_is_agp = i810_driver_device_is_agp,
++	.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
++	.dma_quiescent = i810_driver_dma_quiescent,
++	.get_map_ofs = drm_core_get_map_ofs,
++	.get_reg_ofs = drm_core_get_reg_ofs,
++	.ioctls = i810_ioctls,
++	.fops = {
++		.owner = THIS_MODULE,
++		.open = drm_open,
++		.release = drm_release,
++		.ioctl = drm_ioctl,
++		.mmap = drm_mmap,
++		.poll = drm_poll,
++		.fasync = drm_fasync,
++	},
++	.pci_driver = {
++		.name = DRIVER_NAME,
++		.id_table = pciidlist,
++		.probe = probe,
++		.remove = __devexit_p(drm_cleanup_pci),
++	},
++
++	.name = DRIVER_NAME,
++	.desc = DRIVER_DESC,
++	.date = DRIVER_DATE,
++	.major = DRIVER_MAJOR,
++	.minor = DRIVER_MINOR,
++	.patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++	return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init i810_init(void)
++{
++	driver.num_ioctls = i810_max_ioctl;
++	return drm_init(&driver, pciidlist);
++}
++
++static void __exit i810_exit(void)
++{
++	drm_exit(&driver);
++}
++
++module_init(i810_init);
++module_exit(i810_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/i810_drv.h git-nokia/drivers/gpu/drm-tungsten/i810_drv.h
+--- git/drivers/gpu/drm-tungsten/i810_drv.h	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i810_drv.h	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,242 @@
++/* i810_drv.h -- Private header for the Intel i810 driver -*- linux-c -*-
++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Rickard E. (Rik) Faith ++ * Jeff Hartmann ++ * ++ */ ++ ++#ifndef _I810_DRV_H_ ++#define _I810_DRV_H_ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "VA Linux Systems Inc." ++ ++#define DRIVER_NAME "i810" ++#define DRIVER_DESC "Intel i810" ++#define DRIVER_DATE "20030605" ++ ++/* Interface history ++ * ++ * 1.1 - XFree86 4.1 ++ * 1.2 - XvMC interfaces ++ * - XFree86 4.2 ++ * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility) ++ * - Remove requirement for interrupt (leave stubs again) ++ * 1.3 - Add page flipping. ++ * 1.4 - fix DRM interface ++ */ ++#define DRIVER_MAJOR 1 ++#define DRIVER_MINOR 4 ++#define DRIVER_PATCHLEVEL 0 ++ ++typedef struct drm_i810_buf_priv { ++ u32 *in_use; ++ int my_use_idx; ++ int currently_mapped; ++ void *virtual; ++ void *kernel_virtual; ++ drm_local_map_t map; ++} drm_i810_buf_priv_t; ++ ++typedef struct _drm_i810_ring_buffer { ++ int tail_mask; ++ unsigned long Start; ++ unsigned long End; ++ unsigned long Size; ++ u8 *virtual_start; ++ int head; ++ int tail; ++ int space; ++ drm_local_map_t map; ++} drm_i810_ring_buffer_t; ++ ++typedef struct drm_i810_private { ++ struct drm_map *sarea_map; ++ struct drm_map *mmio_map; ++ ++ drm_i810_sarea_t *sarea_priv; ++ drm_i810_ring_buffer_t ring; ++ ++ void *hw_status_page; ++ unsigned long counter; ++ ++ dma_addr_t dma_status_page; ++ ++ struct drm_buf *mmap_buffer; ++ ++ u32 front_di1, back_di1, zi1; ++ ++ int back_offset; ++ int depth_offset; ++ int overlay_offset; ++ int overlay_physical; ++ int w, h; ++ int pitch; ++ int back_pitch; ++ int depth_pitch; ++ ++ int do_boxes; ++ int dma_used; ++ ++ int current_page; ++ int page_flipping; ++ ++ wait_queue_head_t irq_queue; ++ atomic_t irq_received; ++ atomic_t irq_emitted; ++ ++ int front_offset; ++} drm_i810_private_t; ++ ++ /* i810_dma.c */ ++extern int i810_driver_dma_quiescent(struct drm_device * dev); ++extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, ++ struct drm_file *file_priv); ++extern int i810_driver_load(struct drm_device *, unsigned long flags); ++extern void i810_driver_lastclose(struct drm_device * dev); ++extern void i810_driver_preclose(struct drm_device * dev, ++ struct drm_file *file_priv); ++extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, ++ struct drm_file *file_priv); ++extern int i810_driver_device_is_agp(struct drm_device * dev); ++ ++extern struct drm_ioctl_desc i810_ioctls[]; ++extern int i810_max_ioctl; ++ ++#define I810_BASE(reg) ((unsigned long) \ ++ dev_priv->mmio_map->handle) ++#define I810_ADDR(reg) (I810_BASE(reg) + reg) ++#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg) ++#define I810_READ(reg) I810_DEREF(reg) ++#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0) ++#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg) ++#define I810_READ16(reg) I810_DEREF16(reg) ++#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0) ++ ++#define I810_VERBOSE 0 ++#define RING_LOCALS unsigned int outring, ringmask; \ ++ volatile char *virt; ++ ++#define BEGIN_LP_RING(n) do { \ ++ if (I810_VERBOSE) \ ++ DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \ ++ if (dev_priv->ring.space < n*4) \ ++ i810_wait_ring(dev, n*4); \ ++ 
dev_priv->ring.space -= n*4; \ ++ outring = dev_priv->ring.tail; \ ++ ringmask = dev_priv->ring.tail_mask; \ ++ virt = dev_priv->ring.virtual_start; \ ++} while (0) ++ ++#define ADVANCE_LP_RING() do { \ ++ if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ ++ dev_priv->ring.tail = outring; \ ++ I810_WRITE(LP_RING + RING_TAIL, outring); \ ++} while(0) ++ ++#define OUT_RING(n) do { \ ++ if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ ++ *(volatile unsigned int *)(virt + outring) = n; \ ++ outring += 4; \ ++ outring &= ringmask; \ ++} while (0) ++ ++#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) ++#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) ++#define CMD_REPORT_HEAD (7<<23) ++#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) ++#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1) ++ ++#define INST_PARSER_CLIENT 0x00000000 ++#define INST_OP_FLUSH 0x02000000 ++#define INST_FLUSH_MAP_CACHE 0x00000001 ++ ++#define BB1_START_ADDR_MASK (~0x7) ++#define BB1_PROTECTED (1<<0) ++#define BB1_UNPROTECTED (0<<0) ++#define BB2_END_ADDR_MASK (~0x7) ++ ++#define I810REG_HWSTAM 0x02098 ++#define I810REG_INT_IDENTITY_R 0x020a4 ++#define I810REG_INT_MASK_R 0x020a8 ++#define I810REG_INT_ENABLE_R 0x020a0 ++ ++#define LP_RING 0x2030 ++#define HP_RING 0x2040 ++#define RING_TAIL 0x00 ++#define TAIL_ADDR 0x000FFFF8 ++#define RING_HEAD 0x04 ++#define HEAD_WRAP_COUNT 0xFFE00000 ++#define HEAD_WRAP_ONE 0x00200000 ++#define HEAD_ADDR 0x001FFFFC ++#define RING_START 0x08 ++#define START_ADDR 0x00FFFFF8 ++#define RING_LEN 0x0C ++#define RING_NR_PAGES 0x000FF000 ++#define RING_REPORT_MASK 0x00000006 ++#define RING_REPORT_64K 0x00000002 ++#define RING_REPORT_128K 0x00000004 ++#define RING_NO_REPORT 0x00000000 ++#define RING_VALID_MASK 0x00000001 ++#define RING_VALID 0x00000001 ++#define RING_INVALID 0x00000000 ++ ++#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) ++#define SC_UPDATE_SCISSOR (0x1<<1) ++#define SC_ENABLE_MASK (0x1<<0) ++#define SC_ENABLE (0x1<<0) ++ ++#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) ++#define SCI_YMIN_MASK (0xffff<<16) ++#define SCI_XMIN_MASK (0xffff<<0) ++#define SCI_YMAX_MASK (0xffff<<16) ++#define SCI_XMAX_MASK (0xffff<<0) ++ ++#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) ++#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) ++#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x2) ++#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) ++#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) ++#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24)) ++ ++#define CMD_OP_Z_BUFFER_INFO ((0x0<<29)|(0x16<<23)) ++#define CMD_OP_DESTBUFFER_INFO ((0x0<<29)|(0x15<<23)) ++#define CMD_OP_FRONTBUFFER_INFO ((0x0<<29)|(0x14<<23)) ++#define CMD_OP_WAIT_FOR_EVENT ((0x0<<29)|(0x03<<23)) ++ ++#define BR00_BITBLT_CLIENT 0x40000000 ++#define BR00_OP_COLOR_BLT 0x10000000 ++#define BR00_OP_SRC_COPY_BLT 0x10C00000 ++#define BR13_SOLID_PATTERN 0x80000000 ++ ++#define WAIT_FOR_PLANE_A_SCANLINES (1<<1) ++#define WAIT_FOR_PLANE_A_FLIP (1<<2) ++#define WAIT_FOR_VBLANK (1<<3) ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/i915_buffer.c git-nokia/drivers/gpu/drm-tungsten/i915_buffer.c +--- git/drivers/gpu/drm-tungsten/i915_buffer.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_buffer.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,303 @@ ++/************************************************************************** ++ * ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA ++ * All 
Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * ++ **************************************************************************/ ++/* ++ * Authors: Thomas Hellström ++ */ ++ ++#include "drmP.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev) ++{ ++ return drm_agp_init_ttm(dev); ++} ++ ++int i915_fence_type(struct drm_buffer_object *bo, ++ uint32_t *fclass, ++ uint32_t *type) ++{ ++ if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) ++ *type = 3; ++ else ++ *type = 1; ++ return 0; ++} ++ ++int i915_invalidate_caches(struct drm_device *dev, uint64_t flags) ++{ ++ /* ++ * FIXME: Only emit once per batchbuffer submission. 
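++ * One way to get there, sketched against a hypothetical per-device
++ * flag that the batchbuffer dispatch path would clear (the flag name
++ * is made up; only i915_emit_mi_flush() exists today):
++ *
++ *	if (!dev_priv->flush_emitted) {
++ *		dev_priv->flush_emitted = 1;
++ *		return i915_emit_mi_flush(dev, flush_cmd);
++ *	}
++ *	return 0;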
++ */ ++ ++ uint32_t flush_cmd = MI_NO_WRITE_FLUSH; ++ ++ if (flags & DRM_BO_FLAG_READ) ++ flush_cmd |= MI_READ_FLUSH; ++ if (flags & DRM_BO_FLAG_EXE) ++ flush_cmd |= MI_EXE_FLUSH; ++ ++ return i915_emit_mi_flush(dev, flush_cmd); ++} ++ ++int i915_init_mem_type(struct drm_device *dev, uint32_t type, ++ struct drm_mem_type_manager *man) ++{ ++ switch (type) { ++ case DRM_BO_MEM_LOCAL: ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CACHED; ++ man->drm_bus_maptype = 0; ++ man->gpu_offset = 0; ++ break; ++ case DRM_BO_MEM_TT: ++ if (!(drm_core_has_AGP(dev) && dev->agp)) { ++ DRM_ERROR("AGP is not enabled for memory type %u\n", ++ (unsigned)type); ++ return -EINVAL; ++ } ++ man->io_offset = dev->agp->agp_info.aper_base; ++ man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; ++ man->io_addr = NULL; ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP; ++ man->drm_bus_maptype = _DRM_AGP; ++ man->gpu_offset = 0; ++ break; ++ case DRM_BO_MEM_PRIV0: ++ if (!(drm_core_has_AGP(dev) && dev->agp)) { ++ DRM_ERROR("AGP is not enabled for memory type %u\n", ++ (unsigned)type); ++ return -EINVAL; ++ } ++ man->io_offset = dev->agp->agp_info.aper_base; ++ man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; ++ man->io_addr = NULL; ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP; ++ man->drm_bus_maptype = _DRM_AGP; ++ man->gpu_offset = 0; ++ break; ++ default: ++ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++/* ++ * i915_evict_flags: ++ * ++ * @bo: the buffer object to be evicted ++ * ++ * Return the bo flags for a buffer which is not mapped to the hardware. ++ * These will be placed in proposed_flags so that when the move is ++ * finished, they'll end up in bo->mem.flags ++ */ ++uint64_t i915_evict_flags(struct drm_buffer_object *bo) ++{ ++ switch (bo->mem.mem_type) { ++ case DRM_BO_MEM_LOCAL: ++ case DRM_BO_MEM_TT: ++ return DRM_BO_FLAG_MEM_LOCAL; ++ default: ++ return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; ++ } ++} ++ ++#if 0 /* See comment below */ ++ ++static void i915_emit_copy_blit(struct drm_device * dev, ++ uint32_t src_offset, ++ uint32_t dst_offset, ++ uint32_t pages, int direction) ++{ ++ uint32_t cur_pages; ++ uint32_t stride = PAGE_SIZE; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ if (!dev_priv) ++ return; ++ ++ i915_kernel_lost_context(dev); ++ while (pages > 0) { ++ cur_pages = pages; ++ if (cur_pages > 2048) ++ cur_pages = 2048; ++ pages -= cur_pages; ++ ++ BEGIN_LP_RING(6); ++ OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | ++ XY_SRC_COPY_BLT_WRITE_RGB); ++ OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) | ++ (1 << 25) | (direction ? 
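++				/* direction comes from i915_move_blit()
++				 * below, set when the copy overlaps within
++				 * one memory type; bit 30 presumably flips
++				 * the blitter to a descending copy so the
++				 * overlap stays safe: */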
(1 << 30) : 0)); ++ OUT_RING((cur_pages << 16) | PAGE_SIZE); ++ OUT_RING(dst_offset); ++ OUT_RING(stride & 0xffff); ++ OUT_RING(src_offset); ++ ADVANCE_LP_RING(); ++ } ++ return; ++} ++ ++static int i915_move_blit(struct drm_buffer_object * bo, ++ int evict, int no_wait, struct drm_bo_mem_reg * new_mem) ++{ ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ int dir = 0; ++ ++ if ((old_mem->mem_type == new_mem->mem_type) && ++ (new_mem->mm_node->start < ++ old_mem->mm_node->start + old_mem->mm_node->size)) { ++ dir = 1; ++ } ++ ++ i915_emit_copy_blit(bo->dev, ++ old_mem->mm_node->start << PAGE_SHIFT, ++ new_mem->mm_node->start << PAGE_SHIFT, ++ new_mem->num_pages, dir); ++ ++ i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH); ++ ++ return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0, ++ DRM_FENCE_TYPE_EXE | ++ DRM_I915_FENCE_TYPE_RW, ++ DRM_I915_FENCE_FLAG_FLUSHED, new_mem); ++} ++ ++/* ++ * Flip destination ttm into cached-coherent AGP, ++ * then blit and subsequently move out again. ++ */ ++ ++static int i915_move_flip(struct drm_buffer_object * bo, ++ int evict, int no_wait, struct drm_bo_mem_reg * new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg tmp_mem; ++ int ret; ++ ++ tmp_mem = *new_mem; ++ tmp_mem.mm_node = NULL; ++ tmp_mem.mask = DRM_BO_FLAG_MEM_TT | ++ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING; ++ ++ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); ++ if (ret) ++ return ret; ++ ++ ret = drm_bind_ttm(bo->ttm, &tmp_mem); ++ if (ret) ++ goto out_cleanup; ++ ++ ret = i915_move_blit(bo, 1, no_wait, &tmp_mem); ++ if (ret) ++ goto out_cleanup; ++ ++ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); ++out_cleanup: ++ if (tmp_mem.mm_node) { ++ mutex_lock(&dev->struct_mutex); ++ if (tmp_mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(tmp_mem.mm_node); ++ tmp_mem.mm_node = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ } ++ return ret; ++} ++ ++#endif ++ ++/* ++ * Disable i915_move_flip for now, since we can't guarantee that the hardware ++ * lock is held here. To re-enable we need to make sure either ++ * a) The X server is using DRM to submit commands to the ring, or ++ * b) DRM can use the HP ring for these blits. This means i915 needs to ++ * implement a new ring submission mechanism and fence class. 
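++ *
++ * With either in place, each branch of i915_move() below could try the
++ * accelerated path first and keep memcpy only as the fallback, roughly
++ * (a sketch of the intent, not enabled here):
++ *
++ *	ret = i915_move_blit(bo, evict, no_wait, new_mem);
++ *	if (ret)
++ *		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++ *	return 0;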
++ */ ++int i915_move(struct drm_buffer_object *bo, ++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ ++ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/ ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } else { ++ if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/ ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } ++ return 0; ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) ++static inline void clflush(volatile void *__p) ++{ ++ asm volatile("clflush %0" : "+m" (*(char __force *)__p)); ++} ++#endif ++ ++static inline void drm_cache_flush_addr(void *virt) ++{ ++ int i; ++ ++ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) ++ clflush(virt+i); ++} ++ ++static inline void drm_cache_flush_page(struct page *p) ++{ ++ drm_cache_flush_addr(page_address(p)); ++} ++ ++void i915_flush_ttm(struct drm_ttm *ttm) ++{ ++ int i; ++ ++ if (!ttm) ++ return; ++ ++ DRM_MEMORYBARRIER(); ++ ++#ifdef CONFIG_X86_32 ++ /* Hopefully nobody has built an x86-64 processor without clflush */ ++ if (!cpu_has_clflush) { ++ wbinvd(); ++ DRM_MEMORYBARRIER(); ++ return; ++ } ++#endif ++ ++ for (i = ttm->num_pages - 1; i >= 0; i--) ++ drm_cache_flush_page(drm_ttm_get_page(ttm, i)); ++ ++ DRM_MEMORYBARRIER(); ++} +diff -Nurd git/drivers/gpu/drm-tungsten/i915_compat.c git-nokia/drivers/gpu/drm-tungsten/i915_compat.c +--- git/drivers/gpu/drm-tungsten/i915_compat.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_compat.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,215 @@ ++#include "drmP.h" ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ++ ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970 ++#define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980 ++#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990 ++#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0 ++#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 ++#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 ++#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC ++#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 ++#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0 ++#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 ++ ++#define I915_IFPADDR 0x60 ++#define I965_IFPADDR 0x70 ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ++#define upper_32_bits(_val) (((u64)(_val)) >> 32) ++#endif ++ ++static struct _i9xx_private_compat { ++ void __iomem *flush_page; ++ int resource_valid; ++ struct resource ifp_resource; ++} i9xx_private; ++ ++static struct _i8xx_private_compat { ++ void *flush_page; ++ struct page *page; ++} i8xx_private; ++ ++static void ++intel_compat_align_resource(void *data, struct resource *res, ++ resource_size_t size, resource_size_t align) ++{ ++ return; ++} ++ ++ ++static int intel_alloc_chipset_flush_resource(struct pci_dev *pdev) ++{ ++ int ret; ++ ret = pci_bus_alloc_resource(pdev->bus, &i9xx_private.ifp_resource, PAGE_SIZE, ++ PAGE_SIZE, PCIBIOS_MIN_MEM, 0, ++ intel_compat_align_resource, pdev); ++ if (ret != 0) ++ return ret; ++ ++ return 0; ++} ++ ++static void intel_i915_setup_chipset_flush(struct pci_dev *pdev) ++{ ++ int ret; ++ u32 temp; ++ ++ pci_read_config_dword(pdev, I915_IFPADDR, &temp); ++ if (!(temp & 0x1)) { ++ intel_alloc_chipset_flush_resource(pdev); ++ i9xx_private.resource_valid = 1; ++ pci_write_config_dword(pdev, I915_IFPADDR, 
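++			       /* bit 0 of IFPADDR doubles as the valid
++				* flag (tested as temp & 0x1 above), so OR
++				* in 0x1 when arming the freshly allocated
++				* flush page: */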
(i9xx_private.ifp_resource.start & 0xffffffff) | 0x1); ++ } else { ++ temp &= ~1; ++ ++ i9xx_private.resource_valid = 1; ++ i9xx_private.ifp_resource.start = temp; ++ i9xx_private.ifp_resource.end = temp + PAGE_SIZE; ++ ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource); ++ if (ret) { ++ i9xx_private.resource_valid = 0; ++ printk("Failed inserting resource into tree\n"); ++ } ++ } ++} ++ ++static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev) ++{ ++ u32 temp_hi, temp_lo; ++ int ret; ++ ++ pci_read_config_dword(pdev, I965_IFPADDR + 4, &temp_hi); ++ pci_read_config_dword(pdev, I965_IFPADDR, &temp_lo); ++ ++ if (!(temp_lo & 0x1)) { ++ ++ intel_alloc_chipset_flush_resource(pdev); ++ ++ i9xx_private.resource_valid = 1; ++ pci_write_config_dword(pdev, I965_IFPADDR + 4, ++ upper_32_bits(i9xx_private.ifp_resource.start)); ++ pci_write_config_dword(pdev, I965_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1); ++ } else { ++ u64 l64; ++ ++ temp_lo &= ~0x1; ++ l64 = ((u64)temp_hi << 32) | temp_lo; ++ ++ i9xx_private.resource_valid = 1; ++ i9xx_private.ifp_resource.start = l64; ++ i9xx_private.ifp_resource.end = l64 + PAGE_SIZE; ++ ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource); ++ if (ret) { ++ i9xx_private.resource_valid = 0; ++ printk("Failed inserting resource into tree\n"); ++ } ++ } ++} ++ ++static void intel_i8xx_fini_flush(struct drm_device *dev) ++{ ++ kunmap(i8xx_private.page); ++ i8xx_private.flush_page = NULL; ++ unmap_page_from_agp(i8xx_private.page); ++ flush_agp_mappings(); ++ ++ __free_page(i8xx_private.page); ++} ++ ++static void intel_i8xx_setup_flush(struct drm_device *dev) ++{ ++ ++ i8xx_private.page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); ++ if (!i8xx_private.page) { ++ return; ++ } ++ ++ /* make page uncached */ ++ map_page_into_agp(i8xx_private.page); ++ flush_agp_mappings(); ++ ++ i8xx_private.flush_page = kmap(i8xx_private.page); ++ if (!i8xx_private.flush_page) ++ intel_i8xx_fini_flush(dev); ++} ++ ++ ++static void intel_i8xx_flush_page(struct drm_device *dev) ++{ ++ unsigned int *pg = i8xx_private.flush_page; ++ int i; ++ ++ /* HAI NUT CAN I HAZ HAMMER?? 
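++	 * (Less lolcat: 8xx chipsets have no flush register, so the
++	 * driver hammers a run of writes through this uncached page to
++	 * push the chipset write buffers out to memory.)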
*/ ++ for (i = 0; i < 256; i++) ++ *(pg + i) = i; ++ ++ DRM_MEMORYBARRIER(); ++} ++ ++static void intel_i9xx_setup_flush(struct drm_device *dev) ++{ ++ struct pci_dev *agp_dev = dev->agp->agp_info.device; ++ ++ i9xx_private.ifp_resource.name = "GMCH IFPBAR"; ++ i9xx_private.ifp_resource.flags = IORESOURCE_MEM; ++ ++ /* Setup chipset flush for 915 */ ++ if (IS_I965G(dev) || IS_G33(dev)) { ++ intel_i965_g33_setup_chipset_flush(agp_dev); ++ } else { ++ intel_i915_setup_chipset_flush(agp_dev); ++ } ++ ++ if (i9xx_private.ifp_resource.start) { ++ i9xx_private.flush_page = ioremap_nocache(i9xx_private.ifp_resource.start, PAGE_SIZE); ++ if (!i9xx_private.flush_page) ++ printk("unable to ioremap flush page - no chipset flushing"); ++ } ++} ++ ++static void intel_i9xx_fini_flush(struct drm_device *dev) ++{ ++ iounmap(i9xx_private.flush_page); ++ if (i9xx_private.resource_valid) ++ release_resource(&i9xx_private.ifp_resource); ++ i9xx_private.resource_valid = 0; ++} ++ ++static void intel_i9xx_flush_page(struct drm_device *dev) ++{ ++ if (i9xx_private.flush_page) ++ writel(1, i9xx_private.flush_page); ++} ++ ++void intel_init_chipset_flush_compat(struct drm_device *dev) ++{ ++ /* not flush on i8xx */ ++ if (IS_I9XX(dev)) ++ intel_i9xx_setup_flush(dev); ++ else ++ intel_i8xx_setup_flush(dev); ++ ++} ++ ++void intel_fini_chipset_flush_compat(struct drm_device *dev) ++{ ++ /* not flush on i8xx */ ++ if (IS_I9XX(dev)) ++ intel_i9xx_fini_flush(dev); ++ else ++ intel_i8xx_fini_flush(dev); ++} ++ ++void drm_agp_chipset_flush(struct drm_device *dev) ++{ ++ if (IS_I9XX(dev)) ++ intel_i9xx_flush_page(dev); ++ else ++ intel_i8xx_flush_page(dev); ++} ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/i915_dma.c git-nokia/drivers/gpu/drm-tungsten/i915_dma.c +--- git/drivers/gpu/drm-tungsten/i915_dma.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_dma.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1276 @@ ++/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- ++ */ ++/* ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++/* Really want an OS-independent resettable timer. 
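++ * On Linux alone, jiffies would already do, e.g. (a sketch only;
++ * ring_moved() stands in for the head/acthd comparisons made below):
++ *
++ *	unsigned long deadline = jiffies + 3 * HZ;
++ *	while (time_before(jiffies, deadline)) {
++ *		if (ring_moved(ring))
++ *			deadline = jiffies + 3 * HZ;
++ *		if (ring->space >= n)
++ *			return 0;
++ *		udelay(10);
++ *	}
++ *	return -EBUSY;
++ *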
Would like to have ++ * this loop run for (eg) 3 sec, but have the timer reset every time ++ * the head pointer changes, so that EBUSY only happens if the ring ++ * actually stalls for (eg) 3 seconds. ++ */ ++int i915_wait_ring(struct drm_device * dev, int n, const char *caller) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_ring_buffer_t *ring = &(dev_priv->ring); ++ u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; ++ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; ++ u32 last_acthd = I915_READ(acthd_reg); ++ u32 acthd; ++ int i; ++ ++ for (i = 0; i < 100000; i++) { ++ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; ++ acthd = I915_READ(acthd_reg); ++ ring->space = ring->head - (ring->tail + 8); ++ if (ring->space < 0) ++ ring->space += ring->Size; ++ if (ring->space >= n) ++ return 0; ++ ++ if (ring->head != last_head) ++ i = 0; ++ ++ if (acthd != last_acthd) ++ i = 0; ++ ++ last_head = ring->head; ++ last_acthd = acthd; ++ DRM_UDELAY(10 * 1000); ++ } ++ ++ return -EBUSY; ++} ++ ++int i915_init_hardware_status(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_dma_handle_t *dmah; ++ ++ /* Program Hardware Status Page */ ++#ifdef __FreeBSD__ ++ DRM_UNLOCK(); ++#endif ++ dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); ++#ifdef __FreeBSD__ ++ DRM_LOCK(); ++#endif ++ if (!dmah) { ++ DRM_ERROR("Can not allocate hardware status page\n"); ++ return -ENOMEM; ++ } ++ ++ dev_priv->status_page_dmah = dmah; ++ dev_priv->hw_status_page = dmah->vaddr; ++ dev_priv->dma_status_page = dmah->busaddr; ++ ++ memset(dev_priv->hw_status_page, 0, PAGE_SIZE); ++ ++ I915_WRITE(0x02080, dev_priv->dma_status_page); ++ DRM_DEBUG("Enabled hardware status page\n"); ++ return 0; ++} ++ ++void i915_free_hardware_status(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ if (dev_priv->status_page_dmah) { ++ drm_pci_free(dev, dev_priv->status_page_dmah); ++ dev_priv->status_page_dmah = NULL; ++ /* Need to rewrite hardware status page */ ++ I915_WRITE(0x02080, 0x1ffff000); ++ } ++ ++ if (dev_priv->status_gfx_addr) { ++ dev_priv->status_gfx_addr = 0; ++ drm_core_ioremapfree(&dev_priv->hws_map, dev); ++ I915_WRITE(0x02080, 0x1ffff000); ++ } ++} ++ ++#if I915_RING_VALIDATE ++/** ++ * Validate the cached ring tail value ++ * ++ * If the X server writes to the ring and DRM doesn't ++ * reload the head and tail pointers, it will end up writing ++ * data to the wrong place in the ring, causing havoc. ++ */ ++void i915_ring_validate(struct drm_device *dev, const char *func, int line) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_ring_buffer_t *ring = &(dev_priv->ring); ++ u32 tail = I915_READ(PRB0_TAIL) & HEAD_ADDR; ++ u32 head = I915_READ(PRB0_HEAD) & HEAD_ADDR; ++ ++ if (tail != ring->tail) { ++ DRM_ERROR("%s:%d head sw %x, hw %x. 
tail sw %x hw %x\n", ++ func, line, ++ ring->head, head, ring->tail, tail); ++#ifdef __linux__ ++ BUG_ON(1); ++#endif ++ } ++} ++#endif ++ ++void i915_kernel_lost_context(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_ring_buffer_t *ring = &(dev_priv->ring); ++ ++ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; ++ ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; ++ ring->space = ring->head - (ring->tail + 8); ++ if (ring->space < 0) ++ ring->space += ring->Size; ++} ++ ++static int i915_dma_cleanup(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ /* Make sure interrupts are disabled here because the uninstall ioctl ++ * may not have been called from userspace and after dev_private ++ * is freed, it's too late. ++ */ ++ if (dev->irq_enabled) ++ drm_irq_uninstall(dev); ++ ++ if (dev_priv->ring.virtual_start) { ++ drm_core_ioremapfree(&dev_priv->ring.map, dev); ++ dev_priv->ring.virtual_start = 0; ++ dev_priv->ring.map.handle = 0; ++ dev_priv->ring.map.size = 0; ++ } ++ ++ if (I915_NEED_GFX_HWS(dev)) ++ i915_free_hardware_status(dev); ++ ++ return 0; ++} ++ ++#if defined(I915_HAVE_BUFFER) ++#define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16) ++#define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff) ++#define DRI2_SAREA_BLOCK_NEXT(p) \ ++ ((void *) ((unsigned char *) (p) + \ ++ DRI2_SAREA_BLOCK_SIZE(*(unsigned int *) p))) ++ ++#define DRI2_SAREA_BLOCK_END 0x0000 ++#define DRI2_SAREA_BLOCK_LOCK 0x0001 ++#define DRI2_SAREA_BLOCK_EVENT_BUFFER 0x0002 ++ ++static int ++setup_dri2_sarea(struct drm_device * dev, ++ struct drm_file *file_priv, ++ drm_i915_init_t * init) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int ret; ++ unsigned int *p, *end, *next; ++ ++ mutex_lock(&dev->struct_mutex); ++ dev_priv->sarea_bo = ++ drm_lookup_buffer_object(file_priv, ++ init->sarea_handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ ++ if (!dev_priv->sarea_bo) { ++ DRM_ERROR("did not find sarea bo\n"); ++ return -EINVAL; ++ } ++ ++ ret = drm_bo_kmap(dev_priv->sarea_bo, 0, ++ dev_priv->sarea_bo->num_pages, ++ &dev_priv->sarea_kmap); ++ if (ret) { ++ DRM_ERROR("could not map sarea bo\n"); ++ return ret; ++ } ++ ++ p = dev_priv->sarea_kmap.virtual; ++ end = (void *) p + (dev_priv->sarea_bo->num_pages << PAGE_SHIFT); ++ while (p < end && DRI2_SAREA_BLOCK_TYPE(*p) != DRI2_SAREA_BLOCK_END) { ++ switch (DRI2_SAREA_BLOCK_TYPE(*p)) { ++ case DRI2_SAREA_BLOCK_LOCK: ++ dev->lock.hw_lock = (void *) (p + 1); ++ dev->sigdata.lock = dev->lock.hw_lock; ++ break; ++ } ++ next = DRI2_SAREA_BLOCK_NEXT(p); ++ if (next <= p || end < next) { ++ DRM_ERROR("malformed dri2 sarea: next is %p should be within %p-%p\n", ++ next, p, end); ++ return -EINVAL; ++ } ++ p = next; ++ } ++ ++ return 0; ++} ++#endif ++ ++static int i915_initialize(struct drm_device * dev, ++ struct drm_file *file_priv, ++ drm_i915_init_t * init) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++#if defined(I915_HAVE_BUFFER) ++ int ret; ++#endif ++ dev_priv->sarea = drm_getsarea(dev); ++ if (!dev_priv->sarea) { ++ DRM_ERROR("can not find sarea!\n"); ++ i915_dma_cleanup(dev); ++ return -EINVAL; ++ } ++ ++#ifdef I915_HAVE_BUFFER ++ dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS; ++#endif ++ ++ if (init->sarea_priv_offset) ++ dev_priv->sarea_priv = (drm_i915_sarea_t *) ++ ((u8 *) dev_priv->sarea->handle + ++ init->sarea_priv_offset); ++ else { ++ /* No sarea_priv for you! 
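++		 * (The DRI2 init path synchronizes through the
++		 * buffer-object sarea mapped in setup_dri2_sarea()
++		 * instead, which is why every sarea_priv use in this
++		 * file stays NULL-checked.)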
*/ ++ dev_priv->sarea_priv = NULL; ++ } ++ ++ if (init->ring_size != 0) { ++ dev_priv->ring.Size = init->ring_size; ++ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; ++ ++ dev_priv->ring.map.offset = init->ring_start; ++ dev_priv->ring.map.size = init->ring_size; ++ dev_priv->ring.map.type = 0; ++ dev_priv->ring.map.flags = 0; ++ dev_priv->ring.map.mtrr = 0; ++ ++ drm_core_ioremap(&dev_priv->ring.map, dev); ++ ++ if (dev_priv->ring.map.handle == NULL) { ++ i915_dma_cleanup(dev); ++ DRM_ERROR("can not ioremap virtual address for" ++ " ring buffer\n"); ++ return -ENOMEM; ++ } ++ ++ dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ++ } ++ ++ dev_priv->cpp = init->cpp; ++ ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->pf_current_page = 0; ++ ++ /* We are using separate values as placeholders for mechanisms for ++ * private backbuffer/depthbuffer usage. ++ */ ++ ++ /* Allow hardware batchbuffers unless told otherwise. ++ */ ++ dev_priv->allow_batchbuffer = 1; ++ ++ /* Enable vblank on pipe A for older X servers ++ */ ++ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; ++ ++#ifdef I915_HAVE_BUFFER ++ mutex_init(&dev_priv->cmdbuf_mutex); ++#endif ++#if defined(I915_HAVE_BUFFER) ++ if (init->func == I915_INIT_DMA2) { ++ ret = setup_dri2_sarea(dev, file_priv, init); ++ if (ret) { ++ i915_dma_cleanup(dev); ++ DRM_ERROR("could not set up dri2 sarea\n"); ++ return ret; ++ } ++ } ++#endif ++ ++ return 0; ++} ++ ++static int i915_dma_resume(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ if (!dev_priv->sarea) { ++ DRM_ERROR("can not find sarea!\n"); ++ return -EINVAL; ++ } ++ ++ if (dev_priv->ring.map.handle == NULL) { ++ DRM_ERROR("can not ioremap virtual address for" ++ " ring buffer\n"); ++ return -ENOMEM; ++ } ++ ++ /* Program Hardware Status Page */ ++ if (!dev_priv->hw_status_page) { ++ DRM_ERROR("Can not find hardware status page\n"); ++ return -EINVAL; ++ } ++ DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); ++ ++ if (dev_priv->status_gfx_addr != 0) ++ I915_WRITE(0x02080, dev_priv->status_gfx_addr); ++ else ++ I915_WRITE(0x02080, dev_priv->dma_status_page); ++ DRM_DEBUG("Enabled hardware status page\n"); ++ ++ return 0; ++} ++ ++static int i915_dma_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_init_t *init = data; ++ int retcode = 0; ++ ++ switch (init->func) { ++ case I915_INIT_DMA: ++ case I915_INIT_DMA2: ++ retcode = i915_initialize(dev, file_priv, init); ++ break; ++ case I915_CLEANUP_DMA: ++ retcode = i915_dma_cleanup(dev); ++ break; ++ case I915_RESUME_DMA: ++ retcode = i915_dma_resume(dev); ++ break; ++ default: ++ retcode = -EINVAL; ++ break; ++ } ++ ++ return retcode; ++} ++ ++/* Implement basically the same security restrictions as hardware does ++ * for MI_BATCH_NON_SECURE. These can be made stricter at any time. ++ * ++ * Most of the calculations below involve calculating the size of a ++ * particular instruction. It's important to get the size right as ++ * that tells us where the next instruction to check is. Any illegal ++ * instruction detected will be given a size of zero, which is a ++ * signal to abort the rest of the buffer. 
++ */ ++static int do_validate_cmd(int cmd) ++{ ++ switch (((cmd >> 29) & 0x7)) { ++ case 0x0: ++ switch ((cmd >> 23) & 0x3f) { ++ case 0x0: ++ return 1; /* MI_NOOP */ ++ case 0x4: ++ return 1; /* MI_FLUSH */ ++ default: ++ return 0; /* disallow everything else */ ++ } ++ break; ++ case 0x1: ++ return 0; /* reserved */ ++ case 0x2: ++ return (cmd & 0xff) + 2; /* 2d commands */ ++ case 0x3: ++ if (((cmd >> 24) & 0x1f) <= 0x18) ++ return 1; ++ ++ switch ((cmd >> 24) & 0x1f) { ++ case 0x1c: ++ return 1; ++ case 0x1d: ++ switch ((cmd >> 16) & 0xff) { ++ case 0x3: ++ return (cmd & 0x1f) + 2; ++ case 0x4: ++ return (cmd & 0xf) + 2; ++ default: ++ return (cmd & 0xffff) + 2; ++ } ++ case 0x1e: ++ if (cmd & (1 << 23)) ++ return (cmd & 0xffff) + 1; ++ else ++ return 1; ++ case 0x1f: ++ if ((cmd & (1 << 23)) == 0) /* inline vertices */ ++ return (cmd & 0x1ffff) + 2; ++ else if (cmd & (1 << 17)) /* indirect random */ ++ if ((cmd & 0xffff) == 0) ++ return 0; /* unknown length, too hard */ ++ else ++ return (((cmd & 0xffff) + 1) / 2) + 1; ++ else ++ return 2; /* indirect sequential */ ++ default: ++ return 0; ++ } ++ default: ++ return 0; ++ } ++ ++ return 0; ++} ++ ++static int validate_cmd(int cmd) ++{ ++ int ret = do_validate_cmd(cmd); ++ ++/* printk("validate_cmd( %x ): %d\n", cmd, ret); */ ++ ++ return ret; ++} ++ ++static int i915_emit_cmds(struct drm_device *dev, int __user *buffer, ++ int dwords) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int i; ++ RING_LOCALS; ++ ++ if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) ++ return -EINVAL; ++ ++ BEGIN_LP_RING((dwords+1)&~1); ++ ++ for (i = 0; i < dwords;) { ++ int cmd, sz; ++ ++ if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) ++ return -EINVAL; ++ ++ if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) ++ return -EINVAL; ++ ++ OUT_RING(cmd); ++ ++ while (++i, --sz) { ++ if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], ++ sizeof(cmd))) { ++ return -EINVAL; ++ } ++ OUT_RING(cmd); ++ } ++ } ++ ++ if (dwords & 1) ++ OUT_RING(0); ++ ++ ADVANCE_LP_RING(); ++ ++ return 0; ++} ++ ++int i915_emit_box(struct drm_device * dev, ++ struct drm_clip_rect __user * boxes, ++ int i, int DR1, int DR4) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_clip_rect box; ++ RING_LOCALS; ++ ++ if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { ++ return -EFAULT; ++ } ++ ++ if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { ++ DRM_ERROR("Bad box %d,%d..%d,%d\n", ++ box.x1, box.y1, box.x2, box.y2); ++ return -EINVAL; ++ } ++ ++ if (IS_I965G(dev)) { ++ BEGIN_LP_RING(4); ++ OUT_RING(GFX_OP_DRAWRECT_INFO_I965); ++ OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); ++ OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); ++ OUT_RING(DR4); ++ ADVANCE_LP_RING(); ++ } else { ++ BEGIN_LP_RING(6); ++ OUT_RING(GFX_OP_DRAWRECT_INFO); ++ OUT_RING(DR1); ++ OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); ++ OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); ++ OUT_RING(DR4); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } ++ ++ return 0; ++} ++ ++/* XXX: Emitting the counter should really be moved to part of the IRQ ++ * emit. 
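++ * (The breadcrumb itself is the counter stored at dword index 5 of the
++ * hardware status page via MI_STORE_DWORD_INDEX below; READ_BREADCRUMB()
++ * reads it back to tell how far the ring has actually executed.)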
For now, do it in both places: ++ */ ++ ++void i915_emit_breadcrumb(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ if (++dev_priv->counter > BREADCRUMB_MASK) { ++ dev_priv->counter = 1; ++ DRM_DEBUG("Breadcrumb counter wrapped around\n"); ++ } ++ ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->last_enqueue = dev_priv->counter; ++ ++ BEGIN_LP_RING(4); ++ OUT_RING(MI_STORE_DWORD_INDEX); ++ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); ++ OUT_RING(dev_priv->counter); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++} ++ ++ ++int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ uint32_t flush_cmd = MI_FLUSH; ++ RING_LOCALS; ++ ++ flush_cmd |= flush; ++ ++ i915_kernel_lost_context(dev); ++ ++ BEGIN_LP_RING(4); ++ OUT_RING(flush_cmd); ++ OUT_RING(0); ++ OUT_RING(0); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ ++ return 0; ++} ++ ++ ++static int i915_dispatch_cmdbuffer(struct drm_device * dev, ++ drm_i915_cmdbuffer_t * cmd) ++{ ++#ifdef I915_HAVE_FENCE ++ drm_i915_private_t *dev_priv = dev->dev_private; ++#endif ++ int nbox = cmd->num_cliprects; ++ int i = 0, count, ret; ++ ++ if (cmd->sz & 0x3) { ++ DRM_ERROR("alignment\n"); ++ return -EINVAL; ++ } ++ ++ i915_kernel_lost_context(dev); ++ ++ count = nbox ? nbox : 1; ++ ++ for (i = 0; i < count; i++) { ++ if (i < nbox) { ++ ret = i915_emit_box(dev, cmd->cliprects, i, ++ cmd->DR1, cmd->DR4); ++ if (ret) ++ return ret; ++ } ++ ++ ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4); ++ if (ret) ++ return ret; ++ } ++ ++ i915_emit_breadcrumb(dev); ++#ifdef I915_HAVE_FENCE ++ if (unlikely((dev_priv->counter & 0xFF) == 0)) ++ drm_fence_flush_old(dev, 0, dev_priv->counter); ++#endif ++ return 0; ++} ++ ++int i915_dispatch_batchbuffer(struct drm_device * dev, ++ drm_i915_batchbuffer_t * batch) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_clip_rect __user *boxes = batch->cliprects; ++ int nbox = batch->num_cliprects; ++ int i = 0, count; ++ RING_LOCALS; ++ ++ if ((batch->start | batch->used) & 0x7) { ++ DRM_ERROR("alignment\n"); ++ return -EINVAL; ++ } ++ ++ i915_kernel_lost_context(dev); ++ ++ count = nbox ? nbox : 1; ++ ++ for (i = 0; i < count; i++) { ++ if (i < nbox) { ++ int ret = i915_emit_box(dev, boxes, i, ++ batch->DR1, batch->DR4); ++ if (ret) ++ return ret; ++ } ++ ++ if (IS_I830(dev) || IS_845G(dev)) { ++ BEGIN_LP_RING(4); ++ OUT_RING(MI_BATCH_BUFFER); ++ OUT_RING(batch->start | MI_BATCH_NON_SECURE); ++ OUT_RING(batch->start + batch->used - 4); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } else { ++ BEGIN_LP_RING(2); ++ if (IS_I965G(dev)) { ++ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); ++ OUT_RING(batch->start); ++ } else { ++ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); ++ OUT_RING(batch->start | MI_BATCH_NON_SECURE); ++ } ++ ADVANCE_LP_RING(); ++ } ++ } ++ ++ i915_emit_breadcrumb(dev); ++#ifdef I915_HAVE_FENCE ++ if (unlikely((dev_priv->counter & 0xFF) == 0)) ++ drm_fence_flush_old(dev, 0, dev_priv->counter); ++#endif ++ return 0; ++} ++ ++static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ u32 num_pages, current_page, next_page, dspbase; ++ int shift = 2 * plane, x, y; ++ RING_LOCALS; ++ ++ /* Calculate display base offset */ ++ num_pages = dev_priv->sarea_priv->third_handle ? 
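++			/* triple-buffered only when userspace supplied a
++			 * third buffer handle: */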
3 : 2; ++ current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3; ++ next_page = (current_page + 1) % num_pages; ++ ++ switch (next_page) { ++ default: ++ case 0: ++ dspbase = dev_priv->sarea_priv->front_offset; ++ break; ++ case 1: ++ dspbase = dev_priv->sarea_priv->back_offset; ++ break; ++ case 2: ++ dspbase = dev_priv->sarea_priv->third_offset; ++ break; ++ } ++ ++ if (plane == 0) { ++ x = dev_priv->sarea_priv->planeA_x; ++ y = dev_priv->sarea_priv->planeA_y; ++ } else { ++ x = dev_priv->sarea_priv->planeB_x; ++ y = dev_priv->sarea_priv->planeB_y; ++ } ++ ++ dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp; ++ ++ DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page, ++ dspbase); ++ ++ BEGIN_LP_RING(4); ++ OUT_RING(sync ? 0 : ++ (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP : ++ MI_WAIT_FOR_PLANE_A_FLIP))); ++ OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) | ++ (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A)); ++ OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp); ++ OUT_RING(dspbase); ++ ADVANCE_LP_RING(); ++ ++ dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift); ++ dev_priv->sarea_priv->pf_current_page |= next_page << shift; ++} ++ ++void i915_dispatch_flip(struct drm_device * dev, int planes, int sync) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int i; ++ ++ DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n", ++ planes, dev_priv->sarea_priv->pf_current_page); ++ ++ i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH); ++ ++ for (i = 0; i < 2; i++) ++ if (planes & (1 << i)) ++ i915_do_dispatch_flip(dev, i, sync); ++ ++ i915_emit_breadcrumb(dev); ++#ifdef I915_HAVE_FENCE ++ if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0))) ++ drm_fence_flush_old(dev, 0, dev_priv->counter); ++#endif ++} ++ ++int i915_quiescent(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int ret; ++ ++ i915_kernel_lost_context(dev); ++ ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); ++ if (ret) ++ { ++ i915_kernel_lost_context (dev); ++ DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n", ++ dev_priv->ring.head, ++ dev_priv->ring.tail, ++ dev_priv->ring.space); ++ } ++ return ret; ++} ++ ++static int i915_flush_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return i915_quiescent(dev); ++} ++ ++static int i915_batchbuffer(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) ++ dev_priv->sarea_priv; ++ drm_i915_batchbuffer_t *batch = data; ++ int ret; ++ ++ if (!dev_priv->allow_batchbuffer) { ++ DRM_ERROR("Batchbuffer ioctl disabled\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", ++ batch->start, batch->used, batch->num_cliprects); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, ++ batch->num_cliprects * ++ sizeof(struct drm_clip_rect))) ++ return -EFAULT; ++ ++ ret = i915_dispatch_batchbuffer(dev, batch); ++ ++ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); ++ return ret; ++} ++ ++static int i915_cmdbuffer(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) ++ 
dev_priv->sarea_priv; ++ drm_i915_cmdbuffer_t *cmdbuf = data; ++ int ret; ++ ++ DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", ++ cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (cmdbuf->num_cliprects && ++ DRM_VERIFYAREA_READ(cmdbuf->cliprects, ++ cmdbuf->num_cliprects * ++ sizeof(struct drm_clip_rect))) { ++ DRM_ERROR("Fault accessing cliprects\n"); ++ return -EFAULT; ++ } ++ ++ ret = i915_dispatch_cmdbuffer(dev, cmdbuf); ++ if (ret) { ++ DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); ++ return ret; ++ } ++ ++ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); ++ return 0; ++} ++ ++#if defined(DRM_DEBUG_CODE) ++#define DRM_DEBUG_RELOCATION (drm_debug != 0) ++#else ++#define DRM_DEBUG_RELOCATION 0 ++#endif ++ ++static int i915_do_cleanup_pageflip(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2; ++ ++ DRM_DEBUG("\n"); ++ ++ for (i = 0, planes = 0; i < 2; i++) ++ if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) { ++ dev_priv->sarea_priv->pf_current_page = ++ (dev_priv->sarea_priv->pf_current_page & ++ ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i)); ++ ++ planes |= 1 << i; ++ } ++ ++ if (planes) ++ i915_dispatch_flip(dev, planes, 0); ++ ++ return 0; ++} ++ ++static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_i915_flip_t *param = data; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* This is really planes */ ++ if (param->pipes & ~0x3) { ++ DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n", ++ param->pipes); ++ return -EINVAL; ++ } ++ ++ i915_dispatch_flip(dev, param->pipes, 0); ++ ++ return 0; ++} ++ ++ ++static int i915_getparam(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_getparam_t *param = data; ++ int value; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ switch (param->param) { ++ case I915_PARAM_IRQ_ACTIVE: ++ value = dev->irq_enabled ? 1 : 0; ++ break; ++ case I915_PARAM_ALLOW_BATCHBUFFER: ++ value = dev_priv->allow_batchbuffer ? 
1 : 0; ++ break; ++ case I915_PARAM_LAST_DISPATCH: ++ value = READ_BREADCRUMB(dev_priv); ++ break; ++ case I915_PARAM_CHIPSET_ID: ++ value = dev->pci_device; ++ break; ++ case I915_PARAM_HAS_GEM: ++ value = 1; ++ break; ++ default: ++ DRM_ERROR("Unknown parameter %d\n", param->param); ++ return -EINVAL; ++ } ++ ++ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { ++ DRM_ERROR("DRM_COPY_TO_USER failed\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++static int i915_setparam(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_setparam_t *param = data; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ switch (param->param) { ++ case I915_SETPARAM_USE_MI_BATCHBUFFER_START: ++ break; ++ case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: ++ dev_priv->tex_lru_log_granularity = param->value; ++ break; ++ case I915_SETPARAM_ALLOW_BATCHBUFFER: ++ dev_priv->allow_batchbuffer = param->value; ++ break; ++ default: ++ DRM_ERROR("unknown parameter %d\n", param->param); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++drm_i915_mmio_entry_t mmio_table[] = { ++ [MMIO_REGS_PS_DEPTH_COUNT] = { ++ I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE, ++ 0x2350, ++ 8 ++ } ++}; ++ ++static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t); ++ ++static int i915_mmio(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ uint32_t buf[8]; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_mmio_entry_t *e; ++ drm_i915_mmio_t *mmio = data; ++ void __iomem *base; ++ int i; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ if (mmio->reg >= mmio_table_size) ++ return -EINVAL; ++ ++ e = &mmio_table[mmio->reg]; ++ base = (u8 *) dev_priv->mmio_map->handle + e->offset; ++ ++ switch (mmio->read_write) { ++ case I915_MMIO_READ: ++ if (!(e->flag & I915_MMIO_MAY_READ)) ++ return -EINVAL; ++ for (i = 0; i < e->size / 4; i++) ++ buf[i] = I915_READ(e->offset + i * 4); ++ if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) { ++ DRM_ERROR("DRM_COPY_TO_USER failed\n"); ++ return -EFAULT; ++ } ++ break; ++ ++ case I915_MMIO_WRITE: ++ if (!(e->flag & I915_MMIO_MAY_WRITE)) ++ return -EINVAL; ++ if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) { ++ DRM_ERROR("DRM_COPY_TO_USER failed\n"); ++ return -EFAULT; ++ } ++ for (i = 0; i < e->size / 4; i++) ++ I915_WRITE(e->offset + i * 4, buf[i]); ++ break; ++ } ++ return 0; ++} ++ ++static int i915_set_status_page(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_hws_addr_t *hws = data; ++ ++ if (!I915_NEED_GFX_HWS(dev)) ++ return -EINVAL; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr); ++ ++ dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); ++ ++ dev_priv->hws_map.offset = dev->agp->base + hws->addr; ++ dev_priv->hws_map.size = 4*1024; ++ dev_priv->hws_map.type = 0; ++ dev_priv->hws_map.flags = 0; ++ dev_priv->hws_map.mtrr = 0; ++ ++ drm_core_ioremap(&dev_priv->hws_map, dev); ++ if (dev_priv->hws_map.handle == NULL) { ++ i915_dma_cleanup(dev); ++ dev_priv->status_gfx_addr = 0; ++ DRM_ERROR("can not ioremap virtual address for" ++ " G33 hw status page\n"); ++ return -ENOMEM; ++ } ++ dev_priv->hw_status_page = dev_priv->hws_map.handle; ++ ++ memset(dev_priv->hw_status_page, 
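++	       /* on these parts the status page lives in the GTT aperture
++		* (dev->agp->base + hws->addr), hence the drm_core_ioremap()
++		* of hws_map above rather than a drm_pci_alloc(); zero it
++		* before pointing HWS_PGA at it: */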
0, PAGE_SIZE); ++ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); ++ DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n", ++ dev_priv->status_gfx_addr); ++ DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page); ++ return 0; ++} ++ ++int i915_driver_load(struct drm_device *dev, unsigned long flags) ++{ ++ struct drm_i915_private *dev_priv; ++ unsigned long base, size; ++ int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; ++ ++ /* i915 has 4 more counters */ ++ dev->counters += 4; ++ dev->types[6] = _DRM_STAT_IRQ; ++ dev->types[7] = _DRM_STAT_PRIMARY; ++ dev->types[8] = _DRM_STAT_SECONDARY; ++ dev->types[9] = _DRM_STAT_DMA; ++ ++ dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER); ++ if (dev_priv == NULL) ++ return -ENOMEM; ++ ++ memset(dev_priv, 0, sizeof(drm_i915_private_t)); ++ ++ dev->dev_private = (void *)dev_priv; ++ dev_priv->dev = dev; ++ ++ /* Add register map (needed for suspend/resume) */ ++ base = drm_get_resource_start(dev, mmio_bar); ++ size = drm_get_resource_len(dev, mmio_bar); ++ ++ ret = drm_addmap(dev, base, size, _DRM_REGISTERS, ++ _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map); ++#ifdef I915_HAVE_GEM ++ i915_gem_load(dev); ++#endif ++ DRM_SPININIT(&dev_priv->swaps_lock, "swap"); ++ DRM_SPININIT(&dev_priv->user_irq_lock, "userirq"); ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ++ intel_init_chipset_flush_compat(dev); ++#endif ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ intel_opregion_init(dev); ++#endif ++#endif ++ ++ /* Init HWS */ ++ if (!I915_NEED_GFX_HWS(dev)) { ++ ret = i915_init_hardware_status(dev); ++ if(ret) ++ return ret; ++ } ++ ++ return ret; ++} ++ ++int i915_driver_unload(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ i915_free_hardware_status(dev); ++ ++ drm_rmmap(dev, dev_priv->mmio_map); ++ ++ DRM_SPINUNINIT(&dev_priv->swaps_lock); ++ DRM_SPINUNINIT(&dev_priv->user_irq_lock); ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ intel_opregion_free(dev); ++#endif ++#endif ++ ++ drm_free(dev->dev_private, sizeof(drm_i915_private_t), ++ DRM_MEM_DRIVER); ++ dev->dev_private = NULL; ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ++ intel_fini_chipset_flush_compat(dev); ++#endif ++#endif ++ return 0; ++} ++ ++void i915_driver_lastclose(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ /* agp off can use this to get called before dev_priv */ ++ if (!dev_priv) ++ return; ++ ++#ifdef I915_HAVE_BUFFER ++ if (dev_priv->val_bufs) { ++ vfree(dev_priv->val_bufs); ++ dev_priv->val_bufs = NULL; ++ } ++#endif ++#ifdef I915_HAVE_GEM ++ i915_gem_lastclose(dev); ++#endif ++ if (drm_getsarea(dev) && dev_priv->sarea_priv) ++ i915_do_cleanup_pageflip(dev); ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv = NULL; ++ if (dev_priv->agp_heap) ++ i915_mem_takedown(&(dev_priv->agp_heap)); ++#if defined(I915_HAVE_BUFFER) ++ if (dev_priv->sarea_kmap.virtual) { ++ drm_bo_kunmap(&dev_priv->sarea_kmap); ++ dev_priv->sarea_kmap.virtual = NULL; ++ dev->lock.hw_lock = NULL; ++ dev->sigdata.lock = NULL; ++ } ++ ++ if (dev_priv->sarea_bo) { ++ mutex_lock(&dev->struct_mutex); ++ drm_bo_usage_deref_locked(&dev_priv->sarea_bo); ++ mutex_unlock(&dev->struct_mutex); ++ dev_priv->sarea_bo = NULL; ++ } ++#endif ++ i915_dma_cleanup(dev); ++} ++ ++int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_i915_file_private *i915_file_priv; ++ ++ DRM_DEBUG("\n"); ++ i915_file_priv = (struct 
drm_i915_file_private *) ++ drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES); ++ ++ if (!i915_file_priv) ++ return -ENOMEM; ++ ++ file_priv->driver_priv = i915_file_priv; ++ ++ i915_file_priv->mm.last_gem_seqno = 0; ++ i915_file_priv->mm.last_gem_throttle_seqno = 0; ++ ++ return 0; ++} ++ ++void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ i915_mem_release(dev, file_priv, dev_priv->agp_heap); ++} ++ ++void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; ++ ++ drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES); ++} ++ ++struct drm_ioctl_desc i915_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), ++ DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), ++ DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), ++ DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), ++#ifdef I915_HAVE_BUFFER ++ DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH), ++#endif ++#ifdef I915_HAVE_GEM ++ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), ++ DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), ++#endif ++}; ++ ++int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); ++ ++/** ++ * Determine if the device really is AGP or not. ++ * ++ * All Intel graphics chipsets are treated as AGP, even if they are really ++ * PCI-e. 
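++ * As far as the mapping and aperture code here is concerned the two
++ * behave alike, so claiming AGP keeps the common AGP setup paths in
++ * use for every chipset.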
++ * ++ * \param dev The device to be tested. ++ * ++ * \returns ++ * A value of 1 is always retured to indictate every i9x5 is AGP. ++ */ ++int i915_driver_device_is_agp(struct drm_device * dev) ++{ ++ return 1; ++} ++ ++int i915_driver_firstopen(struct drm_device *dev) ++{ ++#ifdef I915_HAVE_BUFFER ++ drm_bo_driver_init(dev); ++#endif ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/i915_drm.h git-nokia/drivers/gpu/drm-tungsten/i915_drm.h +--- git/drivers/gpu/drm-tungsten/i915_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_drm.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,719 @@ ++/* ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#ifndef _I915_DRM_H_ ++#define _I915_DRM_H_ ++ ++/* Please note that modifications to all structs defined here are ++ * subject to backwards-compatibility constraints. ++ */ ++ ++#include "drm.h" ++ ++/* Each region is a minimum of 16k, and there are at most 255 of them. ++ */ ++#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use ++ * of chars for next/prev indices */ ++#define I915_LOG_MIN_TEX_REGION_SIZE 14 ++ ++typedef struct _drm_i915_init { ++ enum { ++ I915_INIT_DMA = 0x01, ++ I915_CLEANUP_DMA = 0x02, ++ I915_RESUME_DMA = 0x03, ++ ++ /* Since this struct isn't versioned, just used a new ++ * 'func' code to indicate the presence of dri2 sarea ++ * info. */ ++ I915_INIT_DMA2 = 0x04 ++ } func; ++ unsigned int mmio_offset; ++ int sarea_priv_offset; ++ unsigned int ring_start; ++ unsigned int ring_end; ++ unsigned int ring_size; ++ unsigned int front_offset; ++ unsigned int back_offset; ++ unsigned int depth_offset; ++ unsigned int w; ++ unsigned int h; ++ unsigned int pitch; ++ unsigned int pitch_bits; ++ unsigned int back_pitch; ++ unsigned int depth_pitch; ++ unsigned int cpp; ++ unsigned int chipset; ++ unsigned int sarea_handle; ++} drm_i915_init_t; ++ ++typedef struct drm_i915_sarea { ++ struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; ++ int last_upload; /* last time texture was uploaded */ ++ int last_enqueue; /* last time a buffer was enqueued */ ++ int last_dispatch; /* age of the most recently dispatched buffer */ ++ int ctxOwner; /* last context to upload state */ ++ int texAge; ++ int pf_enabled; /* is pageflipping allowed? 
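++				 * When it is, pf_current_page below
++				 * packs two bits per plane: 0 front,
++				 * 1 back, 2 third, plane B in bits
++				 * 3:2 (see i915_do_dispatch_flip()).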
*/ ++ int pf_active; ++ int pf_current_page; /* which buffer is being displayed? */ ++ int perf_boxes; /* performance boxes to be displayed */ ++ int width, height; /* screen size in pixels */ ++ ++ drm_handle_t front_handle; ++ int front_offset; ++ int front_size; ++ ++ drm_handle_t back_handle; ++ int back_offset; ++ int back_size; ++ ++ drm_handle_t depth_handle; ++ int depth_offset; ++ int depth_size; ++ ++ drm_handle_t tex_handle; ++ int tex_offset; ++ int tex_size; ++ int log_tex_granularity; ++ int pitch; ++ int rotation; /* 0, 90, 180 or 270 */ ++ int rotated_offset; ++ int rotated_size; ++ int rotated_pitch; ++ int virtualX, virtualY; ++ ++ unsigned int front_tiled; ++ unsigned int back_tiled; ++ unsigned int depth_tiled; ++ unsigned int rotated_tiled; ++ unsigned int rotated2_tiled; ++ ++ int planeA_x; ++ int planeA_y; ++ int planeA_w; ++ int planeA_h; ++ int planeB_x; ++ int planeB_y; ++ int planeB_w; ++ int planeB_h; ++ ++ /* Triple buffering */ ++ drm_handle_t third_handle; ++ int third_offset; ++ int third_size; ++ unsigned int third_tiled; ++ ++ /* buffer object handles for the static buffers. May change ++ * over the lifetime of the client, though it doesn't in our current ++ * implementation. ++ */ ++ unsigned int front_bo_handle; ++ unsigned int back_bo_handle; ++ unsigned int third_bo_handle; ++ unsigned int depth_bo_handle; ++} drm_i915_sarea_t; ++ ++/* Driver specific fence types and classes. ++ */ ++ ++/* The only fence class we support */ ++#define DRM_I915_FENCE_CLASS_ACCEL 0 ++/* Fence type that guarantees read-write flush */ ++#define DRM_I915_FENCE_TYPE_RW 2 ++/* MI_FLUSH programmed just before the fence */ ++#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000 ++ ++/* Flags for perf_boxes ++ */ ++#define I915_BOX_RING_EMPTY 0x1 ++#define I915_BOX_FLIP 0x2 ++#define I915_BOX_WAIT 0x4 ++#define I915_BOX_TEXTURE_LOAD 0x8 ++#define I915_BOX_LOST_CONTEXT 0x10 ++ ++/* I915 specific ioctls ++ * The device specific ioctl range is 0x40 to 0x79. 
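++ * Each DRM_I915_* code below is an offset added to DRM_COMMAND_BASE
++ * (0x40), so DRM_I915_INIT is ioctl number 0x40 and the table can grow
++ * to 0x79 before the device-specific range runs out.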
++ */ ++#define DRM_I915_INIT 0x00 ++#define DRM_I915_FLUSH 0x01 ++#define DRM_I915_FLIP 0x02 ++#define DRM_I915_BATCHBUFFER 0x03 ++#define DRM_I915_IRQ_EMIT 0x04 ++#define DRM_I915_IRQ_WAIT 0x05 ++#define DRM_I915_GETPARAM 0x06 ++#define DRM_I915_SETPARAM 0x07 ++#define DRM_I915_ALLOC 0x08 ++#define DRM_I915_FREE 0x09 ++#define DRM_I915_INIT_HEAP 0x0a ++#define DRM_I915_CMDBUFFER 0x0b ++#define DRM_I915_DESTROY_HEAP 0x0c ++#define DRM_I915_SET_VBLANK_PIPE 0x0d ++#define DRM_I915_GET_VBLANK_PIPE 0x0e ++#define DRM_I915_VBLANK_SWAP 0x0f ++#define DRM_I915_MMIO 0x10 ++#define DRM_I915_HWS_ADDR 0x11 ++#define DRM_I915_EXECBUFFER 0x12 ++#define DRM_I915_GEM_INIT 0x13 ++#define DRM_I915_GEM_EXECBUFFER 0x14 ++#define DRM_I915_GEM_PIN 0x15 ++#define DRM_I915_GEM_UNPIN 0x16 ++#define DRM_I915_GEM_BUSY 0x17 ++#define DRM_I915_GEM_THROTTLE 0x18 ++#define DRM_I915_GEM_ENTERVT 0x19 ++#define DRM_I915_GEM_LEAVEVT 0x1a ++#define DRM_I915_GEM_CREATE 0x1b ++#define DRM_I915_GEM_PREAD 0x1c ++#define DRM_I915_GEM_PWRITE 0x1d ++#define DRM_I915_GEM_MMAP 0x1e ++#define DRM_I915_GEM_SET_DOMAIN 0x1f ++#define DRM_I915_GEM_SW_FINISH 0x20 ++#define DRM_I915_GEM_SET_TILING 0x21 ++#define DRM_I915_GEM_GET_TILING 0x22 ++ ++#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) ++#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) ++#define DRM_IOCTL_I915_FLIP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t) ++#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) ++#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) ++#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) ++#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) ++#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) ++#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) ++#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) ++#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) ++#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) ++#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) ++#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) ++#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) ++#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) ++#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio) ++#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer) ++#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) ++#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) ++#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) ++#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) ++#define DRM_IOCTL_I915_GEM_BUSY 
DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
++#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
++#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
++#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
++#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
++#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
++#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
++#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
++#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
++#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
++#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
++#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
++
++/* Asynchronous page flipping:
++ */
++typedef struct drm_i915_flip {
++ /*
++ * This is really talking about planes, and we could rename it
++ * except for the fact that some of the duplicated i915_drm.h files
++ * out there check for HAVE_I915_FLIP and so might pick up this
++ * version.
++ */
++ int pipes;
++} drm_i915_flip_t;
++
++/* Allow drivers to submit batchbuffers directly to hardware, relying
++ * on the security mechanisms provided by hardware.
++ */
++typedef struct drm_i915_batchbuffer {
++ int start; /* agp offset */
++ int used; /* nr bytes in use */
++ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
++ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
++ int num_cliprects; /* multipass with multiple cliprects? */
++ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
++} drm_i915_batchbuffer_t;
++
++/* As above, but pass a pointer to userspace buffer which can be
++ * validated by the kernel prior to sending to hardware.
++ */
++typedef struct _drm_i915_cmdbuffer {
++ char __user *buf; /* pointer to userspace command buffer */
++ int sz; /* nr bytes in buf */
++ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
++ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
++ int num_cliprects; /* multipass with multiple cliprects?
*/
++ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
++} drm_i915_cmdbuffer_t;
++
++/* Userspace can request & wait on irq's:
++ */
++typedef struct drm_i915_irq_emit {
++ int __user *irq_seq;
++} drm_i915_irq_emit_t;
++
++typedef struct drm_i915_irq_wait {
++ int irq_seq;
++} drm_i915_irq_wait_t;
++
++/* Ioctl to query kernel params:
++ */
++#define I915_PARAM_IRQ_ACTIVE 1
++#define I915_PARAM_ALLOW_BATCHBUFFER 2
++#define I915_PARAM_LAST_DISPATCH 3
++#define I915_PARAM_CHIPSET_ID 4
++#define I915_PARAM_HAS_GEM 5
++
++typedef struct drm_i915_getparam {
++ int param;
++ int __user *value;
++} drm_i915_getparam_t;
++
++/* Ioctl to set kernel params:
++ */
++#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
++#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
++#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
++
++typedef struct drm_i915_setparam {
++ int param;
++ int value;
++} drm_i915_setparam_t;
++
++/* A memory manager for regions of shared memory:
++ */
++#define I915_MEM_REGION_AGP 1
++
++typedef struct drm_i915_mem_alloc {
++ int region;
++ int alignment;
++ int size;
++ int __user *region_offset; /* offset from start of fb or agp */
++} drm_i915_mem_alloc_t;
++
++typedef struct drm_i915_mem_free {
++ int region;
++ int region_offset;
++} drm_i915_mem_free_t;
++
++typedef struct drm_i915_mem_init_heap {
++ int region;
++ int size;
++ int start;
++} drm_i915_mem_init_heap_t;
++
++/* Allow memory manager to be torn down and re-initialized (eg on
++ * rotate):
++ */
++typedef struct drm_i915_mem_destroy_heap {
++ int region;
++} drm_i915_mem_destroy_heap_t;
++
++/* Allow X server to configure which pipes to monitor for vblank signals
++ */
++#define DRM_I915_VBLANK_PIPE_A 1
++#define DRM_I915_VBLANK_PIPE_B 2
++
++typedef struct drm_i915_vblank_pipe {
++ int pipe;
++} drm_i915_vblank_pipe_t;
++
++/* Schedule buffer swap at given vertical blank:
++ */
++typedef struct drm_i915_vblank_swap {
++ drm_drawable_t drawable;
++ enum drm_vblank_seq_type seqtype;
++ unsigned int sequence;
++} drm_i915_vblank_swap_t;
++
++#define I915_MMIO_READ 0
++#define I915_MMIO_WRITE 1
++
++#define I915_MMIO_MAY_READ 0x1
++#define I915_MMIO_MAY_WRITE 0x2
++
++#define MMIO_REGS_IA_PRIMATIVES_COUNT 0
++#define MMIO_REGS_IA_VERTICES_COUNT 1
++#define MMIO_REGS_VS_INVOCATION_COUNT 2
++#define MMIO_REGS_GS_PRIMITIVES_COUNT 3
++#define MMIO_REGS_GS_INVOCATION_COUNT 4
++#define MMIO_REGS_CL_PRIMITIVES_COUNT 5
++#define MMIO_REGS_CL_INVOCATION_COUNT 6
++#define MMIO_REGS_PS_INVOCATION_COUNT 7
++#define MMIO_REGS_PS_DEPTH_COUNT 8
++
++typedef struct drm_i915_mmio_entry {
++ unsigned int flag;
++ unsigned int offset;
++ unsigned int size;
++} drm_i915_mmio_entry_t;
++
++typedef struct drm_i915_mmio {
++ unsigned int read_write:1;
++ unsigned int reg:31;
++ void __user *data;
++} drm_i915_mmio_t;
++
++typedef struct drm_i915_hws_addr {
++ uint64_t addr;
++} drm_i915_hws_addr_t;
++
++/*
++ * Relocation header is 4 uint32_ts
++ * 0 - 32 bit reloc count
++ * 1 - 32-bit relocation type
++ * 2-3 - 64-bit user buffer handle ptr for another list of relocs.
++ */
++#define I915_RELOC_HEADER 4
++
++/*
++ * type 0 relocation has 4-uint32_t stride
++ * 0 - offset into buffer
++ * 1 - delta to add in
++ * 2 - buffer handle
++ * 3 - reserved (for optimisations later).
++ */
++/*
++ * type 1 relocation has 4-uint32_t stride.
++ * Hangs off the first item in the op list.
++ * Performed after all validations are done.
++ * Try to group relocs into the same relocatee together for
++ * performance reasons.
++ * 0 - offset into buffer ++ * 1 - delta to add in ++ * 2 - buffer index in op list. ++ * 3 - relocatee index in op list. ++ */ ++#define I915_RELOC_TYPE_0 0 ++#define I915_RELOC0_STRIDE 4 ++#define I915_RELOC_TYPE_1 1 ++#define I915_RELOC1_STRIDE 4 ++ ++ ++struct drm_i915_op_arg { ++ uint64_t next; ++ uint64_t reloc_ptr; ++ int handled; ++ unsigned int pad64; ++ union { ++ struct drm_bo_op_req req; ++ struct drm_bo_arg_rep rep; ++ } d; ++ ++}; ++ ++struct drm_i915_execbuffer { ++ uint64_t ops_list; ++ uint32_t num_buffers; ++ struct drm_i915_batchbuffer batch; ++ drm_context_t context; /* for lockless use in the future */ ++ struct drm_fence_arg fence_arg; ++}; ++ ++struct drm_i915_gem_init { ++ /** ++ * Beginning offset in the GTT to be managed by the DRM memory ++ * manager. ++ */ ++ uint64_t gtt_start; ++ /** ++ * Ending offset in the GTT to be managed by the DRM memory ++ * manager. ++ */ ++ uint64_t gtt_end; ++}; ++ ++struct drm_i915_gem_create { ++ /** ++ * Requested size for the object. ++ * ++ * The (page-aligned) allocated size for the object will be returned. ++ */ ++ uint64_t size; ++ /** ++ * Returned handle for the object. ++ * ++ * Object handles are nonzero. ++ */ ++ uint32_t handle; ++ uint32_t pad; ++}; ++ ++struct drm_i915_gem_pread { ++ /** Handle for the object being read. */ ++ uint32_t handle; ++ uint32_t pad; ++ /** Offset into the object to read from */ ++ uint64_t offset; ++ /** Length of data to read */ ++ uint64_t size; ++ /** Pointer to write the data into. */ ++ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */ ++}; ++ ++struct drm_i915_gem_pwrite { ++ /** Handle for the object being written to. */ ++ uint32_t handle; ++ uint32_t pad; ++ /** Offset into the object to write to */ ++ uint64_t offset; ++ /** Length of data to write */ ++ uint64_t size; ++ /** Pointer to read the data from. */ ++ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */ ++}; ++ ++struct drm_i915_gem_mmap { ++ /** Handle for the object being mapped. */ ++ uint32_t handle; ++ uint32_t pad; ++ /** Offset in the object to map. */ ++ uint64_t offset; ++ /** ++ * Length of data to map. ++ * ++ * The value will be page-aligned. ++ */ ++ uint64_t size; ++ /** Returned pointer the data was mapped at */ ++ uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */ ++}; ++ ++struct drm_i915_gem_set_domain { ++ /** Handle for the object */ ++ uint32_t handle; ++ ++ /** New read domains */ ++ uint32_t read_domains; ++ ++ /** New write domain */ ++ uint32_t write_domain; ++}; ++ ++struct drm_i915_gem_sw_finish { ++ /** Handle for the object */ ++ uint32_t handle; ++}; ++ ++struct drm_i915_gem_relocation_entry { ++ /** ++ * Handle of the buffer being pointed to by this relocation entry. ++ * ++ * It's appealing to make this be an index into the mm_validate_entry ++ * list to refer to the buffer, but this allows the driver to create ++ * a relocation list for state buffers and not re-write it per ++ * exec using the buffer. ++ */ ++ uint32_t target_handle; ++ ++ /** ++ * Value to be added to the offset of the target buffer to make up ++ * the relocation entry. ++ */ ++ uint32_t delta; ++ ++ /** Offset in the buffer the relocation entry will be written into */ ++ uint64_t offset; ++ ++ /** ++ * Offset value of the target buffer that the relocation entry was last ++ * written as. ++ * ++ * If the buffer has the same offset as last time, we can skip syncing ++ * and writing the relocation. 
This value is written back out by
++ * the execbuffer ioctl when the relocation is written.
++ */
++ uint64_t presumed_offset;
++
++ /**
++ * Target memory domains read by this operation.
++ */
++ uint32_t read_domains;
++
++ /**
++ * Target memory domains written by this operation.
++ *
++ * Note that only one domain may be written by the whole
++ * execbuffer operation, so that where there are conflicts,
++ * the application will get -EINVAL back.
++ */
++ uint32_t write_domain;
++};
++
++/** @{
++ * Intel memory domains
++ *
++ * Most of these just align with the various caches in
++ * the system and are used to flush and invalidate as
++ * objects end up cached in different domains.
++ */
++/** CPU cache */
++#define I915_GEM_DOMAIN_CPU 0x00000001
++/** Render cache, used by 2D and 3D drawing */
++#define I915_GEM_DOMAIN_RENDER 0x00000002
++/** Sampler cache, used by texture engine */
++#define I915_GEM_DOMAIN_SAMPLER 0x00000004
++/** Command queue, used to load batch buffers */
++#define I915_GEM_DOMAIN_COMMAND 0x00000008
++/** Instruction cache, used by shader programs */
++#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
++/** Vertex address cache */
++#define I915_GEM_DOMAIN_VERTEX 0x00000020
++/** GTT domain - aperture and scanout */
++#define I915_GEM_DOMAIN_GTT 0x00000040
++/** @} */
++
++struct drm_i915_gem_exec_object {
++ /**
++ * User's handle for a buffer to be bound into the GTT for this
++ * operation.
++ */
++ uint32_t handle;
++
++ /** Number of relocations to be performed on this buffer */
++ uint32_t relocation_count;
++ /**
++ * Pointer to array of struct drm_i915_gem_relocation_entry containing
++ * the relocations to be performed in this buffer.
++ */
++ uint64_t relocs_ptr;
++
++ /** Required alignment in graphics aperture */
++ uint64_t alignment;
++
++ /**
++ * Returned value of the updated offset of the object, for future
++ * presumed_offset writes.
++ */
++ uint64_t offset;
++};
++
++struct drm_i915_gem_execbuffer {
++ /**
++ * List of buffers to be validated with their relocations to be
++ * performed on them.
++ *
++ * This is a pointer to an array of struct drm_i915_gem_validate_entry.
++ *
++ * These buffers must be listed in an order such that all relocations
++ * a buffer is performing refer to buffers that have already appeared
++ * in the validate list.
++ */
++ uint64_t buffers_ptr;
++ uint32_t buffer_count;
++
++ /** Offset in the batchbuffer to start execution from. */
++ uint32_t batch_start_offset;
++ /** Bytes used in batchbuffer from batch_start_offset */
++ uint32_t batch_len;
++ uint32_t DR1;
++ uint32_t DR4;
++ uint32_t num_cliprects;
++ uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
++};
++
++struct drm_i915_gem_pin {
++ /** Handle of the buffer to be pinned. */
++ uint32_t handle;
++ uint32_t pad;
++
++ /** alignment required within the aperture */
++ uint64_t alignment;
++
++ /** Returned GTT offset of the buffer. */
++ uint64_t offset;
++};
++
++struct drm_i915_gem_unpin {
++ /** Handle of the buffer to be unpinned.
*/ ++ uint32_t handle; ++ uint32_t pad; ++}; ++ ++struct drm_i915_gem_busy { ++ /** Handle of the buffer to check for busy */ ++ uint32_t handle; ++ ++ /** Return busy status (1 if busy, 0 if idle) */ ++ uint32_t busy; ++}; ++ ++#define I915_TILING_NONE 0 ++#define I915_TILING_X 1 ++#define I915_TILING_Y 2 ++ ++#define I915_BIT_6_SWIZZLE_NONE 0 ++#define I915_BIT_6_SWIZZLE_9 1 ++#define I915_BIT_6_SWIZZLE_9_10 2 ++#define I915_BIT_6_SWIZZLE_9_11 3 ++#define I915_BIT_6_SWIZZLE_9_10_11 4 ++/* Not seen by userland */ ++#define I915_BIT_6_SWIZZLE_UNKNOWN 5 ++ ++struct drm_i915_gem_set_tiling { ++ /** Handle of the buffer to have its tiling state updated */ ++ uint32_t handle; ++ ++ /** ++ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, ++ * I915_TILING_Y). ++ * ++ * This value is to be set on request, and will be updated by the ++ * kernel on successful return with the actual chosen tiling layout. ++ * ++ * The tiling mode may be demoted to I915_TILING_NONE when the system ++ * has bit 6 swizzling that can't be managed correctly by GEM. ++ * ++ * Buffer contents become undefined when changing tiling_mode. ++ */ ++ uint32_t tiling_mode; ++ ++ /** ++ * Stride in bytes for the object when in I915_TILING_X or ++ * I915_TILING_Y. ++ */ ++ uint32_t stride; ++ ++ /** ++ * Returned address bit 6 swizzling required for CPU access through ++ * mmap mapping. ++ */ ++ uint32_t swizzle_mode; ++}; ++ ++struct drm_i915_gem_get_tiling { ++ /** Handle of the buffer to get tiling state for. */ ++ uint32_t handle; ++ ++ /** ++ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, ++ * I915_TILING_Y). ++ */ ++ uint32_t tiling_mode; ++ ++ /** ++ * Returned address bit 6 swizzling required for CPU access through ++ * mmap mapping. ++ */ ++ uint32_t swizzle_mode; ++}; ++ ++#endif /* _I915_DRM_H_ */ +diff -Nurd git/drivers/gpu/drm-tungsten/i915_drv.c git-nokia/drivers/gpu/drm-tungsten/i915_drv.c +--- git/drivers/gpu/drm-tungsten/i915_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_drv.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,222 @@ ++/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- ++ */ ++/* ++ * ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ i915_PCI_IDS ++}; ++ ++#ifdef I915_HAVE_FENCE ++extern struct drm_fence_driver i915_fence_driver; ++#endif ++ ++#ifdef I915_HAVE_BUFFER ++ ++static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL}; ++static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL}; ++ ++static struct drm_bo_driver i915_bo_driver = { ++ .mem_type_prio = i915_mem_prios, ++ .mem_busy_prio = i915_busy_prios, ++ .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t), ++ .num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t), ++ .create_ttm_backend_entry = i915_create_ttm_backend_entry, ++ .fence_type = i915_fence_type, ++ .invalidate_caches = i915_invalidate_caches, ++ .init_mem_type = i915_init_mem_type, ++ .evict_flags = i915_evict_flags, ++ .move = i915_move, ++ .ttm_cache_flush = i915_flush_ttm, ++ .command_stream_barrier = NULL, ++}; ++#endif ++ ++static int i915_suspend(struct drm_device *dev, pm_message_t state) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ if (!dev || !dev_priv) { ++ printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv); ++ printk(KERN_ERR "DRM not initialized, aborting suspend.\n"); ++ return -ENODEV; ++ } ++ ++ if (state.event == PM_EVENT_PRETHAW) ++ return 0; ++ ++ pci_save_state(dev->pdev); ++ ++ i915_save_state(dev); ++ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ intel_opregion_free(dev); ++#endif ++ ++ if (state.event == PM_EVENT_SUSPEND) { ++ /* Shut down the device */ ++ pci_disable_device(dev->pdev); ++ pci_set_power_state(dev->pdev, PCI_D3hot); ++ } ++ ++ return 0; ++} ++ ++static int i915_resume(struct drm_device *dev) ++{ ++ pci_set_power_state(dev->pdev, PCI_D0); ++ pci_restore_state(dev->pdev); ++ if (pci_enable_device(dev->pdev)) ++ return -1; ++ pci_set_master(dev->pdev); ++ ++ i915_restore_state(dev); ++ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ intel_opregion_init(dev); ++#endif ++ ++ return 0; ++} ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static void remove(struct pci_dev *pdev); ++ ++static struct drm_driver driver = { ++ /* don't use mtrr's here, the Xserver or user space app should ++ * deal with them for intel hardware. 
++ */ ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */ ++ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, ++ .load = i915_driver_load, ++ .unload = i915_driver_unload, ++ .firstopen = i915_driver_firstopen, ++ .open = i915_driver_open, ++ .lastclose = i915_driver_lastclose, ++ .preclose = i915_driver_preclose, ++ .postclose = i915_driver_postclose, ++ .suspend = i915_suspend, ++ .resume = i915_resume, ++ .device_is_agp = i915_driver_device_is_agp, ++ .get_vblank_counter = i915_get_vblank_counter, ++ .enable_vblank = i915_enable_vblank, ++ .disable_vblank = i915_disable_vblank, ++ .irq_preinstall = i915_driver_irq_preinstall, ++ .irq_postinstall = i915_driver_irq_postinstall, ++ .irq_uninstall = i915_driver_irq_uninstall, ++ .irq_handler = i915_driver_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .proc_init = i915_gem_proc_init, ++ .proc_cleanup = i915_gem_proc_cleanup, ++ .ioctls = i915_ioctls, ++ .gem_init_object = i915_gem_init_object, ++ .gem_free_object = i915_gem_free_object, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) ++ .compat_ioctl = i915_compat_ioctl, ++#endif ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = remove, ++ }, ++#ifdef I915_HAVE_FENCE ++ .fence_driver = &i915_fence_driver, ++#endif ++#ifdef I915_HAVE_BUFFER ++ .bo_driver = &i915_bo_driver, ++#endif ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ int ret; ++ ++ /* On the 945G/GM, the chipset reports the MSI capability on the ++ * integrated graphics even though the support isn't actually there ++ * according to the published specs. It doesn't appear to function ++ * correctly in testing on 945G. ++ * This may be a side effect of MSI having been made available for PEG ++ * and the registers being closely associated. ++ */ ++ if (pdev->device != 0x2772 && pdev->device != 0x27A2) ++ (void )pci_enable_msi(pdev); ++ ++ ret = drm_get_dev(pdev, ent, &driver); ++ if (ret && pdev->msi_enabled) ++ pci_disable_msi(pdev); ++ return ret; ++} ++static void remove(struct pci_dev *pdev) ++{ ++ if (pdev->msi_enabled) ++ pci_disable_msi(pdev); ++ drm_cleanup_pci(pdev); ++} ++ ++static int __init i915_init(void) ++{ ++ driver.num_ioctls = i915_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit i915_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(i915_init); ++module_exit(i915_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurd git/drivers/gpu/drm-tungsten/i915_drv.h git-nokia/drivers/gpu/drm-tungsten/i915_drv.h +--- git/drivers/gpu/drm-tungsten/i915_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_drv.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,2123 @@ ++/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- ++ */ ++/* ++ * ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#ifndef _I915_DRV_H_ ++#define _I915_DRV_H_ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "Tungsten Graphics, Inc." ++ ++#define DRIVER_NAME "i915" ++#define DRIVER_DESC "Intel Graphics" ++#define DRIVER_DATE "20080730" ++ ++#if defined(__linux__) ++#define I915_HAVE_FENCE ++#define I915_HAVE_BUFFER ++#define I915_HAVE_GEM ++#endif ++ ++/* Interface history: ++ * ++ * 1.1: Original. ++ * 1.2: Add Power Management ++ * 1.3: Add vblank support ++ * 1.4: Fix cmdbuffer path, add heap destroy ++ * 1.5: Add vblank pipe configuration ++ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank ++ * - Support vertical blank on secondary display pipe ++ * 1.8: New ioctl for ARB_Occlusion_Query ++ * 1.9: Usable page flipping and triple buffering ++ * 1.10: Plane/pipe disentangling ++ * 1.11: TTM superioctl ++ * 1.12: TTM relocation optimization ++ */ ++#define DRIVER_MAJOR 1 ++#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER) ++#define DRIVER_MINOR 13 ++#else ++#define DRIVER_MINOR 6 ++#endif ++#define DRIVER_PATCHLEVEL 0 ++ ++enum pipe { ++ PIPE_A = 0, ++ PIPE_B, ++}; ++ ++#ifdef I915_HAVE_BUFFER ++#define I915_MAX_VALIDATE_BUFFERS 4096 ++struct drm_i915_validate_buffer; ++#endif ++ ++#define WATCH_COHERENCY 0 ++#define WATCH_BUF 0 ++#define WATCH_EXEC 0 ++#define WATCH_LRU 0 ++#define WATCH_RELOC 0 ++#define WATCH_INACTIVE 0 ++#define WATCH_PWRITE 0 ++ ++typedef struct _drm_i915_ring_buffer { ++ int tail_mask; ++ unsigned long Size; ++ u8 *virtual_start; ++ int head; ++ int tail; ++ int space; ++ drm_local_map_t map; ++ struct drm_gem_object *ring_obj; ++} drm_i915_ring_buffer_t; ++ ++struct mem_block { ++ struct mem_block *next; ++ struct mem_block *prev; ++ int start; ++ int size; ++ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ ++}; ++ ++typedef struct _drm_i915_vbl_swap { ++ struct list_head head; ++ drm_drawable_t drw_id; ++ unsigned int plane; ++ unsigned int sequence; ++ int flip; ++} drm_i915_vbl_swap_t; ++ ++#ifdef __linux__ ++struct opregion_header; ++struct opregion_acpi; ++struct opregion_swsci; ++struct opregion_asle; ++ ++struct intel_opregion { ++ struct opregion_header *header; ++ struct opregion_acpi *acpi; ++ struct opregion_swsci *swsci; ++ struct opregion_asle *asle; ++ ++ int enabled; ++}; ++#endif ++ ++typedef struct drm_i915_private { ++ struct drm_device *dev; 
++ ++ drm_local_map_t *sarea; ++ drm_local_map_t *mmio_map; ++ ++ drm_i915_sarea_t *sarea_priv; ++ drm_i915_ring_buffer_t ring; ++ ++ drm_dma_handle_t *status_page_dmah; ++ void *hw_status_page; ++ dma_addr_t dma_status_page; ++ uint32_t counter; ++ unsigned int status_gfx_addr; ++ drm_local_map_t hws_map; ++ struct drm_gem_object *hws_obj; ++ ++ unsigned int cpp; ++ ++ wait_queue_head_t irq_queue; ++ atomic_t irq_received; ++ ++ int tex_lru_log_granularity; ++ int allow_batchbuffer; ++ struct mem_block *agp_heap; ++ unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; ++ int vblank_pipe; ++ DRM_SPINTYPE user_irq_lock; ++ int user_irq_refcount; ++ int fence_irq_on; ++ uint32_t irq_mask_reg; ++ int irq_enabled; ++ ++#ifdef I915_HAVE_FENCE ++ uint32_t flush_sequence; ++ uint32_t flush_flags; ++ uint32_t flush_pending; ++ uint32_t saved_flush_status; ++#endif ++#ifdef I915_HAVE_BUFFER ++ void *agp_iomap; ++ unsigned int max_validate_buffers; ++ struct mutex cmdbuf_mutex; ++ struct drm_i915_validate_buffer *val_bufs; ++#endif ++ ++ DRM_SPINTYPE swaps_lock; ++ drm_i915_vbl_swap_t vbl_swaps; ++ unsigned int swaps_pending; ++#if defined(I915_HAVE_BUFFER) ++ /* DRI2 sarea */ ++ struct drm_buffer_object *sarea_bo; ++ struct drm_bo_kmap_obj sarea_kmap; ++#endif ++ ++#ifdef __linux__ ++ struct intel_opregion opregion; ++#endif ++ ++ /* Register state */ ++ u8 saveLBB; ++ u32 saveDSPACNTR; ++ u32 saveDSPBCNTR; ++ u32 saveDSPARB; ++ u32 savePIPEACONF; ++ u32 savePIPEBCONF; ++ u32 savePIPEASRC; ++ u32 savePIPEBSRC; ++ u32 saveFPA0; ++ u32 saveFPA1; ++ u32 saveDPLL_A; ++ u32 saveDPLL_A_MD; ++ u32 saveHTOTAL_A; ++ u32 saveHBLANK_A; ++ u32 saveHSYNC_A; ++ u32 saveVTOTAL_A; ++ u32 saveVBLANK_A; ++ u32 saveVSYNC_A; ++ u32 saveBCLRPAT_A; ++ u32 savePIPEASTAT; ++ u32 saveDSPASTRIDE; ++ u32 saveDSPASIZE; ++ u32 saveDSPAPOS; ++ u32 saveDSPAADDR; ++ u32 saveDSPASURF; ++ u32 saveDSPATILEOFF; ++ u32 savePFIT_PGM_RATIOS; ++ u32 saveBLC_PWM_CTL; ++ u32 saveBLC_PWM_CTL2; ++ u32 saveFPB0; ++ u32 saveFPB1; ++ u32 saveDPLL_B; ++ u32 saveDPLL_B_MD; ++ u32 saveHTOTAL_B; ++ u32 saveHBLANK_B; ++ u32 saveHSYNC_B; ++ u32 saveVTOTAL_B; ++ u32 saveVBLANK_B; ++ u32 saveVSYNC_B; ++ u32 saveBCLRPAT_B; ++ u32 savePIPEBSTAT; ++ u32 saveDSPBSTRIDE; ++ u32 saveDSPBSIZE; ++ u32 saveDSPBPOS; ++ u32 saveDSPBADDR; ++ u32 saveDSPBSURF; ++ u32 saveDSPBTILEOFF; ++ u32 saveVGA0; ++ u32 saveVGA1; ++ u32 saveVGA_PD; ++ u32 saveVGACNTRL; ++ u32 saveADPA; ++ u32 saveLVDS; ++ u32 savePP_ON_DELAYS; ++ u32 savePP_OFF_DELAYS; ++ u32 saveDVOA; ++ u32 saveDVOB; ++ u32 saveDVOC; ++ u32 savePP_ON; ++ u32 savePP_OFF; ++ u32 savePP_CONTROL; ++ u32 savePP_DIVISOR; ++ u32 savePFIT_CONTROL; ++ u32 save_palette_a[256]; ++ u32 save_palette_b[256]; ++ u32 saveFBC_CFB_BASE; ++ u32 saveFBC_LL_BASE; ++ u32 saveFBC_CONTROL; ++ u32 saveFBC_CONTROL2; ++ u32 saveIER; ++ u32 saveIIR; ++ u32 saveIMR; ++ u32 saveCACHE_MODE_0; ++ u32 saveD_STATE; ++ u32 saveCG_2D_DIS; ++ u32 saveMI_ARB_STATE; ++ u32 saveSWF0[16]; ++ u32 saveSWF1[16]; ++ u32 saveSWF2[3]; ++ u8 saveMSR; ++ u8 saveSR[8]; ++ u8 saveGR[25]; ++ u8 saveAR_INDEX; ++ u8 saveAR[21]; ++ u8 saveDACMASK; ++ u8 saveDACDATA[256*3]; /* 256 3-byte colors */ ++ u8 saveCR[37]; ++ ++ struct { ++#ifdef __linux__ ++ struct drm_mm gtt_space; ++#endif ++ /** ++ * List of objects currently involved in rendering from the ++ * ringbuffer. ++ * ++ * A reference is held on the buffer while on this list. 
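++ * Objects move from here to the inactive list once the request
++ * covering their last rendering has retired.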
++ */
++ struct list_head active_list;
++
++ /**
++ * List of objects which are not in the ringbuffer but which
++ * still have a write_domain which needs to be flushed before
++ * unbinding.
++ *
++ * A reference is held on the buffer while on this list.
++ */
++ struct list_head flushing_list;
++
++ /**
++ * LRU list of objects which are not in the ringbuffer and
++ * are ready to unbind, but are still in the GTT.
++ *
++ * A reference is not held on the buffer while on this list,
++ * as merely being GTT-bound shouldn't prevent its being
++ * freed, and we'll pull it off the list in the free path.
++ */
++ struct list_head inactive_list;
++
++ /**
++ * List of breadcrumbs associated with GPU requests currently
++ * outstanding.
++ */
++ struct list_head request_list;
++#ifdef __linux__
++ /**
++ * We leave the user IRQ off as much as possible,
++ * but this means that requests will finish and never
++ * be retired once the system goes idle. Set a timer to
++ * fire periodically while the ring is running. When it
++ * fires, go retire requests.
++ */
++ struct delayed_work retire_work;
++#endif
++ uint32_t next_gem_seqno;
++
++ /**
++ * Waiting sequence number, if any
++ */
++ uint32_t waiting_gem_seqno;
++
++ /**
++ * Last seq seen at irq time
++ */
++ uint32_t irq_gem_seqno;
++
++ /**
++ * Flag if the X Server, and thus DRM, is not currently in
++ * control of the device.
++ *
++ * This is set between LeaveVT and EnterVT. It needs to be
++ * replaced with a semaphore. It also needs to be
++ * transitioned away from for kernel modesetting.
++ */
++ int suspended;
++
++ /**
++ * Flag if the hardware appears to be wedged.
++ *
++ * This is set when attempts to idle the device time out.
++ * It prevents command submission from occurring and makes
++ * every pending request fail.
++ */
++ int wedged;
++
++ /** Bit 6 swizzling required for X tiling */
++ uint32_t bit_6_swizzle_x;
++ /** Bit 6 swizzling required for Y tiling */
++ uint32_t bit_6_swizzle_y;
++ } mm;
++} drm_i915_private_t;
++
++struct drm_i915_file_private {
++ struct {
++ uint32_t last_gem_seqno;
++ uint32_t last_gem_throttle_seqno;
++ } mm;
++};
++
++enum intel_chip_family {
++ CHIP_I8XX = 0x01,
++ CHIP_I9XX = 0x02,
++ CHIP_I915 = 0x04,
++ CHIP_I965 = 0x08,
++};
++
++/** driver private structure attached to each drm_gem_object */
++struct drm_i915_gem_object {
++ struct drm_gem_object *obj;
++
++ /** Current space allocated to this object in the GTT, if any. */
++ struct drm_mm_node *gtt_space;
++
++ /** This object's place on the active/flushing/inactive lists */
++ struct list_head list;
++
++ /**
++ * This is set if the object is on the active or flushing lists
++ * (has pending rendering), and is not set if it's on inactive (ready
++ * to be unbound).
++ */
++ int active;
++
++ /**
++ * This is set if the object has been written to since last bound
++ * to the GTT
++ */
++ int dirty;
++
++ /** AGP memory structure for our GTT binding. */
++ DRM_AGP_MEM *agp_mem;
++
++ struct page **page_list;
++
++ /**
++ * Current offset of the object in GTT space.
++ *
++ * This is the same as gtt_space->start
++ */
++ uint32_t gtt_offset;
++
++ /** Boolean whether this object has a valid gtt offset. */
++ int gtt_bound;
++
++ /** How many users have pinned this object in GTT space */
++ int pin_count;
++
++ /** Breadcrumb of last rendering to the buffer. */
++ uint32_t last_rendering_seqno;
++
++ /** Current tiling mode for the object.
*/ ++ uint32_t tiling_mode; ++ ++ /** ++ * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when ++ * GEM_DOMAIN_CPU is not in the object's read domain. ++ */ ++ uint8_t *page_cpu_valid; ++}; ++ ++/** ++ * Request queue structure. ++ * ++ * The request queue allows us to note sequence numbers that have been emitted ++ * and may be associated with active buffers to be retired. ++ * ++ * By keeping this list, we can avoid having to do questionable ++ * sequence-number comparisons on buffer last_rendering_seqnos, and associate ++ * an emission time with seqnos for tracking how far ahead of the GPU we are. ++ */ ++struct drm_i915_gem_request { ++ /** GEM sequence number associated with this request. */ ++ uint32_t seqno; ++ ++ /** Time at which this request was emitted, in jiffies. */ ++ unsigned long emitted_jiffies; ++ ++ /** Cache domains that were flushed at the start of the request. */ ++ uint32_t flush_domains; ++ ++ struct list_head list; ++}; ++ ++extern struct drm_ioctl_desc i915_ioctls[]; ++extern int i915_max_ioctl; ++ ++ /* i915_dma.c */ ++extern void i915_kernel_lost_context(struct drm_device * dev); ++extern int i915_driver_load(struct drm_device *, unsigned long flags); ++extern int i915_driver_unload(struct drm_device *); ++extern void i915_driver_lastclose(struct drm_device * dev); ++extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); ++extern void i915_driver_preclose(struct drm_device *dev, ++ struct drm_file *file_priv); ++extern void i915_driver_postclose(struct drm_device *dev, ++ struct drm_file *file_priv); ++extern int i915_driver_device_is_agp(struct drm_device * dev); ++extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg); ++extern void i915_emit_breadcrumb(struct drm_device *dev); ++extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync); ++extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush); ++extern int i915_driver_firstopen(struct drm_device *dev); ++extern int i915_dispatch_batchbuffer(struct drm_device * dev, ++ drm_i915_batchbuffer_t * batch); ++extern int i915_quiescent(struct drm_device *dev); ++extern int i915_init_hardware_status(struct drm_device *dev); ++extern void i915_free_hardware_status(struct drm_device *dev); ++ ++int i915_emit_box(struct drm_device * dev, ++ struct drm_clip_rect __user * boxes, ++ int i, int DR1, int DR4); ++ ++/* i915_irq.c */ ++extern int i915_irq_emit(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int i915_irq_wait(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); ++extern void i915_driver_irq_preinstall(struct drm_device * dev); ++extern int i915_driver_irq_postinstall(struct drm_device * dev); ++extern void i915_driver_irq_uninstall(struct drm_device * dev); ++extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int i915_emit_irq(struct drm_device * dev); ++extern int i915_wait_irq(struct drm_device * dev, int irq_nr); ++extern int i915_enable_vblank(struct drm_device *dev, int crtc); ++extern void i915_disable_vblank(struct drm_device *dev, int crtc); ++extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); ++extern int i915_vblank_swap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern void 
i915_user_irq_on(drm_i915_private_t *dev_priv); ++extern void i915_user_irq_off(drm_i915_private_t *dev_priv); ++ ++/* i915_mem.c */ ++extern int i915_mem_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int i915_mem_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int i915_mem_init_heap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int i915_mem_destroy_heap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern void i915_mem_takedown(struct mem_block **heap); ++extern void i915_mem_release(struct drm_device * dev, ++ struct drm_file *file_priv, ++ struct mem_block *heap); ++ ++/* i915_suspend.c */ ++extern int i915_save_state(struct drm_device *dev); ++extern int i915_restore_state(struct drm_device *dev); ++ ++#ifdef I915_HAVE_FENCE ++/* i915_fence.c */ ++extern void i915_fence_handler(struct drm_device *dev); ++extern void i915_invalidate_reported_sequence(struct drm_device *dev); ++ ++#endif ++ ++#ifdef I915_HAVE_BUFFER ++/* i915_buffer.c */ ++extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev); ++extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass, ++ uint32_t *type); ++extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags); ++extern int i915_init_mem_type(struct drm_device *dev, uint32_t type, ++ struct drm_mem_type_manager *man); ++extern uint64_t i915_evict_flags(struct drm_buffer_object *bo); ++extern int i915_move(struct drm_buffer_object *bo, int evict, ++ int no_wait, struct drm_bo_mem_reg *new_mem); ++void i915_flush_ttm(struct drm_ttm *ttm); ++/* i915_execbuf.c */ ++int i915_execbuffer(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++/* i915_gem.c */ ++int i915_gem_init_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_create_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_pread_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_execbuffer(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_pin_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_busy_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_set_tiling(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++int i915_gem_get_tiling(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++void i915_gem_load(struct drm_device *dev); ++int i915_gem_proc_init(struct drm_minor *minor); ++void i915_gem_proc_cleanup(struct drm_minor *minor); ++int i915_gem_init_object(struct drm_gem_object *obj); 
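++/* A minimal sketch of the userspace side of the GEM entry points
++ * above (illustrative only; assumes an open DRM fd and a user
++ * buffer "data", using the wrappers from i915_drm.h):
++ *
++ *	struct drm_i915_gem_create create = { .size = 4096 };
++ *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
++ *
++ *	struct drm_i915_gem_pwrite pwrite = {
++ *		.handle = create.handle,
++ *		.size = 4096,
++ *		.data_ptr = (uint64_t)(unsigned long)data,
++ *	};
++ *	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
++ *
++ * These land in i915_gem_create_ioctl() and i915_gem_pwrite_ioctl()
++ * via the i915_ioctls[] table.
++ */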
++void i915_gem_free_object(struct drm_gem_object *obj); ++int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); ++void i915_gem_object_unpin(struct drm_gem_object *obj); ++void i915_gem_lastclose(struct drm_device *dev); ++uint32_t i915_get_gem_seqno(struct drm_device *dev); ++void i915_gem_retire_requests(struct drm_device *dev); ++void i915_gem_retire_work_handler(struct work_struct *work); ++void i915_gem_clflush_object(struct drm_gem_object *obj); ++#endif ++ ++/* i915_gem_tiling.c */ ++void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); ++ ++/* i915_gem_debug.c */ ++#if WATCH_INACTIVE ++void i915_verify_inactive(struct drm_device *dev, char *file, int line); ++#else ++#define i915_verify_inactive(dev,file,line) ++#endif ++void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); ++void i915_gem_dump_object(struct drm_gem_object *obj, int len, ++ const char *where, uint32_t mark); ++void i915_dump_lru(struct drm_device *dev, const char *where); ++ ++#ifdef __linux__ ++/* i915_opregion.c */ ++extern int intel_opregion_init(struct drm_device *dev); ++extern void intel_opregion_free(struct drm_device *dev); ++extern void opregion_asle_intr(struct drm_device *dev); ++extern void opregion_enable_asle(struct drm_device *dev); ++#endif ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ++extern void intel_init_chipset_flush_compat(struct drm_device *dev); ++extern void intel_fini_chipset_flush_compat(struct drm_device *dev); ++#endif ++#endif ++ ++#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) ++#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) ++#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg)) ++#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) ++#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg)) ++#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val)) ++ ++#if defined(__FreeBSD__) ++typedef boolean_t bool; ++#endif ++ ++#define I915_VERBOSE 0 ++#define I915_RING_VALIDATE 0 ++ ++#define PRIMARY_RINGBUFFER_SIZE (128*1024) ++ ++#define RING_LOCALS unsigned int outring, ringmask, outcount; \ ++ volatile char *virt; ++ ++#if I915_RING_VALIDATE ++void i915_ring_validate(struct drm_device *dev, const char *func, int line); ++#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__) ++#else ++#define I915_RING_DO_VALIDATE(dev) ++#endif ++ ++#define BEGIN_LP_RING(n) do { \ ++ if (I915_VERBOSE) \ ++ DRM_DEBUG("BEGIN_LP_RING(%d)\n", \ ++ (n)); \ ++ I915_RING_DO_VALIDATE(dev); \ ++ if (dev_priv->ring.space < (n)*4) \ ++ i915_wait_ring(dev, (n)*4, __FUNCTION__); \ ++ outcount = 0; \ ++ outring = dev_priv->ring.tail; \ ++ ringmask = dev_priv->ring.tail_mask; \ ++ virt = dev_priv->ring.virtual_start; \ ++} while (0) ++ ++#define OUT_RING(n) do { \ ++ if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ ++ *(volatile unsigned int *)(virt + outring) = (n); \ ++ outcount++; \ ++ outring += 4; \ ++ outring &= ringmask; \ ++} while (0) ++ ++#define ADVANCE_LP_RING() do { \ ++ if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \ ++ I915_RING_DO_VALIDATE(dev); \ ++ dev_priv->ring.tail = outring; \ ++ dev_priv->ring.space -= outcount * 4; \ ++ I915_WRITE(PRB0_TAIL, outring); \ ++} while(0) ++ ++extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); ++ ++#define BREADCRUMB_BITS 31 ++#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1) ++ ++#define READ_BREADCRUMB(dev_priv) 
(((volatile u32*)(dev_priv->hw_status_page))[5]) ++/** ++ * Reads a dword out of the status page, which is written to from the command ++ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or ++ * MI_STORE_DATA_IMM. ++ * ++ * The following dwords have a reserved meaning: ++ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes. ++ * 4: ring 0 head pointer ++ * 5: ring 1 head pointer (915-class) ++ * 6: ring 2 head pointer (915-class) ++ * ++ * The area from dword 0x10 to 0x3ff is available for driver usage. ++ */ ++#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) ++#define I915_GEM_HWS_INDEX 0x10 ++ ++/* MCH MMIO space */ ++/** 915-945 and GM965 MCH register controlling DRAM channel access */ ++#define DCC 0x200 ++#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) ++#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) ++#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) ++#define DCC_ADDRESSING_MODE_MASK (3 << 0) ++#define DCC_CHANNEL_XOR_DISABLE (1 << 10) ++ ++/** 965 MCH register controlling DRAM channel configuration */ ++#define CHDECMISC 0x111 ++#define CHDECMISC_FLEXMEMORY (1 << 1) ++ ++/* ++ * The Bridge device's PCI config space has information about the ++ * fb aperture size and the amount of pre-reserved memory. ++ */ ++#define INTEL_GMCH_CTRL 0x52 ++#define INTEL_GMCH_ENABLED 0x4 ++#define INTEL_GMCH_MEM_MASK 0x1 ++#define INTEL_GMCH_MEM_64M 0x1 ++#define INTEL_GMCH_MEM_128M 0 ++ ++#define INTEL_855_GMCH_GMS_MASK (0x7 << 4) ++#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4) ++#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4) ++#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4) ++#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4) ++#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4) ++#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4) ++ ++#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4) ++#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4) ++ ++/* PCI config space */ ++ ++#define HPLLCC 0xc0 /* 855 only */ ++#define GC_CLOCK_CONTROL_MASK (3 << 0) ++#define GC_CLOCK_133_200 (0 << 0) ++#define GC_CLOCK_100_200 (1 << 0) ++#define GC_CLOCK_100_133 (2 << 0) ++#define GC_CLOCK_166_250 (3 << 0) ++#define GCFGC 0xf0 /* 915+ only */ ++#define GC_LOW_FREQUENCY_ENABLE (1 << 7) ++#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) ++#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) ++#define GC_DISPLAY_CLOCK_MASK (7 << 4) ++#define LBB 0xf4 ++ ++/* VGA stuff */ ++ ++#define VGA_ST01_MDA 0x3ba ++#define VGA_ST01_CGA 0x3da ++ ++#define VGA_MSR_WRITE 0x3c2 ++#define VGA_MSR_READ 0x3cc ++#define VGA_MSR_MEM_EN (1<<1) ++#define VGA_MSR_CGA_MODE (1<<0) ++ ++#define VGA_SR_INDEX 0x3c4 ++#define VGA_SR_DATA 0x3c5 ++ ++#define VGA_AR_INDEX 0x3c0 ++#define VGA_AR_VID_EN (1<<5) ++#define VGA_AR_DATA_WRITE 0x3c0 ++#define VGA_AR_DATA_READ 0x3c1 ++ ++#define VGA_GR_INDEX 0x3ce ++#define VGA_GR_DATA 0x3cf ++/* GR05 */ ++#define VGA_GR_MEM_READ_MODE_SHIFT 3 ++#define VGA_GR_MEM_READ_MODE_PLANE 1 ++/* GR06 */ ++#define VGA_GR_MEM_MODE_MASK 0xc ++#define VGA_GR_MEM_MODE_SHIFT 2 ++#define VGA_GR_MEM_A0000_AFFFF 0 ++#define VGA_GR_MEM_A0000_BFFFF 1 ++#define VGA_GR_MEM_B0000_B7FFF 2 ++#define VGA_GR_MEM_B0000_BFFFF 3 ++ ++#define VGA_DACMASK 0x3c6 ++#define VGA_DACRX 0x3c7 ++#define VGA_DACWX 0x3c8 ++#define VGA_DACDATA 0x3c9 ++ ++#define VGA_CR_INDEX_MDA 0x3b4 ++#define VGA_CR_DATA_MDA 0x3b5 ++#define VGA_CR_INDEX_CGA 0x3d4 ++#define VGA_CR_DATA_CGA 0x3d5 ++ ++/* ++ * Memory interface instructions used by the kernel ++ */ ++#define MI_INSTR(opcode, 
flags) (((opcode) << 23) | (flags)) ++ ++#define MI_NOOP MI_INSTR(0, 0) ++#define MI_USER_INTERRUPT MI_INSTR(0x02, 0) ++#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) ++#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) ++#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) ++#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) ++#define MI_FLUSH MI_INSTR(0x04, 0) ++#define MI_READ_FLUSH (1 << 0) ++#define MI_EXE_FLUSH (1 << 1) ++#define MI_NO_WRITE_FLUSH (1 << 2) ++#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ ++#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ ++#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) ++#define MI_REPORT_HEAD MI_INSTR(0x07, 0) ++#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) ++#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) ++#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ ++#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) ++#define MI_STORE_DWORD_INDEX_SHIFT 2 ++#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) ++#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) ++#define MI_BATCH_NON_SECURE (1) ++#define MI_BATCH_NON_SECURE_I965 (1<<8) ++#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) ++ ++/* ++ * 3D instructions used by the kernel ++ */ ++#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) ++ ++#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) ++#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) ++#define SC_UPDATE_SCISSOR (0x1<<1) ++#define SC_ENABLE_MASK (0x1<<0) ++#define SC_ENABLE (0x1<<0) ++#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) ++#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) ++#define SCI_YMIN_MASK (0xffff<<16) ++#define SCI_XMIN_MASK (0xffff<<0) ++#define SCI_YMAX_MASK (0xffff<<16) ++#define SCI_XMAX_MASK (0xffff<<0) ++#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) ++#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) ++#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) ++#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) ++#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) ++#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) ++#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) ++#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) ++#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) ++#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) ++#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) ++#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) ++#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) ++#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) ++#define BLT_DEPTH_8 (0<<24) ++#define BLT_DEPTH_16_565 (1<<24) ++#define BLT_DEPTH_16_1555 (2<<24) ++#define BLT_DEPTH_32 (3<<24) ++#define BLT_ROP_GXCOPY (0xcc<<16) ++#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ ++#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ ++#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) ++#define ASYNC_FLIP (1<<22) ++#define DISPLAY_PLANE_A (0<<20) ++#define DISPLAY_PLANE_B (1<<20) ++ ++/* ++ * Instruction and interrupt control regs ++ */ ++ ++#define PRB0_TAIL 0x02030 ++#define PRB0_HEAD 0x02034 ++#define PRB0_START 0x02038 ++#define PRB0_CTL 0x0203c ++#define TAIL_ADDR 0x001FFFF8 ++#define HEAD_WRAP_COUNT 0xFFE00000 ++#define HEAD_WRAP_ONE 0x00200000 ++#define HEAD_ADDR 0x001FFFFC ++#define RING_NR_PAGES 0x001FF000 ++#define RING_REPORT_MASK 0x00000006 ++#define RING_REPORT_64K 0x00000002 ++#define RING_REPORT_128K 0x00000004 ++#define RING_NO_REPORT 0x00000000 ++#define RING_VALID_MASK 0x00000001 
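++/* A ring is brought up by pointing PRB0_START at its graphics address
++ * and writing PRB0_CTL with the ring length and valid bit; roughly
++ * (sketch only, ring_start being the ring's GTT address):
++ *
++ *	I915_WRITE(PRB0_START, ring_start);
++ *	I915_WRITE(PRB0_CTL, ((size - 4096) & RING_NR_PAGES) |
++ *		   RING_NO_REPORT | RING_VALID);
++ */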
++#define RING_VALID 0x00000001 ++#define RING_INVALID 0x00000000 ++#define PRB1_TAIL 0x02040 /* 915+ only */ ++#define PRB1_HEAD 0x02044 /* 915+ only */ ++#define PRB1_START 0x02048 /* 915+ only */ ++#define PRB1_CTL 0x0204c /* 915+ only */ ++#define ACTHD_I965 0x02074 ++#define HWS_PGA 0x02080 ++#define HWS_ADDRESS_MASK 0xfffff000 ++#define HWS_START_ADDRESS_SHIFT 4 ++#define IPEIR 0x02088 ++#define NOPID 0x02094 ++#define HWSTAM 0x02098 ++#define SCPD0 0x0209c /* 915+ only */ ++#define IER 0x020a0 ++#define IIR 0x020a4 ++#define IMR 0x020a8 ++#define ISR 0x020ac ++#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) ++#define I915_DISPLAY_PORT_INTERRUPT (1<<17) ++#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) ++#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) ++#define I915_HWB_OOM_INTERRUPT (1<<13) ++#define I915_SYNC_STATUS_INTERRUPT (1<<12) ++#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) ++#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) ++#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) ++#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) ++#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) ++#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) ++#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) ++#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) ++#define I915_DEBUG_INTERRUPT (1<<2) ++#define I915_USER_INTERRUPT (1<<1) ++#define I915_ASLE_INTERRUPT (1<<0) ++#define EIR 0x020b0 ++#define EMR 0x020b4 ++#define ESR 0x020b8 ++#define INSTPM 0x020c0 ++#define ACTHD 0x020c8 ++#define FW_BLC 0x020d8 ++#define FW_BLC_SELF 0x020e0 /* 915+ only */ ++#define MI_ARB_STATE 0x020e4 /* 915+ only */ ++#define CACHE_MODE_0 0x02120 /* 915+ only */ ++#define CM0_MASK_SHIFT 16 ++#define CM0_IZ_OPT_DISABLE (1<<6) ++#define CM0_ZR_OPT_DISABLE (1<<5) ++#define CM0_DEPTH_EVICT_DISABLE (1<<4) ++#define CM0_COLOR_EVICT_DISABLE (1<<3) ++#define CM0_DEPTH_WRITE_DISABLE (1<<1) ++#define CM0_RC_OP_FLUSH_DISABLE (1<<0) ++#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ ++ ++/* ++ * Framebuffer compression (915+ only) ++ */ ++ ++#define FBC_CFB_BASE 0x03200 /* 4k page aligned */ ++#define FBC_LL_BASE 0x03204 /* 4k page aligned */ ++#define FBC_CONTROL 0x03208 ++#define FBC_CTL_EN (1<<31) ++#define FBC_CTL_PERIODIC (1<<30) ++#define FBC_CTL_INTERVAL_SHIFT (16) ++#define FBC_CTL_UNCOMPRESSIBLE (1<<14) ++#define FBC_CTL_STRIDE_SHIFT (5) ++#define FBC_CTL_FENCENO (1<<0) ++#define FBC_COMMAND 0x0320c ++#define FBC_CMD_COMPRESS (1<<0) ++#define FBC_STATUS 0x03210 ++#define FBC_STAT_COMPRESSING (1<<31) ++#define FBC_STAT_COMPRESSED (1<<30) ++#define FBC_STAT_MODIFIED (1<<29) ++#define FBC_STAT_CURRENT_LINE (1<<0) ++#define FBC_CONTROL2 0x03214 ++#define FBC_CTL_FENCE_DBL (0<<4) ++#define FBC_CTL_IDLE_IMM (0<<2) ++#define FBC_CTL_IDLE_FULL (1<<2) ++#define FBC_CTL_IDLE_LINE (2<<2) ++#define FBC_CTL_IDLE_DEBUG (3<<2) ++#define FBC_CTL_CPU_FENCE (1<<1) ++#define FBC_CTL_PLANEA (0<<0) ++#define FBC_CTL_PLANEB (1<<0) ++#define FBC_FENCE_OFF 0x0321b ++ ++#define FBC_LL_SIZE (1536) ++ ++/* ++ * GPIO regs ++ */ ++#define GPIOA 0x5010 ++#define GPIOB 0x5014 ++#define GPIOC 0x5018 ++#define GPIOD 0x501c ++#define GPIOE 0x5020 ++#define GPIOF 0x5024 ++#define GPIOG 0x5028 ++#define GPIOH 0x502c ++# define GPIO_CLOCK_DIR_MASK (1 << 0) ++# define GPIO_CLOCK_DIR_IN (0 << 1) ++# define GPIO_CLOCK_DIR_OUT (1 << 1) ++# define GPIO_CLOCK_VAL_MASK (1 << 2) ++# define GPIO_CLOCK_VAL_OUT (1 << 3) ++# define GPIO_CLOCK_VAL_IN (1 << 4) ++# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) ++# 
define GPIO_DATA_DIR_MASK (1 << 8) ++# define GPIO_DATA_DIR_IN (0 << 9) ++# define GPIO_DATA_DIR_OUT (1 << 9) ++# define GPIO_DATA_VAL_MASK (1 << 10) ++# define GPIO_DATA_VAL_OUT (1 << 11) ++# define GPIO_DATA_VAL_IN (1 << 12) ++# define GPIO_DATA_PULLUP_DISABLE (1 << 13) ++ ++/* ++ * Clock control & power management ++ */ ++ ++#define VGA0 0x6000 ++#define VGA1 0x6004 ++#define VGA_PD 0x6010 ++#define VGA0_PD_P2_DIV_4 (1 << 7) ++#define VGA0_PD_P1_DIV_2 (1 << 5) ++#define VGA0_PD_P1_SHIFT 0 ++#define VGA0_PD_P1_MASK (0x1f << 0) ++#define VGA1_PD_P2_DIV_4 (1 << 15) ++#define VGA1_PD_P1_DIV_2 (1 << 13) ++#define VGA1_PD_P1_SHIFT 8 ++#define VGA1_PD_P1_MASK (0x1f << 8) ++#define DPLL_A 0x06014 ++#define DPLL_B 0x06018 ++#define DPLL_VCO_ENABLE (1 << 31) ++#define DPLL_DVO_HIGH_SPEED (1 << 30) ++#define DPLL_SYNCLOCK_ENABLE (1 << 29) ++#define DPLL_VGA_MODE_DIS (1 << 28) ++#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ ++#define DPLLB_MODE_LVDS (2 << 26) /* i915 */ ++#define DPLL_MODE_MASK (3 << 26) ++#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ ++#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ ++#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ ++#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ ++#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ ++#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ ++ ++#define I915_FIFO_UNDERRUN_STATUS (1UL<<31) ++#define I915_CRC_ERROR_ENABLE (1UL<<29) ++#define I915_CRC_DONE_ENABLE (1UL<<28) ++#define I915_GMBUS_EVENT_ENABLE (1UL<<27) ++#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25) ++#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) ++#define I915_DPST_EVENT_ENABLE (1UL<<23) ++#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22) ++#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) ++#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) ++#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ ++#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) ++#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16) ++#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) ++#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12) ++#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11) ++#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9) ++#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) ++#define I915_DPST_EVENT_STATUS (1UL<<7) ++#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6) ++#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) ++#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) ++#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ ++#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1) ++#define I915_OVERLAY_UPDATED_STATUS (1UL<<0) ++ ++#define SRX_INDEX 0x3c4 ++#define SRX_DATA 0x3c5 ++#define SR01 1 ++#define SR01_SCREEN_OFF (1<<5) ++ ++#define PPCR 0x61204 ++#define PPCR_ON (1<<0) ++ ++#define DVOB 0x61140 ++#define DVOB_ON (1<<31) ++#define DVOC 0x61160 ++#define DVOC_ON (1<<31) ++#define LVDS 0x61180 ++#define LVDS_ON (1<<31) ++ ++#define ADPA 0x61100 ++#define ADPA_DPMS_MASK (~(3<<10)) ++#define ADPA_DPMS_ON (0<<10) ++#define ADPA_DPMS_SUSPEND (1<<10) ++#define ADPA_DPMS_STANDBY (2<<10) ++#define ADPA_DPMS_OFF (3<<10) ++ ++#define RING_TAIL 0x00 ++#define TAIL_ADDR 0x001FFFF8 ++#define RING_HEAD 0x04 ++#define HEAD_WRAP_COUNT 0xFFE00000 ++#define HEAD_WRAP_ONE 0x00200000 ++#define HEAD_ADDR 0x001FFFFC ++#define RING_START 0x08 ++#define START_ADDR 0xFFFFF000 ++#define RING_LEN 0x0C ++#define RING_NR_PAGES 0x001FF000 ++#define RING_REPORT_MASK 0x00000006 ++#define RING_REPORT_64K 0x00000002 ++#define RING_REPORT_128K 0x00000004 
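++/*
++ * Illustrative sketch only, not part of the original register list:
++ * with the head/tail masks above, ring free space is typically derived
++ * from the masked hardware head and the software tail.  INREG(),
++ * ring_base, tail and ring_size are assumed helper/local names here:
++ *
++ *	head  = INREG(ring_base + RING_HEAD) & HEAD_ADDR;
++ *	space = head - (tail + 8);
++ *	if (space < 0)
++ *		space += ring_size;
++ */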
++#define RING_NO_REPORT 0x00000000 ++#define RING_VALID_MASK 0x00000001 ++#define RING_VALID 0x00000001 ++#define RING_INVALID 0x00000000 ++ ++/* Scratch pad debug 0 reg: ++ */ ++#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 ++/* ++ * The i830 generation, in LVDS mode, defines P1 as the bit number set within ++ * this field (only one bit may be set). ++ */ ++#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 ++#define DPLL_FPA01_P1_POST_DIV_SHIFT 16 ++/* i830, required in DVO non-gang */ ++#define PLL_P2_DIVIDE_BY_4 (1 << 23) ++#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ ++#define PLL_REF_INPUT_DREFCLK (0 << 13) ++#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ ++#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ ++#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) ++#define PLL_REF_INPUT_MASK (3 << 13) ++#define PLL_LOAD_PULSE_PHASE_SHIFT 9 ++/* ++ * Parallel to Serial Load Pulse phase selection. ++ * Selects the phase for the 10X DPLL clock for the PCIe ++ * digital display port. The range is 4 to 13; 10 or more ++ * is just a flip delay. The default is 6 ++ */ ++#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) ++#define DISPLAY_RATE_SELECT_FPA1 (1 << 8) ++/* ++ * SDVO multiplier for 945G/GM. Not used on 965. ++ */ ++#define SDVO_MULTIPLIER_MASK 0x000000ff ++#define SDVO_MULTIPLIER_SHIFT_HIRES 4 ++#define SDVO_MULTIPLIER_SHIFT_VGA 0 ++#define DPLL_A_MD 0x0601c /* 965+ only */ ++/* ++ * UDI pixel divider, controlling how many pixels are stuffed into a packet. ++ * ++ * Value is pixels minus 1. Must be set to 1 pixel for SDVO. ++ */ ++#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 ++#define DPLL_MD_UDI_DIVIDER_SHIFT 24 ++/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ ++#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 ++#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 ++/* ++ * SDVO/UDI pixel multiplier. ++ * ++ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus ++ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate ++ * modes, the bus rate would be below the limits, so SDVO allows for stuffing ++ * dummy bytes in the datastream at an increased clock rate, with both sides of ++ * the link knowing how many bytes are fill. ++ * ++ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock ++ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be ++ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and ++ * through an SDVO command. ++ * ++ * This register field has values of multiplication factor minus 1, with ++ * a maximum multiplier of 5 for SDVO. ++ */ ++#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 ++#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 ++/* ++ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. ++ * This best be set to the default value (3) or the CRT won't work. No, ++ * I don't entirely understand what this does... 
++ */ ++#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f ++#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 ++#define DPLL_B_MD 0x06020 /* 965+ only */ ++#define FPA0 0x06040 ++#define FPA1 0x06044 ++#define FPB0 0x06048 ++#define FPB1 0x0604c ++#define FP_N_DIV_MASK 0x003f0000 ++#define FP_N_DIV_SHIFT 16 ++#define FP_M1_DIV_MASK 0x00003f00 ++#define FP_M1_DIV_SHIFT 8 ++#define FP_M2_DIV_MASK 0x0000003f ++#define FP_M2_DIV_SHIFT 0 ++#define DPLL_TEST 0x606c ++#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) ++#define DPLLB_TEST_SDVO_DIV_2 (1 << 22) ++#define DPLLB_TEST_SDVO_DIV_4 (2 << 22) ++#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) ++#define DPLLB_TEST_N_BYPASS (1 << 19) ++#define DPLLB_TEST_M_BYPASS (1 << 18) ++#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) ++#define DPLLA_TEST_N_BYPASS (1 << 3) ++#define DPLLA_TEST_M_BYPASS (1 << 2) ++#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) ++#define D_STATE 0x6104 ++#define CG_2D_DIS 0x6200 ++#define CG_3D_DIS 0x6204 ++ ++/* ++ * Palette regs ++ */ ++ ++#define PALETTE_A 0x0a000 ++#define PALETTE_B 0x0a800 ++ ++/* ++ * Overlay regs ++ */ ++ ++#define OVADD 0x30000 ++#define DOVSTA 0x30008 ++#define OC_BUF (0x3<<20) ++#define OGAMC5 0x30010 ++#define OGAMC4 0x30014 ++#define OGAMC3 0x30018 ++#define OGAMC2 0x3001c ++#define OGAMC1 0x30020 ++#define OGAMC0 0x30024 ++ ++/* ++ * Display engine regs ++ */ ++ ++/* Pipe A timing regs */ ++#define HTOTAL_A 0x60000 ++#define HBLANK_A 0x60004 ++#define HSYNC_A 0x60008 ++#define VTOTAL_A 0x6000c ++#define VBLANK_A 0x60010 ++#define VSYNC_A 0x60014 ++#define PIPEASRC 0x6001c ++#define BCLRPAT_A 0x60020 ++ ++/* Pipe B timing regs */ ++#define HTOTAL_B 0x61000 ++#define HBLANK_B 0x61004 ++#define HSYNC_B 0x61008 ++#define VTOTAL_B 0x6100c ++#define VBLANK_B 0x61010 ++#define VSYNC_B 0x61014 ++#define PIPEBSRC 0x6101c ++#define BCLRPAT_B 0x61020 ++ ++/* VGA port control */ ++#define ADPA 0x61100 ++#define ADPA_DAC_ENABLE (1<<31) ++#define ADPA_DAC_DISABLE 0 ++#define ADPA_PIPE_SELECT_MASK (1<<30) ++#define ADPA_PIPE_A_SELECT 0 ++#define ADPA_PIPE_B_SELECT (1<<30) ++#define ADPA_USE_VGA_HVPOLARITY (1<<15) ++#define ADPA_SETS_HVPOLARITY 0 ++#define ADPA_VSYNC_CNTL_DISABLE (1<<11) ++#define ADPA_VSYNC_CNTL_ENABLE 0 ++#define ADPA_HSYNC_CNTL_DISABLE (1<<10) ++#define ADPA_HSYNC_CNTL_ENABLE 0 ++#define ADPA_VSYNC_ACTIVE_HIGH (1<<4) ++#define ADPA_VSYNC_ACTIVE_LOW 0 ++#define ADPA_HSYNC_ACTIVE_HIGH (1<<3) ++#define ADPA_HSYNC_ACTIVE_LOW 0 ++#define ADPA_DPMS_MASK (~(3<<10)) ++#define ADPA_DPMS_ON (0<<10) ++#define ADPA_DPMS_SUSPEND (1<<10) ++#define ADPA_DPMS_STANDBY (2<<10) ++#define ADPA_DPMS_OFF (3<<10) ++ ++/* Hotplug control (945+ only) */ ++#define PORT_HOTPLUG_EN 0x61110 ++#define SDVOB_HOTPLUG_INT_EN (1 << 26) ++#define SDVOC_HOTPLUG_INT_EN (1 << 25) ++#define TV_HOTPLUG_INT_EN (1 << 18) ++#define CRT_HOTPLUG_INT_EN (1 << 9) ++#define CRT_HOTPLUG_FORCE_DETECT (1 << 3) ++ ++#define PORT_HOTPLUG_STAT 0x61114 ++#define CRT_HOTPLUG_INT_STATUS (1 << 11) ++#define TV_HOTPLUG_INT_STATUS (1 << 10) ++#define CRT_HOTPLUG_MONITOR_MASK (3 << 8) ++#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) ++#define CRT_HOTPLUG_MONITOR_MONO (2 << 8) ++#define CRT_HOTPLUG_MONITOR_NONE (0 << 8) ++#define SDVOC_HOTPLUG_INT_STATUS (1 << 7) ++#define SDVOB_HOTPLUG_INT_STATUS (1 << 6) ++ ++/* SDVO port control */ ++#define SDVOB 0x61140 ++#define SDVOC 0x61160 ++#define SDVO_ENABLE (1 << 31) ++#define SDVO_PIPE_B_SELECT (1 << 30) ++#define SDVO_STALL_SELECT (1 << 29) ++#define SDVO_INTERRUPT_ENABLE (1 << 26) ++/** ++ * 915G/GM SDVO pixel multiplier. 
++ * ++ * Programmed value is multiplier - 1, up to 5x. ++ * ++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK ++ */ ++#define SDVO_PORT_MULTIPLY_MASK (7 << 23) ++#define SDVO_PORT_MULTIPLY_SHIFT 23 ++#define SDVO_PHASE_SELECT_MASK (15 << 19) ++#define SDVO_PHASE_SELECT_DEFAULT (6 << 19) ++#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) ++#define SDVOC_GANG_MODE (1 << 16) ++#define SDVO_BORDER_ENABLE (1 << 7) ++#define SDVOB_PCIE_CONCURRENCY (1 << 3) ++#define SDVO_DETECTED (1 << 2) ++/* Bits to be preserved when writing */ ++#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26)) ++#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26)) ++ ++/* DVO port control */ ++#define DVOA 0x61120 ++#define DVOB 0x61140 ++#define DVOC 0x61160 ++#define DVO_ENABLE (1 << 31) ++#define DVO_PIPE_B_SELECT (1 << 30) ++#define DVO_PIPE_STALL_UNUSED (0 << 28) ++#define DVO_PIPE_STALL (1 << 28) ++#define DVO_PIPE_STALL_TV (2 << 28) ++#define DVO_PIPE_STALL_MASK (3 << 28) ++#define DVO_USE_VGA_SYNC (1 << 15) ++#define DVO_DATA_ORDER_I740 (0 << 14) ++#define DVO_DATA_ORDER_FP (1 << 14) ++#define DVO_VSYNC_DISABLE (1 << 11) ++#define DVO_HSYNC_DISABLE (1 << 10) ++#define DVO_VSYNC_TRISTATE (1 << 9) ++#define DVO_HSYNC_TRISTATE (1 << 8) ++#define DVO_BORDER_ENABLE (1 << 7) ++#define DVO_DATA_ORDER_GBRG (1 << 6) ++#define DVO_DATA_ORDER_RGGB (0 << 6) ++#define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6) ++#define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6) ++#define DVO_VSYNC_ACTIVE_HIGH (1 << 4) ++#define DVO_HSYNC_ACTIVE_HIGH (1 << 3) ++#define DVO_BLANK_ACTIVE_HIGH (1 << 2) ++#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ ++#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ ++#define DVO_PRESERVE_MASK (0x7<<24) ++#define DVOA_SRCDIM 0x61124 ++#define DVOB_SRCDIM 0x61144 ++#define DVOC_SRCDIM 0x61164 ++#define DVO_SRCDIM_HORIZONTAL_SHIFT 12 ++#define DVO_SRCDIM_VERTICAL_SHIFT 0 ++ ++/* LVDS port control */ ++#define LVDS 0x61180 ++/* ++ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as ++ * the DPLL semantics change when the LVDS is assigned to that pipe. ++ */ ++#define LVDS_PORT_EN (1 << 31) ++/* Selects pipe B for LVDS data. Must be set on pre-965. */ ++#define LVDS_PIPEB_SELECT (1 << 30) ++/* ++ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per ++ * pixel. ++ */ ++#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) ++#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) ++#define LVDS_A0A2_CLKA_POWER_UP (3 << 8) ++/* ++ * Controls the A3 data pair, which contains the additional LSBs for 24 bit ++ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be ++ * on. ++ */ ++#define LVDS_A3_POWER_MASK (3 << 6) ++#define LVDS_A3_POWER_DOWN (0 << 6) ++#define LVDS_A3_POWER_UP (3 << 6) ++/* ++ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP ++ * is set. ++ */ ++#define LVDS_CLKB_POWER_MASK (3 << 4) ++#define LVDS_CLKB_POWER_DOWN (0 << 4) ++#define LVDS_CLKB_POWER_UP (3 << 4) ++/* ++ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 ++ * setting for whether we are in dual-channel mode. The B3 pair will ++ * additionally only be powered up when LVDS_A3_POWER_UP is set. 
++ */ ++#define LVDS_B0B3_POWER_MASK (3 << 2) ++#define LVDS_B0B3_POWER_DOWN (0 << 2) ++#define LVDS_B0B3_POWER_UP (3 << 2) ++ ++/* Panel power sequencing */ ++#define PP_STATUS 0x61200 ++#define PP_ON (1 << 31) ++/* ++ * Indicates that all dependencies of the panel are on: ++ * ++ * - PLL enabled ++ * - pipe enabled ++ * - LVDS/DVOB/DVOC on ++ */ ++#define PP_READY (1 << 30) ++#define PP_SEQUENCE_NONE (0 << 28) ++#define PP_SEQUENCE_ON (1 << 28) ++#define PP_SEQUENCE_OFF (2 << 28) ++#define PP_SEQUENCE_MASK 0x30000000 ++#define PP_CONTROL 0x61204 ++#define POWER_TARGET_ON (1 << 0) ++#define PP_ON_DELAYS 0x61208 ++#define PP_OFF_DELAYS 0x6120c ++#define PP_DIVISOR 0x61210 ++ ++/* Panel fitting */ ++#define PFIT_CONTROL 0x61230 ++#define PFIT_ENABLE (1 << 31) ++#define PFIT_PIPE_MASK (3 << 29) ++#define PFIT_PIPE_SHIFT 29 ++#define VERT_INTERP_DISABLE (0 << 10) ++#define VERT_INTERP_BILINEAR (1 << 10) ++#define VERT_INTERP_MASK (3 << 10) ++#define VERT_AUTO_SCALE (1 << 9) ++#define HORIZ_INTERP_DISABLE (0 << 6) ++#define HORIZ_INTERP_BILINEAR (1 << 6) ++#define HORIZ_INTERP_MASK (3 << 6) ++#define HORIZ_AUTO_SCALE (1 << 5) ++#define PANEL_8TO6_DITHER_ENABLE (1 << 3) ++#define PFIT_PGM_RATIOS 0x61234 ++#define PFIT_VERT_SCALE_MASK 0xfff00000 ++#define PFIT_HORIZ_SCALE_MASK 0x0000fff0 ++#define PFIT_AUTO_RATIOS 0x61238 ++ ++/* Backlight control */ ++#define BLC_PWM_CTL 0x61254 ++#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) ++#define BLC_PWM_CTL2 0x61250 /* 965+ only */ ++/* ++ * This is the most significant 15 bits of the number of backlight cycles in a ++ * complete cycle of the modulated backlight control. ++ * ++ * The actual value is this field multiplied by two. ++ */ ++#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) ++#define BLM_LEGACY_MODE (1 << 16) ++/* ++ * This is the number of cycles out of the backlight modulation cycle for which ++ * the backlight is on. ++ * ++ * This field must be no greater than the number of cycles in the complete ++ * backlight modulation cycle. ++ */ ++#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) ++#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) ++ ++/* TV port control */ ++#define TV_CTL 0x68000 ++/** Enables the TV encoder */ ++# define TV_ENC_ENABLE (1 << 31) ++/** Sources the TV encoder input from pipe B instead of A. */ ++# define TV_ENC_PIPEB_SELECT (1 << 30) ++/** Outputs composite video (DAC A only) */ ++# define TV_ENC_OUTPUT_COMPOSITE (0 << 28) ++/** Outputs SVideo video (DAC B/C) */ ++# define TV_ENC_OUTPUT_SVIDEO (1 << 28) ++/** Outputs Component video (DAC A/B/C) */ ++# define TV_ENC_OUTPUT_COMPONENT (2 << 28) ++/** Outputs Composite and SVideo (DAC A/B/C) */ ++# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) ++# define TV_TRILEVEL_SYNC (1 << 21) ++/** Enables slow sync generation (945GM only) */ ++# define TV_SLOW_SYNC (1 << 20) ++/** Selects 4x oversampling for 480i and 576p */ ++# define TV_OVERSAMPLE_4X (0 << 18) ++/** Selects 2x oversampling for 720p and 1080i */ ++# define TV_OVERSAMPLE_2X (1 << 18) ++/** Selects no oversampling for 1080p */ ++# define TV_OVERSAMPLE_NONE (2 << 18) ++/** Selects 8x oversampling */ ++# define TV_OVERSAMPLE_8X (3 << 18) ++/** Selects progressive mode rather than interlaced */ ++# define TV_PROGRESSIVE (1 << 17) ++/** Sets the colorburst to PAL mode. Required for non-M PAL modes. 
*/ ++# define TV_PAL_BURST (1 << 16) ++/** Field for setting delay of Y compared to C */ ++# define TV_YC_SKEW_MASK (7 << 12) ++/** Enables a fix for 480p/576p standard definition modes on the 915GM only */ ++# define TV_ENC_SDP_FIX (1 << 11) ++/** ++ * Enables a fix for the 915GM only. ++ * ++ * Not sure what it does. ++ */ ++# define TV_ENC_C0_FIX (1 << 10) ++/** Bits that must be preserved by software */ ++# define TV_CTL_SAVE ((3 << 8) | (3 << 6)) ++# define TV_FUSE_STATE_MASK (3 << 4) ++/** Read-only state that reports all features enabled */ ++# define TV_FUSE_STATE_ENABLED (0 << 4) ++/** Read-only state that reports that Macrovision is disabled in hardware*/ ++# define TV_FUSE_STATE_NO_MACROVISION (1 << 4) ++/** Read-only state that reports that TV-out is disabled in hardware. */ ++# define TV_FUSE_STATE_DISABLED (2 << 4) ++/** Normal operation */ ++# define TV_TEST_MODE_NORMAL (0 << 0) ++/** Encoder test pattern 1 - combo pattern */ ++# define TV_TEST_MODE_PATTERN_1 (1 << 0) ++/** Encoder test pattern 2 - full screen vertical 75% color bars */ ++# define TV_TEST_MODE_PATTERN_2 (2 << 0) ++/** Encoder test pattern 3 - full screen horizontal 75% color bars */ ++# define TV_TEST_MODE_PATTERN_3 (3 << 0) ++/** Encoder test pattern 4 - random noise */ ++# define TV_TEST_MODE_PATTERN_4 (4 << 0) ++/** Encoder test pattern 5 - linear color ramps */ ++# define TV_TEST_MODE_PATTERN_5 (5 << 0) ++/** ++ * This test mode forces the DACs to 50% of full output. ++ * ++ * This is used for load detection in combination with TVDAC_SENSE_MASK ++ */ ++# define TV_TEST_MODE_MONITOR_DETECT (7 << 0) ++# define TV_TEST_MODE_MASK (7 << 0) ++ ++#define TV_DAC 0x68004 ++/** ++ * Reports that DAC state change logic has reported change (RO). ++ * ++ * This gets cleared when TV_DAC_STATE_EN is cleared ++*/ ++# define TVDAC_STATE_CHG (1 << 31) ++# define TVDAC_SENSE_MASK (7 << 28) ++/** Reports that DAC A voltage is above the detect threshold */ ++# define TVDAC_A_SENSE (1 << 30) ++/** Reports that DAC B voltage is above the detect threshold */ ++# define TVDAC_B_SENSE (1 << 29) ++/** Reports that DAC C voltage is above the detect threshold */ ++# define TVDAC_C_SENSE (1 << 28) ++/** ++ * Enables DAC state detection logic, for load-based TV detection. ++ * ++ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set ++ * to off, for load detection to work. ++ */ ++# define TVDAC_STATE_CHG_EN (1 << 27) ++/** Sets the DAC A sense value to high */ ++# define TVDAC_A_SENSE_CTL (1 << 26) ++/** Sets the DAC B sense value to high */ ++# define TVDAC_B_SENSE_CTL (1 << 25) ++/** Sets the DAC C sense value to high */ ++# define TVDAC_C_SENSE_CTL (1 << 24) ++/** Overrides the ENC_ENABLE and DAC voltage levels */ ++# define DAC_CTL_OVERRIDE (1 << 7) ++/** Sets the slew rate. Must be preserved in software */ ++# define ENC_TVDAC_SLEW_FAST (1 << 6) ++# define DAC_A_1_3_V (0 << 4) ++# define DAC_A_1_1_V (1 << 4) ++# define DAC_A_0_7_V (2 << 4) ++# define DAC_A_OFF (3 << 4) ++# define DAC_B_1_3_V (0 << 2) ++# define DAC_B_1_1_V (1 << 2) ++# define DAC_B_0_7_V (2 << 2) ++# define DAC_B_OFF (3 << 2) ++# define DAC_C_1_3_V (0 << 0) ++# define DAC_C_1_1_V (1 << 0) ++# define DAC_C_0_7_V (2 << 0) ++# define DAC_C_OFF (3 << 0) ++ ++/** ++ * CSC coefficients are stored in a floating point format with 9 bits of ++ * mantissa and 2 or 3 bits of exponent. 
The exponent is represented as 2**-n, ++ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with ++ * -1 (0x3) being the only legal negative value. ++ */ ++#define TV_CSC_Y 0x68010 ++# define TV_RY_MASK 0x07ff0000 ++# define TV_RY_SHIFT 16 ++# define TV_GY_MASK 0x00000fff ++# define TV_GY_SHIFT 0 ++ ++#define TV_CSC_Y2 0x68014 ++# define TV_BY_MASK 0x07ff0000 ++# define TV_BY_SHIFT 16 ++/** ++ * Y attenuation for component video. ++ * ++ * Stored in 1.9 fixed point. ++ */ ++# define TV_AY_MASK 0x000003ff ++# define TV_AY_SHIFT 0 ++ ++#define TV_CSC_U 0x68018 ++# define TV_RU_MASK 0x07ff0000 ++# define TV_RU_SHIFT 16 ++# define TV_GU_MASK 0x000007ff ++# define TV_GU_SHIFT 0 ++ ++#define TV_CSC_U2 0x6801c ++# define TV_BU_MASK 0x07ff0000 ++# define TV_BU_SHIFT 16 ++/** ++ * U attenuation for component video. ++ * ++ * Stored in 1.9 fixed point. ++ */ ++# define TV_AU_MASK 0x000003ff ++# define TV_AU_SHIFT 0 ++ ++#define TV_CSC_V 0x68020 ++# define TV_RV_MASK 0x0fff0000 ++# define TV_RV_SHIFT 16 ++# define TV_GV_MASK 0x000007ff ++# define TV_GV_SHIFT 0 ++ ++#define TV_CSC_V2 0x68024 ++# define TV_BV_MASK 0x07ff0000 ++# define TV_BV_SHIFT 16 ++/** ++ * V attenuation for component video. ++ * ++ * Stored in 1.9 fixed point. ++ */ ++# define TV_AV_MASK 0x000007ff ++# define TV_AV_SHIFT 0 ++ ++#define TV_CLR_KNOBS 0x68028 ++/** 2s-complement brightness adjustment */ ++# define TV_BRIGHTNESS_MASK 0xff000000 ++# define TV_BRIGHTNESS_SHIFT 24 ++/** Contrast adjustment, as a 2.6 unsigned floating point number */ ++# define TV_CONTRAST_MASK 0x00ff0000 ++# define TV_CONTRAST_SHIFT 16 ++/** Saturation adjustment, as a 2.6 unsigned floating point number */ ++# define TV_SATURATION_MASK 0x0000ff00 ++# define TV_SATURATION_SHIFT 8 ++/** Hue adjustment, as an integer phase angle in degrees */ ++# define TV_HUE_MASK 0x000000ff ++# define TV_HUE_SHIFT 0 ++ ++#define TV_CLR_LEVEL 0x6802c ++/** Controls the DAC level for black */ ++# define TV_BLACK_LEVEL_MASK 0x01ff0000 ++# define TV_BLACK_LEVEL_SHIFT 16 ++/** Controls the DAC level for blanking */ ++# define TV_BLANK_LEVEL_MASK 0x000001ff ++# define TV_BLANK_LEVEL_SHIFT 0 ++ ++#define TV_H_CTL_1 0x68030 ++/** Number of pixels in the hsync. */ ++# define TV_HSYNC_END_MASK 0x1fff0000 ++# define TV_HSYNC_END_SHIFT 16 ++/** Total number of pixels minus one in the line (display and blanking). */ ++# define TV_HTOTAL_MASK 0x00001fff ++# define TV_HTOTAL_SHIFT 0 ++ ++#define TV_H_CTL_2 0x68034 ++/** Enables the colorburst (needed for non-component color) */ ++# define TV_BURST_ENA (1 << 31) ++/** Offset of the colorburst from the start of hsync, in pixels minus one. 
*/ ++# define TV_HBURST_START_SHIFT 16 ++# define TV_HBURST_START_MASK 0x1fff0000 ++/** Length of the colorburst */ ++# define TV_HBURST_LEN_SHIFT 0 ++# define TV_HBURST_LEN_MASK 0x0001fff ++ ++#define TV_H_CTL_3 0x68038 ++/** End of hblank, measured in pixels minus one from start of hsync */ ++# define TV_HBLANK_END_SHIFT 16 ++# define TV_HBLANK_END_MASK 0x1fff0000 ++/** Start of hblank, measured in pixels minus one from start of hsync */ ++# define TV_HBLANK_START_SHIFT 0 ++# define TV_HBLANK_START_MASK 0x0001fff ++ ++#define TV_V_CTL_1 0x6803c ++/** XXX */ ++# define TV_NBR_END_SHIFT 16 ++# define TV_NBR_END_MASK 0x07ff0000 ++/** XXX */ ++# define TV_VI_END_F1_SHIFT 8 ++# define TV_VI_END_F1_MASK 0x00003f00 ++/** XXX */ ++# define TV_VI_END_F2_SHIFT 0 ++# define TV_VI_END_F2_MASK 0x0000003f ++ ++#define TV_V_CTL_2 0x68040 ++/** Length of vsync, in half lines */ ++# define TV_VSYNC_LEN_MASK 0x07ff0000 ++# define TV_VSYNC_LEN_SHIFT 16 ++/** Offset of the start of vsync in field 1, measured in one less than the ++ * number of half lines. ++ */ ++# define TV_VSYNC_START_F1_MASK 0x00007f00 ++# define TV_VSYNC_START_F1_SHIFT 8 ++/** ++ * Offset of the start of vsync in field 2, measured in one less than the ++ * number of half lines. ++ */ ++# define TV_VSYNC_START_F2_MASK 0x0000007f ++# define TV_VSYNC_START_F2_SHIFT 0 ++ ++#define TV_V_CTL_3 0x68044 ++/** Enables generation of the equalization signal */ ++# define TV_EQUAL_ENA (1 << 31) ++/** Length of vsync, in half lines */ ++# define TV_VEQ_LEN_MASK 0x007f0000 ++# define TV_VEQ_LEN_SHIFT 16 ++/** Offset of the start of equalization in field 1, measured in one less than ++ * the number of half lines. ++ */ ++# define TV_VEQ_START_F1_MASK 0x0007f00 ++# define TV_VEQ_START_F1_SHIFT 8 ++/** ++ * Offset of the start of equalization in field 2, measured in one less than ++ * the number of half lines. ++ */ ++# define TV_VEQ_START_F2_MASK 0x000007f ++# define TV_VEQ_START_F2_SHIFT 0 ++ ++#define TV_V_CTL_4 0x68048 ++/** ++ * Offset to start of vertical colorburst, measured in one less than the ++ * number of lines from vertical start. ++ */ ++# define TV_VBURST_START_F1_MASK 0x003f0000 ++# define TV_VBURST_START_F1_SHIFT 16 ++/** ++ * Offset to the end of vertical colorburst, measured in one less than the ++ * number of lines from the start of NBR. ++ */ ++# define TV_VBURST_END_F1_MASK 0x000000ff ++# define TV_VBURST_END_F1_SHIFT 0 ++ ++#define TV_V_CTL_5 0x6804c ++/** ++ * Offset to start of vertical colorburst, measured in one less than the ++ * number of lines from vertical start. ++ */ ++# define TV_VBURST_START_F2_MASK 0x003f0000 ++# define TV_VBURST_START_F2_SHIFT 16 ++/** ++ * Offset to the end of vertical colorburst, measured in one less than the ++ * number of lines from the start of NBR. ++ */ ++# define TV_VBURST_END_F2_MASK 0x000000ff ++# define TV_VBURST_END_F2_SHIFT 0 ++ ++#define TV_V_CTL_6 0x68050 ++/** ++ * Offset to start of vertical colorburst, measured in one less than the ++ * number of lines from vertical start. ++ */ ++# define TV_VBURST_START_F3_MASK 0x003f0000 ++# define TV_VBURST_START_F3_SHIFT 16 ++/** ++ * Offset to the end of vertical colorburst, measured in one less than the ++ * number of lines from the start of NBR. ++ */ ++# define TV_VBURST_END_F3_MASK 0x000000ff ++# define TV_VBURST_END_F3_SHIFT 0 ++ ++#define TV_V_CTL_7 0x68054 ++/** ++ * Offset to start of vertical colorburst, measured in one less than the ++ * number of lines from vertical start. 
++ */
++# define TV_VBURST_START_F4_MASK 0x003f0000
++# define TV_VBURST_START_F4_SHIFT 16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F4_MASK 0x000000ff
++# define TV_VBURST_END_F4_SHIFT 0
++
++#define TV_SC_CTL_1 0x68060
++/** Turns on the first subcarrier phase generation DDA */
++# define TV_SC_DDA1_EN (1 << 31)
++/** Turns on the second subcarrier phase generation DDA */
++# define TV_SC_DDA2_EN (1 << 30)
++/** Turns on the third subcarrier phase generation DDA */
++# define TV_SC_DDA3_EN (1 << 29)
++/** Sets the subcarrier DDA to reset frequency every other field */
++# define TV_SC_RESET_EVERY_2 (0 << 24)
++/** Sets the subcarrier DDA to reset frequency every fourth field */
++# define TV_SC_RESET_EVERY_4 (1 << 24)
++/** Sets the subcarrier DDA to reset frequency every eighth field */
++# define TV_SC_RESET_EVERY_8 (2 << 24)
++/** Sets the subcarrier DDA to never reset the frequency */
++# define TV_SC_RESET_NEVER (3 << 24)
++/** Sets the peak amplitude of the colorburst.*/
++# define TV_BURST_LEVEL_MASK 0x00ff0000
++# define TV_BURST_LEVEL_SHIFT 16
++/** Sets the increment of the first subcarrier phase generation DDA */
++# define TV_SCDDA1_INC_MASK 0x00000fff
++# define TV_SCDDA1_INC_SHIFT 0
++
++#define TV_SC_CTL_2 0x68064
++/** Sets the rollover for the second subcarrier phase generation DDA */
++# define TV_SCDDA2_SIZE_MASK 0x7fff0000
++# define TV_SCDDA2_SIZE_SHIFT 16
++/** Sets the increment of the second subcarrier phase generation DDA */
++# define TV_SCDDA2_INC_MASK 0x00007fff
++# define TV_SCDDA2_INC_SHIFT 0
++
++#define TV_SC_CTL_3 0x68068
++/** Sets the rollover for the third subcarrier phase generation DDA */
++# define TV_SCDDA3_SIZE_MASK 0x7fff0000
++# define TV_SCDDA3_SIZE_SHIFT 16
++/** Sets the increment of the third subcarrier phase generation DDA */
++# define TV_SCDDA3_INC_MASK 0x00007fff
++# define TV_SCDDA3_INC_SHIFT 0
++
++#define TV_WIN_POS 0x68070
++/** X coordinate of the display from the start of horizontal active */
++# define TV_XPOS_MASK 0x1fff0000
++# define TV_XPOS_SHIFT 16
++/** Y coordinate of the display from the start of vertical active (NBR) */
++# define TV_YPOS_MASK 0x00000fff
++# define TV_YPOS_SHIFT 0
++
++#define TV_WIN_SIZE 0x68074
++/** Horizontal size of the display window, measured in pixels*/
++# define TV_XSIZE_MASK 0x1fff0000
++# define TV_XSIZE_SHIFT 16
++/**
++ * Vertical size of the display window, measured in pixels.
++ *
++ * Must be even for interlaced modes.
++ */
++# define TV_YSIZE_MASK 0x00000fff
++# define TV_YSIZE_SHIFT 0
++
++#define TV_FILTER_CTL_1 0x68080
++/**
++ * Enables automatic scaling calculation.
++ *
++ * If set, the rest of the registers are ignored, and the calculated values can
++ * be read back from the register.
++ */
++# define TV_AUTO_SCALE (1 << 31)
++/**
++ * Disables the vertical filter.
++ *
++ * This is required on modes more than 1024 pixels wide */
++# define TV_V_FILTER_BYPASS (1 << 29)
++/** Enables adaptive vertical filtering */
++# define TV_VADAPT (1 << 28)
++# define TV_VADAPT_MODE_MASK (3 << 26)
++/** Selects the least adaptive vertical filtering mode */
++# define TV_VADAPT_MODE_LEAST (0 << 26)
++/** Selects the moderately adaptive vertical filtering mode */
++# define TV_VADAPT_MODE_MODERATE (1 << 26)
++/** Selects the most adaptive vertical filtering mode */
++# define TV_VADAPT_MODE_MOST (3 << 26)
++/**
++ * Sets the horizontal scaling factor.
++ * ++ * This should be the fractional part of the horizontal scaling factor divided ++ * by the oversampling rate. TV_HSCALE should be less than 1, and set to: ++ * ++ * (src width - 1) / ((oversample * dest width) - 1) ++ */ ++# define TV_HSCALE_FRAC_MASK 0x00003fff ++# define TV_HSCALE_FRAC_SHIFT 0 ++ ++#define TV_FILTER_CTL_2 0x68084 ++/** ++ * Sets the integer part of the 3.15 fixed-point vertical scaling factor. ++ * ++ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) ++ */ ++# define TV_VSCALE_INT_MASK 0x00038000 ++# define TV_VSCALE_INT_SHIFT 15 ++/** ++ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. ++ * ++ * \sa TV_VSCALE_INT_MASK ++ */ ++# define TV_VSCALE_FRAC_MASK 0x00007fff ++# define TV_VSCALE_FRAC_SHIFT 0 ++ ++#define TV_FILTER_CTL_3 0x68088 ++/** ++ * Sets the integer part of the 3.15 fixed-point vertical scaling factor. ++ * ++ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) ++ * ++ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. ++ */ ++# define TV_VSCALE_IP_INT_MASK 0x00038000 ++# define TV_VSCALE_IP_INT_SHIFT 15 ++/** ++ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. ++ * ++ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. ++ * ++ * \sa TV_VSCALE_IP_INT_MASK ++ */ ++# define TV_VSCALE_IP_FRAC_MASK 0x00007fff ++# define TV_VSCALE_IP_FRAC_SHIFT 0 ++ ++#define TV_CC_CONTROL 0x68090 ++# define TV_CC_ENABLE (1 << 31) ++/** ++ * Specifies which field to send the CC data in. ++ * ++ * CC data is usually sent in field 0. ++ */ ++# define TV_CC_FID_MASK (1 << 27) ++# define TV_CC_FID_SHIFT 27 ++/** Sets the horizontal position of the CC data. Usually 135. */ ++# define TV_CC_HOFF_MASK 0x03ff0000 ++# define TV_CC_HOFF_SHIFT 16 ++/** Sets the vertical position of the CC data. Usually 21 */ ++# define TV_CC_LINE_MASK 0x0000003f ++# define TV_CC_LINE_SHIFT 0 ++ ++#define TV_CC_DATA 0x68094 ++# define TV_CC_RDY (1 << 31) ++/** Second word of CC data to be transmitted. */ ++# define TV_CC_DATA_2_MASK 0x007f0000 ++# define TV_CC_DATA_2_SHIFT 16 ++/** First word of CC data to be transmitted. 
*/ ++# define TV_CC_DATA_1_MASK 0x0000007f ++# define TV_CC_DATA_1_SHIFT 0 ++ ++#define TV_H_LUMA_0 0x68100 ++#define TV_H_LUMA_59 0x681ec ++#define TV_H_CHROMA_0 0x68200 ++#define TV_H_CHROMA_59 0x682ec ++#define TV_V_LUMA_0 0x68300 ++#define TV_V_LUMA_42 0x683a8 ++#define TV_V_CHROMA_0 0x68400 ++#define TV_V_CHROMA_42 0x684a8 ++ ++/* Display & cursor control */ ++ ++/* Pipe A */ ++#define PIPEADSL 0x70000 ++#define PIPEACONF 0x70008 ++#define PIPEACONF_ENABLE (1<<31) ++#define PIPEACONF_DISABLE 0 ++#define PIPEACONF_DOUBLE_WIDE (1<<30) ++#define I965_PIPECONF_ACTIVE (1<<30) ++#define PIPEACONF_SINGLE_WIDE 0 ++#define PIPEACONF_PIPE_UNLOCKED 0 ++#define PIPEACONF_PIPE_LOCKED (1<<25) ++#define PIPEACONF_PALETTE 0 ++#define PIPEACONF_GAMMA (1<<24) ++#define PIPECONF_FORCE_BORDER (1<<25) ++#define PIPECONF_PROGRESSIVE (0 << 21) ++#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) ++#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) ++#define PIPEASTAT 0x70024 ++#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) ++#define PIPE_CRC_ERROR_ENABLE (1UL<<29) ++#define PIPE_CRC_DONE_ENABLE (1UL<<28) ++#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) ++#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) ++#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) ++#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) ++#define PIPE_DPST_EVENT_ENABLE (1UL<<23) ++#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) ++#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) ++#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) ++#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ ++#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ ++#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) ++#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) ++#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) ++#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) ++#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) ++#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) ++#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) ++#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) ++#define PIPE_DPST_EVENT_STATUS (1UL<<7) ++#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) ++#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) ++#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) ++#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ ++#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ ++#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) ++#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) ++ ++#define DSPARB 0x70030 ++#define DSPARB_CSTART_MASK (0x7f << 7) ++#define DSPARB_CSTART_SHIFT 7 ++#define DSPARB_BSTART_MASK (0x7f) ++#define DSPARB_BSTART_SHIFT 0 ++/* ++ * The two pipe frame counter registers are not synchronized, so ++ * reading a stable value is somewhat tricky. 
The following code
++ * should work:
++ *
++ * do {
++ *   high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
++ *            PIPE_FRAME_HIGH_SHIFT);
++ *   low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
++ *           PIPE_FRAME_LOW_SHIFT);
++ *   high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
++ *            PIPE_FRAME_HIGH_SHIFT);
++ * } while (high1 != high2);
++ * frame = (high1 << 8) | low1;
++ */
++#define PIPEAFRAMEHIGH 0x70040
++#define PIPE_FRAME_HIGH_MASK 0x0000ffff
++#define PIPE_FRAME_HIGH_SHIFT 0
++#define PIPEAFRAMEPIXEL 0x70044
++#define PIPE_FRAME_LOW_MASK 0xff000000
++#define PIPE_FRAME_LOW_SHIFT 24
++#define PIPE_PIXEL_MASK 0x00ffffff
++#define PIPE_PIXEL_SHIFT 0
++
++/* Cursor A & B regs */
++#define CURACNTR 0x70080
++#define CURSOR_MODE_DISABLE 0x00
++#define CURSOR_MODE_64_32B_AX 0x07
++#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
++#define MCURSOR_GAMMA_ENABLE (1 << 26)
++#define CURABASE 0x70084
++#define CURAPOS 0x70088
++#define CURSOR_POS_MASK 0x007FF
++#define CURSOR_POS_SIGN 0x8000
++#define CURSOR_X_SHIFT 0
++#define CURSOR_Y_SHIFT 16
++#define CURBCNTR 0x700c0
++#define CURBBASE 0x700c4
++#define CURBPOS 0x700c8
++
++/* Display A control */
++#define DSPACNTR 0x70180
++#define DISPLAY_PLANE_ENABLE (1<<31)
++#define DISPLAY_PLANE_DISABLE 0
++#define DISPPLANE_GAMMA_ENABLE (1<<30)
++#define DISPPLANE_GAMMA_DISABLE 0
++#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
++#define DISPPLANE_8BPP (0x2<<26)
++#define DISPPLANE_15_16BPP (0x4<<26)
++#define DISPPLANE_16BPP (0x5<<26)
++#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
++#define DISPPLANE_32BPP (0x7<<26)
++#define DISPPLANE_STEREO_ENABLE (1<<25)
++#define DISPPLANE_STEREO_DISABLE 0
++#define DISPPLANE_SEL_PIPE_MASK (1<<24)
++#define DISPPLANE_SEL_PIPE_A 0
++#define DISPPLANE_SEL_PIPE_B (1<<24)
++#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
++#define DISPPLANE_SRC_KEY_DISABLE 0
++#define DISPPLANE_LINE_DOUBLE (1<<20)
++#define DISPPLANE_NO_LINE_DOUBLE 0
++#define DISPPLANE_STEREO_POLARITY_FIRST 0
++#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
++#define DSPAADDR 0x70184
++#define DSPASTRIDE 0x70188
++#define DSPAPOS 0x7018C /* reserved */
++#define DSPASIZE 0x70190
++#define DSPASURF 0x7019C /* 965+ only */
++#define DSPATILEOFF 0x701A4 /* 965+ only */
++
++/* VBIOS flags */
++#define SWF00 0x71410
++#define SWF01 0x71414
++#define SWF02 0x71418
++#define SWF03 0x7141c
++#define SWF04 0x71420
++#define SWF05 0x71424
++#define SWF06 0x71428
++#define SWF10 0x70410
++#define SWF11 0x70414
++#define SWF14 0x71420
++#define SWF30 0x72414
++#define SWF31 0x72418
++#define SWF32 0x7241c
++
++/* Pipe B */
++#define PIPEBDSL 0x71000
++#define PIPEBCONF 0x71008
++#define PIPEBSTAT 0x71024
++#define PIPEBFRAMEHIGH 0x71040
++#define PIPEBFRAMEPIXEL 0x71044
++
++/* Display B control */
++#define DSPBCNTR 0x71180
++#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
++#define DISPPLANE_ALPHA_TRANS_DISABLE 0
++#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
++#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
++#define DSPBADDR 0x71184
++#define DSPBSTRIDE 0x71188
++#define DSPBPOS 0x7118C
++#define DSPBSIZE 0x71190
++#define DSPBSURF 0x7119C
++#define DSPBTILEOFF 0x711A4
++
++/* VBIOS regs */
++#define VGACNTRL 0x71400
++# define VGA_DISP_DISABLE (1 << 31)
++# define VGA_2X_MODE (1 << 30)
++# define VGA_PIPE_B_SELECT (1 << 29)
++
++/* Chipset type macros */
++
++#define IS_I830(dev) ((dev)->pci_device == 0x3577)
++#define IS_845G(dev) ((dev)->pci_device == 0x2562)
++#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
++#define IS_I855(dev) 
((dev)->pci_device == 0x3582) ++#define IS_I865G(dev) ((dev)->pci_device == 0x2572) ++ ++#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) ++#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) ++#define IS_I945G(dev) ((dev)->pci_device == 0x2772) ++#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\ ++ (dev)->pci_device == 0x27AE) ++#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \ ++ (dev)->pci_device == 0x2982 || \ ++ (dev)->pci_device == 0x2992 || \ ++ (dev)->pci_device == 0x29A2 || \ ++ (dev)->pci_device == 0x2A02 || \ ++ (dev)->pci_device == 0x2A12 || \ ++ (dev)->pci_device == 0x2A42 || \ ++ (dev)->pci_device == 0x2E02 || \ ++ (dev)->pci_device == 0x2E12 || \ ++ (dev)->pci_device == 0x2E22) ++ ++#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) ++ ++#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) ++ ++#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ ++ (dev)->pci_device == 0x2E12 || \ ++ (dev)->pci_device == 0x2E22) ++ ++#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ ++ (dev)->pci_device == 0x29B2 || \ ++ (dev)->pci_device == 0x29D2) ++ ++#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ ++ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) ++ ++#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ ++ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) ++ ++#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/i915_execbuf.c git-nokia/drivers/gpu/drm-tungsten/i915_execbuf.c +--- git/drivers/gpu/drm-tungsten/i915_execbuf.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_execbuf.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,917 @@ ++/* ++ * Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Thomas Hellstrom ++ * Dave Airlie ++ * Keith Packard ++ * ... ? 
++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#if DRM_DEBUG_CODE ++#define DRM_DEBUG_RELOCATION (drm_debug != 0) ++#else ++#define DRM_DEBUG_RELOCATION 0 ++#endif ++ ++enum i915_buf_idle { ++ I915_RELOC_UNCHECKED, ++ I915_RELOC_IDLE, ++ I915_RELOC_BUSY ++}; ++ ++struct i915_relocatee_info { ++ struct drm_buffer_object *buf; ++ unsigned long offset; ++ uint32_t *data_page; ++ unsigned page_offset; ++ struct drm_bo_kmap_obj kmap; ++ int is_iomem; ++ int dst; ++ int idle; ++ int performed_ring_relocs; ++#ifdef DRM_KMAP_ATOMIC_PROT_PFN ++ unsigned long pfn; ++ pgprot_t pg_prot; ++#endif ++}; ++ ++struct drm_i915_validate_buffer { ++ struct drm_buffer_object *buffer; ++ int presumed_offset_correct; ++ void __user *data; ++ int ret; ++ enum i915_buf_idle idle; ++}; ++ ++/* ++ * I'd like to use MI_STORE_DATA_IMM here, but I can't make ++ * it work. Seems like GART writes are broken with that ++ * instruction. Also I'm not sure that MI_FLUSH will ++ * act as a memory barrier for that instruction. It will ++ * for this single dword 2D blit. ++ */ ++ ++static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset, ++ uint32_t value) ++{ ++ struct drm_i915_private *dev_priv = ++ (struct drm_i915_private *)dev->dev_private; ++ ++ RING_LOCALS; ++ i915_kernel_lost_context(dev); ++ BEGIN_LP_RING(6); ++ OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3)); ++ OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40)); ++ OUT_RING((0x1 << 16) | (0x4)); ++ OUT_RING(offset); ++ OUT_RING(value); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++} ++ ++static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer ++ *buffers, unsigned num_buffers) ++{ ++ while (num_buffers--) ++ drm_bo_usage_deref_locked(&buffers[num_buffers].buffer); ++} ++ ++int i915_apply_reloc(struct drm_file *file_priv, int num_buffers, ++ struct drm_i915_validate_buffer *buffers, ++ struct i915_relocatee_info *relocatee, uint32_t * reloc) ++{ ++ unsigned index; ++ unsigned long new_cmd_offset; ++ u32 val; ++ int ret, i; ++ int buf_index = -1; ++ ++ /* ++ * FIXME: O(relocs * buffers) complexity. 
++ */ ++ ++ for (i = 0; i <= num_buffers; i++) ++ if (buffers[i].buffer) ++ if (reloc[2] == buffers[i].buffer->base.hash.key) ++ buf_index = i; ++ ++ if (buf_index == -1) { ++ DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]); ++ return -EINVAL; ++ } ++ ++ /* ++ * Short-circuit relocations that were correctly ++ * guessed by the client ++ */ ++ if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION) ++ return 0; ++ ++ new_cmd_offset = reloc[0]; ++ if (!relocatee->data_page || ++ !drm_bo_same_page(relocatee->offset, new_cmd_offset)) { ++ struct drm_bo_mem_reg *mem = &relocatee->buf->mem; ++ ++ drm_bo_kunmap(&relocatee->kmap); ++ relocatee->data_page = NULL; ++ relocatee->offset = new_cmd_offset; ++ ++ if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) { ++ ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0); ++ if (ret) ++ return ret; ++ relocatee->idle = I915_RELOC_IDLE; ++ } ++ ++ if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) && ++ (mem->flags & DRM_BO_FLAG_CACHED_MAPPED))) ++ drm_bo_evict_cached(relocatee->buf); ++ ++ ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT, ++ 1, &relocatee->kmap); ++ if (ret) { ++ DRM_ERROR ++ ("Could not map command buffer to apply relocs\n %08lx", ++ new_cmd_offset); ++ return ret; ++ } ++ relocatee->data_page = drm_bmo_virtual(&relocatee->kmap, ++ &relocatee->is_iomem); ++ relocatee->page_offset = (relocatee->offset & PAGE_MASK); ++ } ++ ++ val = buffers[buf_index].buffer->offset; ++ index = (reloc[0] - relocatee->page_offset) >> 2; ++ ++ /* add in validate */ ++ val = val + reloc[1]; ++ ++ if (DRM_DEBUG_RELOCATION) { ++ if (buffers[buf_index].presumed_offset_correct && ++ relocatee->data_page[index] != val) { ++ DRM_DEBUG ++ ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n", ++ reloc[0], reloc[1], buf_index, ++ relocatee->data_page[index], val); ++ } ++ } ++ ++ if (relocatee->is_iomem) ++ iowrite32(val, relocatee->data_page + index); ++ else ++ relocatee->data_page[index] = val; ++ return 0; ++} ++ ++int i915_process_relocs(struct drm_file *file_priv, ++ uint32_t buf_handle, ++ uint32_t __user ** reloc_user_ptr, ++ struct i915_relocatee_info *relocatee, ++ struct drm_i915_validate_buffer *buffers, ++ uint32_t num_buffers) ++{ ++ int ret, reloc_stride; ++ uint32_t cur_offset; ++ uint32_t reloc_count; ++ uint32_t reloc_type; ++ uint32_t reloc_buf_size; ++ uint32_t *reloc_buf = NULL; ++ int i; ++ ++ /* do a copy from user from the user ptr */ ++ ret = get_user(reloc_count, *reloc_user_ptr); ++ if (ret) { ++ DRM_ERROR("Could not map relocation buffer.\n"); ++ goto out; ++ } ++ ++ ret = get_user(reloc_type, (*reloc_user_ptr) + 1); ++ if (ret) { ++ DRM_ERROR("Could not map relocation buffer.\n"); ++ goto out; ++ } ++ ++ if (reloc_type != 0) { ++ DRM_ERROR("Unsupported relocation type requested\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ reloc_buf_size = ++ (I915_RELOC_HEADER + ++ (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t); ++ reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL); ++ if (!reloc_buf) { ++ DRM_ERROR("Out of memory for reloc buffer\n"); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ /* get next relocate buffer handle */ ++ *reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2]; ++ ++ reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */ ++ ++ DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count, ++ *reloc_user_ptr); 
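++	/*
++	 * Layout of a type-0 relocation list, as consumed above: an
++	 * I915_RELOC_HEADER-dword header (record count, relocation type,
++	 * user pointer to the next list, read back into *reloc_user_ptr),
++	 * followed by reloc_count records of I915_RELOC0_STRIDE dwords
++	 * each, where reloc[0] is the byte offset to patch in the command
++	 * buffer, reloc[1] is the delta added to the target buffer's
++	 * offset, and reloc[2] is the target buffer handle.
++	 */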
++ ++ for (i = 0; i < reloc_count; i++) { ++ cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE); ++ ++ ret = i915_apply_reloc(file_priv, num_buffers, buffers, ++ relocatee, reloc_buf + cur_offset); ++ if (ret) ++ goto out; ++ } ++ ++ out: ++ if (reloc_buf) ++ kfree(reloc_buf); ++ ++ if (relocatee->data_page) { ++ drm_bo_kunmap(&relocatee->kmap); ++ relocatee->data_page = NULL; ++ } ++ ++ return ret; ++} ++ ++static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle, ++ uint32_t __user * reloc_user_ptr, ++ struct drm_i915_validate_buffer *buffers, ++ uint32_t buf_count) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ struct i915_relocatee_info relocatee; ++ int ret = 0; ++ int b; ++ ++ /* ++ * Short circuit relocations when all previous ++ * buffers offsets were correctly guessed by ++ * the client ++ */ ++ if (!DRM_DEBUG_RELOCATION) { ++ for (b = 0; b < buf_count; b++) ++ if (!buffers[b].presumed_offset_correct) ++ break; ++ ++ if (b == buf_count) ++ return 0; ++ } ++ ++ memset(&relocatee, 0, sizeof(relocatee)); ++ relocatee.idle = I915_RELOC_UNCHECKED; ++ ++ mutex_lock(&dev->struct_mutex); ++ relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1); ++ mutex_unlock(&dev->struct_mutex); ++ if (!relocatee.buf) { ++ DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle); ++ ret = -EINVAL; ++ goto out_err; ++ } ++ ++ mutex_lock(&relocatee.buf->mutex); ++ while (reloc_user_ptr) { ++ ret = ++ i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr, ++ &relocatee, buffers, buf_count); ++ if (ret) { ++ DRM_ERROR("process relocs failed\n"); ++ goto out_err1; ++ } ++ } ++ ++ out_err1: ++ mutex_unlock(&relocatee.buf->mutex); ++ drm_bo_usage_deref_unlocked(&relocatee.buf); ++ out_err: ++ return ret; ++} ++ ++static void i915_clear_relocatee(struct i915_relocatee_info *relocatee) ++{ ++ if (relocatee->data_page) { ++#ifndef DRM_KMAP_ATOMIC_PROT_PFN ++ drm_bo_kunmap(&relocatee->kmap); ++#else ++ kunmap_atomic(relocatee->data_page, KM_USER0); ++#endif ++ relocatee->data_page = NULL; ++ } ++ relocatee->buf = NULL; ++ relocatee->dst = ~0; ++} ++ ++static int i915_update_relocatee(struct i915_relocatee_info *relocatee, ++ struct drm_i915_validate_buffer *buffers, ++ unsigned int dst, unsigned long dst_offset) ++{ ++ int ret; ++ ++ if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) { ++ i915_clear_relocatee(relocatee); ++ relocatee->dst = dst; ++ relocatee->buf = buffers[dst].buffer; ++ relocatee->idle = buffers[dst].idle; ++ ++ /* ++ * Check for buffer idle. If the buffer is busy, revert to ++ * ring relocations. 
++ */ ++ ++ if (relocatee->idle == I915_RELOC_UNCHECKED) { ++ preempt_enable(); ++ mutex_lock(&relocatee->buf->mutex); ++ ++ ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0); ++ if (ret == 0) ++ relocatee->idle = I915_RELOC_IDLE; ++ else { ++ relocatee->idle = I915_RELOC_BUSY; ++ relocatee->performed_ring_relocs = 1; ++ } ++ mutex_unlock(&relocatee->buf->mutex); ++ preempt_disable(); ++ buffers[dst].idle = relocatee->idle; ++ } ++ } ++ ++ if (relocatee->idle == I915_RELOC_BUSY) ++ return 0; ++ ++ if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) { ++ DRM_ERROR("Relocation destination out of bounds.\n"); ++ return -EINVAL; ++ } ++ if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) || ++ NULL == relocatee->data_page)) { ++#ifdef DRM_KMAP_ATOMIC_PROT_PFN ++ if (NULL != relocatee->data_page) { ++ kunmap_atomic(relocatee->data_page, KM_USER0); ++ relocatee->data_page = NULL; ++ } ++ ret = drm_bo_pfn_prot(relocatee->buf, dst_offset, ++ &relocatee->pfn, &relocatee->pg_prot); ++ if (ret) { ++ DRM_ERROR("Can't map relocation destination.\n"); ++ return -EINVAL; ++ } ++ relocatee->data_page = ++ kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0, ++ relocatee->pg_prot); ++#else ++ if (NULL != relocatee->data_page) { ++ drm_bo_kunmap(&relocatee->kmap); ++ relocatee->data_page = NULL; ++ } ++ ++ ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT, ++ 1, &relocatee->kmap); ++ if (ret) { ++ DRM_ERROR("Can't map relocation destination.\n"); ++ return ret; ++ } ++ ++ relocatee->data_page = drm_bmo_virtual(&relocatee->kmap, ++ &relocatee->is_iomem); ++#endif ++ relocatee->page_offset = dst_offset & PAGE_MASK; ++ } ++ return 0; ++} ++ ++static int i915_apply_post_reloc(uint32_t reloc[], ++ struct drm_i915_validate_buffer *buffers, ++ uint32_t num_buffers, ++ struct i915_relocatee_info *relocatee) ++{ ++ uint32_t reloc_buffer = reloc[2]; ++ uint32_t dst_buffer = reloc[3]; ++ uint32_t val; ++ uint32_t index; ++ int ret; ++ ++ if (likely(buffers[reloc_buffer].presumed_offset_correct)) ++ return 0; ++ if (unlikely(reloc_buffer >= num_buffers)) { ++ DRM_ERROR("Invalid reloc buffer index.\n"); ++ return -EINVAL; ++ } ++ if (unlikely(dst_buffer >= num_buffers)) { ++ DRM_ERROR("Invalid dest buffer index.\n"); ++ return -EINVAL; ++ } ++ ++ ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]); ++ if (unlikely(ret)) ++ return ret; ++ ++ val = buffers[reloc_buffer].buffer->offset; ++ index = (reloc[0] - relocatee->page_offset) >> 2; ++ val = val + reloc[1]; ++ ++ if (relocatee->idle == I915_RELOC_BUSY) { ++ i915_emit_ring_reloc(relocatee->buf->dev, ++ relocatee->buf->offset + reloc[0], val); ++ return 0; ++ } ++#ifdef DRM_KMAP_ATOMIC_PROT_PFN ++ relocatee->data_page[index] = val; ++#else ++ if (likely(relocatee->is_iomem)) ++ iowrite32(val, relocatee->data_page + index); ++ else ++ relocatee->data_page[index] = val; ++#endif ++ ++ return 0; ++} ++ ++static int i915_post_relocs(struct drm_file *file_priv, ++ uint32_t __user * new_reloc_ptr, ++ struct drm_i915_validate_buffer *buffers, ++ unsigned int num_buffers) ++{ ++ uint32_t *reloc; ++ uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); ++ uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t); ++ struct i915_relocatee_info relocatee; ++ uint32_t reloc_type; ++ uint32_t num_relocs; ++ uint32_t count; ++ int ret = 0; ++ int i; ++ int short_circuit = 1; ++ uint32_t __user *reloc_ptr; ++ uint64_t new_reloc_data; ++ uint32_t reloc_buf_size; ++ uint32_t *reloc_buf; ++ ++ for (i = 0; i < num_buffers; ++i) { 
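++		/*
++		 * Scan for any buffer whose presumed offset was wrong; if
++		 * user-space guessed every offset correctly, there is
++		 * nothing to patch and the relocation lists can be skipped
++		 * entirely.
++		 */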
++ if (unlikely(!buffers[i].presumed_offset_correct)) { ++ short_circuit = 0; ++ break; ++ } ++ } ++ ++ if (likely(short_circuit)) ++ return 0; ++ ++ memset(&relocatee, 0, sizeof(relocatee)); ++ ++ while (new_reloc_ptr) { ++ reloc_ptr = new_reloc_ptr; ++ ++ ret = get_user(num_relocs, reloc_ptr); ++ if (unlikely(ret)) ++ goto out; ++ if (unlikely(!access_ok(VERIFY_READ, reloc_ptr, ++ header_size + ++ num_relocs * reloc_stride))) ++ return -EFAULT; ++ ++ ret = __get_user(reloc_type, reloc_ptr + 1); ++ if (unlikely(ret)) ++ goto out; ++ ++ if (unlikely(reloc_type != 1)) { ++ DRM_ERROR("Unsupported relocation type requested.\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ ret = __get_user(new_reloc_data, reloc_ptr + 2); ++ new_reloc_ptr = (uint32_t __user *) (unsigned long) ++ new_reloc_data; ++ ++ reloc_ptr += I915_RELOC_HEADER; ++ ++ if (num_relocs == 0) ++ goto out; ++ ++ reloc_buf_size = ++ (num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t); ++ reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL); ++ if (!reloc_buf) { ++ DRM_ERROR("Out of memory for reloc buffer\n"); ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ reloc = reloc_buf; ++ preempt_disable(); ++ for (count = 0; count < num_relocs; ++count) { ++ ret = i915_apply_post_reloc(reloc, buffers, ++ num_buffers, &relocatee); ++ if (unlikely(ret)) { ++ preempt_enable(); ++ goto out; ++ } ++ reloc += I915_RELOC0_STRIDE; ++ } ++ preempt_enable(); ++ ++ if (reloc_buf) { ++ kfree(reloc_buf); ++ reloc_buf = NULL; ++ } ++ i915_clear_relocatee(&relocatee); ++ } ++ ++ out: ++ /* ++ * Flush ring relocs so the command parser will pick them up. ++ */ ++ ++ if (relocatee.performed_ring_relocs) ++ (void)i915_emit_mi_flush(file_priv->minor->dev, 0); ++ ++ i915_clear_relocatee(&relocatee); ++ if (reloc_buf) { ++ kfree(reloc_buf); ++ reloc_buf = NULL; ++ } ++ ++ return ret; ++} ++ ++static int i915_check_presumed(struct drm_i915_op_arg *arg, ++ struct drm_buffer_object *bo, ++ uint32_t __user * data, int *presumed_ok) ++{ ++ struct drm_bo_op_req *req = &arg->d.req; ++ uint32_t hint_offset; ++ uint32_t hint = req->bo_req.hint; ++ ++ *presumed_ok = 0; ++ ++ if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET)) ++ return 0; ++ if (bo->offset == req->bo_req.presumed_offset) { ++ *presumed_ok = 1; ++ return 0; ++ } ++ ++ /* ++ * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in ++ * the user-space IOCTL argument list, since the buffer has moved, ++ * we're about to apply relocations and we might subsequently ++ * hit an -EAGAIN. In that case the argument list will be reused by ++ * user-space, but the presumed offset is no longer valid. ++ * ++ * Needless to say, this is a bit ugly. 
++ */ ++ ++ hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg; ++ hint &= ~DRM_BO_HINT_PRESUMED_OFFSET; ++ return __put_user(hint, data + hint_offset); ++} ++ ++/* ++ * Validate, add fence and relocate a block of bos from a userspace list ++ */ ++int i915_validate_buffer_list(struct drm_file *file_priv, ++ unsigned int fence_class, uint64_t data, ++ struct drm_i915_validate_buffer *buffers, ++ uint32_t * num_buffers, ++ uint32_t __user ** post_relocs) ++{ ++ struct drm_i915_op_arg arg; ++ struct drm_bo_op_req *req = &arg.d.req; ++ int ret = 0; ++ unsigned buf_count = 0; ++ uint32_t buf_handle; ++ uint32_t __user *reloc_user_ptr; ++ struct drm_i915_validate_buffer *item = buffers; ++ *post_relocs = NULL; ++ ++ do { ++ if (buf_count >= *num_buffers) { ++ DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers); ++ ret = -EINVAL; ++ goto out_err; ++ } ++ item = buffers + buf_count; ++ item->buffer = NULL; ++ item->presumed_offset_correct = 0; ++ item->idle = I915_RELOC_UNCHECKED; ++ ++ if (copy_from_user ++ (&arg, (void __user *)(unsigned long)data, sizeof(arg))) { ++ ret = -EFAULT; ++ goto out_err; ++ } ++ ++ ret = 0; ++ if (req->op != drm_bo_validate) { ++ DRM_ERROR ++ ("Buffer object operation wasn't \"validate\".\n"); ++ ret = -EINVAL; ++ goto out_err; ++ } ++ item->ret = 0; ++ item->data = (void __user *)(unsigned long)data; ++ ++ buf_handle = req->bo_req.handle; ++ reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr; ++ ++ /* ++ * Switch mode to post-validation relocations? ++ */ ++ ++ if (unlikely((buf_count == 0) && (*post_relocs == NULL) && ++ (reloc_user_ptr != NULL))) { ++ uint32_t reloc_type; ++ ++ ret = get_user(reloc_type, reloc_user_ptr + 1); ++ if (ret) ++ goto out_err; ++ ++ if (reloc_type == 1) ++ *post_relocs = reloc_user_ptr; ++ ++ } ++ ++ if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) { ++ ret = ++ i915_exec_reloc(file_priv, buf_handle, ++ reloc_user_ptr, buffers, buf_count); ++ if (ret) ++ goto out_err; ++ DRM_MEMORYBARRIER(); ++ } ++ ++ ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, ++ req->bo_req.flags, ++ req->bo_req.mask, req->bo_req.hint, ++ req->bo_req.fence_class, ++ NULL, &item->buffer); ++ if (ret) { ++ DRM_ERROR("error on handle validate %d\n", ret); ++ goto out_err; ++ } ++ ++ buf_count++; ++ ++ ret = i915_check_presumed(&arg, item->buffer, ++ (uint32_t __user *) ++ (unsigned long)data, ++ &item->presumed_offset_correct); ++ if (ret) ++ goto out_err; ++ ++ data = arg.next; ++ } while (data != 0); ++ out_err: ++ *num_buffers = buf_count; ++ item->ret = (ret != -EAGAIN) ? ret : 0; ++ return ret; ++} ++ ++/* ++ * Remove all buffers from the unfenced list. ++ * If the execbuffer operation was aborted, for example due to a signal, ++ * this also make sure that buffers retain their original state and ++ * fence pointers. ++ * Copy back buffer information to user-space unless we were interrupted ++ * by a signal. In which case the IOCTL must be rerun. 
++ */ ++ ++static int i915_handle_copyback(struct drm_device *dev, ++ struct drm_i915_validate_buffer *buffers, ++ unsigned int num_buffers, int ret) ++{ ++ int err = ret; ++ int i; ++ struct drm_i915_op_arg arg; ++ struct drm_buffer_object *bo; ++ ++ if (ret) ++ drm_putback_buffer_objects(dev); ++ ++ if (ret != -EAGAIN) { ++ for (i = 0; i < num_buffers; ++i) { ++ arg.handled = 1; ++ arg.d.rep.ret = buffers->ret; ++ bo = buffers->buffer; ++ mutex_lock(&bo->mutex); ++ drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info); ++ mutex_unlock(&bo->mutex); ++ if (__copy_to_user(buffers->data, &arg, sizeof(arg))) ++ err = -EFAULT; ++ buffers++; ++ } ++ } ++ ++ return err; ++} ++ ++/* ++ * Create a fence object, and if that fails, pretend that everything is ++ * OK and just idle the GPU. ++ */ ++ ++void i915_fence_or_sync(struct drm_file *file_priv, ++ uint32_t fence_flags, ++ struct drm_fence_arg *fence_arg, ++ struct drm_fence_object **fence_p) ++{ ++ struct drm_device *dev = file_priv->minor->dev; ++ int ret; ++ struct drm_fence_object *fence; ++ ++ ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence); ++ ++ if (ret) { ++ ++ /* ++ * Fence creation failed. ++ * Fall back to synchronous operation and idle the engine. ++ */ ++ ++ (void)i915_emit_mi_flush(dev, MI_READ_FLUSH); ++ (void)i915_quiescent(dev); ++ ++ if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) { ++ ++ /* ++ * Communicate to user-space that ++ * fence creation has failed and that ++ * the engine is idle. ++ */ ++ ++ fence_arg->handle = ~0; ++ fence_arg->error = ret; ++ } ++ drm_putback_buffer_objects(dev); ++ if (fence_p) ++ *fence_p = NULL; ++ return; ++ } ++ ++ if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) { ++ ++ ret = drm_fence_add_user_object(file_priv, fence, ++ fence_flags & ++ DRM_FENCE_FLAG_SHAREABLE); ++ if (!ret) ++ drm_fence_fill_arg(fence, fence_arg); ++ else { ++ /* ++ * Fence user object creation failed. ++ * We must idle the engine here as well, as user- ++ * space expects a fence object to wait on. Since we ++ * have a fence object we wait for it to signal ++ * to indicate engine "sufficiently" idle. ++ */ ++ ++ (void)drm_fence_object_wait(fence, 0, 1, fence->type); ++ drm_fence_usage_deref_unlocked(&fence); ++ fence_arg->handle = ~0; ++ fence_arg->error = ret; ++ } ++ } ++ ++ if (fence_p) ++ *fence_p = fence; ++ else if (fence) ++ drm_fence_usage_deref_unlocked(&fence); ++} ++ ++int i915_execbuffer(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) ++ dev_priv->sarea_priv; ++ struct drm_i915_execbuffer *exec_buf = data; ++ struct drm_i915_batchbuffer *batch = &exec_buf->batch; ++ struct drm_fence_arg *fence_arg = &exec_buf->fence_arg; ++ int num_buffers; ++ int ret; ++ uint32_t __user *post_relocs; ++ ++ if (!dev_priv->allow_batchbuffer) { ++ DRM_ERROR("Batchbuffer ioctl disabled\n"); ++ return -EINVAL; ++ } ++ ++ if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, ++ batch->num_cliprects * ++ sizeof(struct ++ drm_clip_rect))) ++ return -EFAULT; ++ ++ if (exec_buf->num_buffers > dev_priv->max_validate_buffers) ++ return -EINVAL; ++ ++ ret = drm_bo_read_lock(&dev->bm.bm_lock, 1); ++ if (ret) ++ return ret; ++ ++ /* ++ * The cmdbuf_mutex makes sure the validate-submit-fence ++ * operation is atomic. 
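++ *
++ * Note the lock ordering used here: the buffer-manager read lock is
++ * taken before cmdbuf_mutex, and both are dropped on every exit
++ * path of this function.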
++ */ ++ ++ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); ++ if (ret) { ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return -EAGAIN; ++ } ++ ++ num_buffers = exec_buf->num_buffers; ++ ++ if (!dev_priv->val_bufs) { ++ dev_priv->val_bufs = ++ vmalloc(sizeof(struct drm_i915_validate_buffer) * ++ dev_priv->max_validate_buffers); ++ } ++ if (!dev_priv->val_bufs) { ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ mutex_unlock(&dev_priv->cmdbuf_mutex); ++ return -ENOMEM; ++ } ++ ++ /* validate buffer list + fixup relocations */ ++ ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list, ++ dev_priv->val_bufs, &num_buffers, ++ &post_relocs); ++ if (ret) ++ goto out_err0; ++ ++ if (post_relocs) { ++ ret = i915_post_relocs(file_priv, post_relocs, ++ dev_priv->val_bufs, num_buffers); ++ if (ret) ++ goto out_err0; ++ } ++ ++ /* make sure all previous memory operations have passed */ ++ DRM_MEMORYBARRIER(); ++ ++ if (!post_relocs) { ++ drm_agp_chipset_flush(dev); ++ batch->start = ++ dev_priv->val_bufs[num_buffers - 1].buffer->offset; ++ } else { ++ batch->start += dev_priv->val_bufs[0].buffer->offset; ++ } ++ ++ DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n", ++ batch->start, batch->used, batch->num_cliprects); ++ ++ ret = i915_dispatch_batchbuffer(dev, batch); ++ if (ret) ++ goto out_err0; ++ if (sarea_priv) ++ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); ++ i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL); ++ ++ out_err0: ++ ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret); ++ mutex_lock(&dev->struct_mutex); ++ i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers); ++ mutex_unlock(&dev->struct_mutex); ++ mutex_unlock(&dev_priv->cmdbuf_mutex); ++ drm_bo_read_unlock(&dev->bm.bm_lock); ++ return ret; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/i915_fence.c git-nokia/drivers/gpu/drm-tungsten/i915_fence.c +--- git/drivers/gpu/drm-tungsten/i915_fence.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_fence.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,273 @@ ++/************************************************************************** ++ * ++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. 
++ *
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++/*
++ * Initiate a sync flush if it's not already pending.
++ */
++
++static inline void i915_initiate_rwflush(struct drm_i915_private *dev_priv,
++ struct drm_fence_class_manager *fc)
++{
++ if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) &&
++ !dev_priv->flush_pending) {
++ dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
++ dev_priv->flush_flags = fc->pending_flush;
++ dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
++ I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
++ dev_priv->flush_pending = 1;
++ fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
++ }
++}
++
++static inline void i915_report_rwflush(struct drm_device *dev,
++ struct drm_i915_private *dev_priv)
++{
++ if (unlikely(dev_priv->flush_pending)) {
++
++ uint32_t flush_flags;
++ uint32_t i_status;
++ uint32_t flush_sequence;
++
++ i_status = READ_HWSP(dev_priv, 0);
++ if ((i_status & (1 << 12)) !=
++ (dev_priv->saved_flush_status & (1 << 12))) {
++ flush_flags = dev_priv->flush_flags;
++ flush_sequence = dev_priv->flush_sequence;
++ dev_priv->flush_pending = 0;
++ drm_fence_handler(dev, 0, flush_sequence,
++ flush_flags, 0);
++ }
++ }
++}
++
++static void i915_fence_flush(struct drm_device *dev,
++ uint32_t fence_class)
++{
++ struct drm_i915_private *dev_priv =
++ (struct drm_i915_private *) dev->dev_private;
++ struct drm_fence_manager *fm = &dev->fm;
++ struct drm_fence_class_manager *fc = &fm->fence_class[0];
++ unsigned long irq_flags;
++
++ if (unlikely(!dev_priv))
++ return;
++
++ write_lock_irqsave(&fm->lock, irq_flags);
++ i915_initiate_rwflush(dev_priv, fc);
++ write_unlock_irqrestore(&fm->lock, irq_flags);
++}
++
++
++static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class,
++ uint32_t waiting_types)
++{
++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++ struct drm_fence_manager *fm = &dev->fm;
++ struct drm_fence_class_manager *fc = &fm->fence_class[0];
++ uint32_t sequence;
++
++ if (unlikely(!dev_priv))
++ return;
++
++ /*
++ * First, report any executed sync flush:
++ */
++
++ i915_report_rwflush(dev, dev_priv);
++
++ /*
++ * Report a new breadcrumb, and adjust IRQs.
++ */
++
++ if (waiting_types & DRM_FENCE_TYPE_EXE) {
++
++ sequence = READ_BREADCRUMB(dev_priv);
++ drm_fence_handler(dev, 0, sequence,
++ DRM_FENCE_TYPE_EXE, 0);
++
++ if (dev_priv->fence_irq_on &&
++ !(fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
++ i915_user_irq_off(dev_priv);
++ dev_priv->fence_irq_on = 0;
++ } else if (!dev_priv->fence_irq_on &&
++ (fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
++ i915_user_irq_on(dev_priv);
++ dev_priv->fence_irq_on = 1;
++ }
++ }
++
++ /*
++ * There may be new RW flushes pending. Start them.
++ */
++
++ i915_initiate_rwflush(dev_priv, fc);
++
++ /*
++ * And possibly, but unlikely, they finish immediately.
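++ *
++ * (The sync flush itself is started by the masked INSTPM write in
++ * i915_initiate_rwflush() above -- bit 21 appears to act as the
++ * write-enable mask for the sync-flush bit 5 -- and completion is
++ * detected by i915_report_rwflush() as a toggle of bit 12 in the
++ * hardware status page, which is why it must be polled for rather
++ * than waited on.)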
++ */ ++ ++ i915_report_rwflush(dev, dev_priv); ++ ++} ++ ++static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class, ++ uint32_t flags, uint32_t *sequence, ++ uint32_t *native_type) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ if (unlikely(!dev_priv)) ++ return -EINVAL; ++ ++ i915_emit_irq(dev); ++ *sequence = (uint32_t) dev_priv->counter; ++ *native_type = DRM_FENCE_TYPE_EXE; ++ if (flags & DRM_I915_FENCE_FLAG_FLUSHED) ++ *native_type |= DRM_I915_FENCE_TYPE_RW; ++ ++ return 0; ++} ++ ++void i915_fence_handler(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[0]; ++ ++ write_lock(&fm->lock); ++ if (likely(dev_priv->fence_irq_on)) ++ i915_fence_poll(dev, 0, fc->waiting_types); ++ write_unlock(&fm->lock); ++} ++ ++/* ++ * We need a separate wait function since we need to poll for ++ * sync flushes. ++ */ ++ ++static int i915_fence_wait(struct drm_fence_object *fence, ++ int lazy, int interruptible, uint32_t mask) ++{ ++ struct drm_device *dev = fence->dev; ++ drm_i915_private_t *dev_priv = (struct drm_i915_private *) dev->dev_private; ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[0]; ++ int ret; ++ unsigned long _end = jiffies + 3 * DRM_HZ; ++ ++ drm_fence_object_flush(fence, mask); ++ if (likely(interruptible)) ++ ret = wait_event_interruptible_timeout ++ (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), ++ 3 * DRM_HZ); ++ else ++ ret = wait_event_timeout ++ (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), ++ 3 * DRM_HZ); ++ ++ if (unlikely(ret == -ERESTARTSYS)) ++ return -EAGAIN; ++ ++ if (unlikely(ret == 0)) ++ return -EBUSY; ++ ++ if (likely(mask == DRM_FENCE_TYPE_EXE || ++ drm_fence_object_signaled(fence, mask))) ++ return 0; ++ ++ /* ++ * Remove this code snippet when fixed. HWSTAM doesn't let ++ * flush info through... ++ */ ++ ++ if (unlikely(dev_priv && !dev_priv->irq_enabled)) { ++ unsigned long irq_flags; ++ ++ DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n"); ++ msleep(100); ++ dev_priv->flush_pending = 0; ++ write_lock_irqsave(&fm->lock, irq_flags); ++ drm_fence_handler(dev, fence->fence_class, ++ fence->sequence, fence->type, 0); ++ write_unlock_irqrestore(&fm->lock, irq_flags); ++ } ++ ++ /* ++ * Poll for sync flush completion. 
++ */ ++ ++ return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end); ++} ++ ++static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence) ++{ ++ uint32_t flush_flags = fence->waiting_types & ++ ~(DRM_FENCE_TYPE_EXE | fence->signaled_types); ++ ++ if (likely(flush_flags == 0 || ++ ((flush_flags & ~fence->native_types) == 0) || ++ (fence->signaled_types != DRM_FENCE_TYPE_EXE))) ++ return 0; ++ else { ++ struct drm_device *dev = fence->dev; ++ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private; ++ struct drm_fence_driver *driver = dev->driver->fence_driver; ++ ++ if (unlikely(!dev_priv)) ++ return 0; ++ ++ if (dev_priv->flush_pending) { ++ uint32_t diff = (dev_priv->flush_sequence - fence->sequence) & ++ driver->sequence_mask; ++ ++ if (diff < driver->wrap_diff) ++ return 0; ++ } ++ } ++ return flush_flags; ++} ++ ++struct drm_fence_driver i915_fence_driver = { ++ .num_classes = 1, ++ .wrap_diff = (1U << (BREADCRUMB_BITS - 1)), ++ .flush_diff = (1U << (BREADCRUMB_BITS - 2)), ++ .sequence_mask = BREADCRUMB_MASK, ++ .has_irq = NULL, ++ .emit = i915_fence_emit_sequence, ++ .flush = i915_fence_flush, ++ .poll = i915_fence_poll, ++ .needed_flush = i915_fence_needed_flush, ++ .wait = i915_fence_wait, ++}; +diff -Nurd git/drivers/gpu/drm-tungsten/i915_gem.c git-nokia/drivers/gpu/drm-tungsten/i915_gem.c +--- git/drivers/gpu/drm-tungsten/i915_gem.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_gem.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,2502 @@ ++/* ++ * Copyright © 2008 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. 
++ *
++ * Authors:
++ * Eric Anholt
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_compat.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++#include <linux/swap.h>
++
++static int
++i915_gem_object_set_domain(struct drm_gem_object *obj,
++ uint32_t read_domains,
++ uint32_t write_domain);
++static int
++i915_gem_object_set_domain_range(struct drm_gem_object *obj,
++ uint64_t offset,
++ uint64_t size,
++ uint32_t read_domains,
++ uint32_t write_domain);
++int
++i915_gem_set_domain(struct drm_gem_object *obj,
++ struct drm_file *file_priv,
++ uint32_t read_domains,
++ uint32_t write_domain);
++static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
++static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
++static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
++
++int
++i915_gem_init_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_init *args = data;
++
++ mutex_lock(&dev->struct_mutex);
++
++ if (args->gtt_start >= args->gtt_end ||
++ (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
++ (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
++ mutex_unlock(&dev->struct_mutex);
++ return -EINVAL;
++ }
++
++ drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
++ args->gtt_end - args->gtt_start);
++
++ dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
++
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++}
++
++
++/**
++ * Creates a new mm object and returns a handle to it.
++ */
++int
++i915_gem_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_create *args = data;
++ struct drm_gem_object *obj;
++ int handle, ret;
++
++ args->size = roundup(args->size, PAGE_SIZE);
++
++ /* Allocate the new object */
++ obj = drm_gem_object_alloc(dev, args->size);
++ if (obj == NULL)
++ return -ENOMEM;
++
++ ret = drm_gem_handle_create(file_priv, obj, &handle);
++ mutex_lock(&dev->struct_mutex);
++ drm_gem_object_handle_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++
++ if (ret)
++ return ret;
++
++ args->handle = handle;
++
++ return 0;
++}
++
++/**
++ * Reads data from the object referenced by handle.
++ *
++ * On error, the contents of *data are undefined.
++ */
++int
++i915_gem_pread_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_pread *args = data;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++ ssize_t read;
++ loff_t offset;
++ int ret;
++
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL)
++ return -EBADF;
++ obj_priv = obj->driver_private;
++
++ /* Bounds check source.
++ *
++ * XXX: This could use review for overflow issues...
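++ *
++ * (One observation: since args->offset and args->size are 64-bit
++ * and each is checked against obj->size before the sum is
++ * evaluated, the sum can reach at most 2 * obj->size and therefore
++ * cannot wrap here.)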
++ */
++ if (args->offset > obj->size || args->size > obj->size ||
++ args->offset + args->size > obj->size) {
++ drm_gem_object_unreference(obj);
++ return -EINVAL;
++ }
++
++ mutex_lock(&dev->struct_mutex);
++
++ ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
++ I915_GEM_DOMAIN_CPU, 0);
++ if (ret != 0) {
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++ }
++
++ offset = args->offset;
++
++ read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
++ args->size, &offset);
++ if (read != args->size) {
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ if (read < 0)
++ return read;
++ else
++ return -EINVAL;
++ }
++
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++}
++
++#include "drm_compat.h"
++
++static int
++i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
++ struct drm_i915_gem_pwrite *args,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ ssize_t remain;
++ loff_t offset;
++ char __user *user_data;
++ char *vaddr;
++ int i, o, l;
++ int ret = 0;
++ unsigned long pfn;
++ unsigned long unwritten;
++
++ user_data = (char __user *) (uintptr_t) args->data_ptr;
++ remain = args->size;
++ if (!access_ok(VERIFY_READ, user_data, remain))
++ return -EFAULT;
++
++
++ mutex_lock(&dev->struct_mutex);
++ ret = i915_gem_object_pin(obj, 0);
++ if (ret) {
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++ }
++ ret = i915_gem_set_domain(obj, file_priv,
++ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
++ if (ret)
++ goto fail;
++
++ obj_priv = obj->driver_private;
++ offset = obj_priv->gtt_offset + args->offset;
++ obj_priv->dirty = 1;
++
++ while (remain > 0) {
++ /* Operation in this page
++ *
++ * i = page number
++ * o = offset within page
++ * l = bytes to copy
++ */
++ i = offset >> PAGE_SHIFT;
++ o = offset & (PAGE_SIZE-1);
++ l = remain;
++ if ((o + l) > PAGE_SIZE)
++ l = PAGE_SIZE - o;
++
++ pfn = (dev->agp->base >> PAGE_SHIFT) + i;
++
++#ifdef DRM_KMAP_ATOMIC_PROT_PFN
++ /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
++ */
++ vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
++ __pgprot(__PAGE_KERNEL));
++#if WATCH_PWRITE
++ DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
++ i, o, l, pfn, vaddr);
++#endif
++ unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
++ user_data, l);
++ kunmap_atomic(vaddr, KM_USER0);
++
++ if (unwritten)
++#endif
++ {
++ vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++#if WATCH_PWRITE
++ DRM_INFO("pwrite slow i %d o %d l %d "
++ "pfn %ld vaddr %p\n",
++ i, o, l, pfn, vaddr);
++#endif
++ if (vaddr == NULL) {
++ ret = -EFAULT;
++ goto fail;
++ }
++ unwritten = __copy_from_user(vaddr + o, user_data, l);
++#if WATCH_PWRITE
++ DRM_INFO("unwritten %ld\n", unwritten);
++#endif
++ iounmap(vaddr);
++ if (unwritten) {
++ ret = -EFAULT;
++ goto fail;
++ }
++ }
++
++ remain -= l;
++ user_data += l;
++ offset += l;
++ }
++#if WATCH_PWRITE && 1
++ i915_gem_clflush_object(obj);
++ i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
++ i915_gem_clflush_object(obj);
++#endif
++
++fail:
++ i915_gem_object_unpin(obj);
++ mutex_unlock(&dev->struct_mutex);
++
++ return ret;
++}
++
++int
++i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
++ struct drm_i915_gem_pwrite *args,
++ struct drm_file *file_priv)
++{
++ int ret;
++ loff_t offset;
++ ssize_t written;
++
++ mutex_lock(&dev->struct_mutex);
++
++ ret = i915_gem_set_domain(obj,
file_priv, ++ I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU); ++ if (ret) { ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ ++ offset = args->offset; ++ ++ written = vfs_write(obj->filp, ++ (char __user *)(uintptr_t) args->data_ptr, ++ args->size, &offset); ++ if (written != args->size) { ++ mutex_unlock(&dev->struct_mutex); ++ if (written < 0) ++ return written; ++ else ++ return -EINVAL; ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++/** ++ * Writes data to the object referenced by handle. ++ * ++ * On error, the contents of the buffer that were to be modified are undefined. ++ */ ++int ++i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_pwrite *args = data; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret = 0; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EBADF; ++ obj_priv = obj->driver_private; ++ ++ /* Bounds check destination. ++ * ++ * XXX: This could use review for overflow issues... ++ */ ++ if (args->offset > obj->size || args->size > obj->size || ++ args->offset + args->size > obj->size) { ++ drm_gem_object_unreference(obj); ++ return -EINVAL; ++ } ++ ++ /* We can only do the GTT pwrite on untiled buffers, as otherwise ++ * it would end up going through the fenced access, and we'll get ++ * different detiling behavior between reading and writing. ++ * pread/pwrite currently are reading and writing from the CPU ++ * perspective, requiring manual detiling by the client. ++ */ ++ if (obj_priv->tiling_mode == I915_TILING_NONE && ++ dev->gtt_total != 0) ++ ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); ++ else ++ ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); ++ ++#if WATCH_PWRITE ++ if (ret) ++ DRM_INFO("pwrite failed %d\n", ret); ++#endif ++ ++ drm_gem_object_unreference(obj); ++ ++ return ret; ++} ++ ++/** ++ * Called when user space prepares to use an object ++ */ ++int ++i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_set_domain *args = data; ++ struct drm_gem_object *obj; ++ int ret; ++ ++ if (!(dev->driver->driver_features & DRIVER_GEM)) ++ return -ENODEV; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EBADF; ++ ++ mutex_lock(&dev->struct_mutex); ++#if WATCH_BUF ++ DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", ++ obj, obj->size, args->read_domains, args->write_domain); ++#endif ++ ret = i915_gem_set_domain(obj, file_priv, ++ args->read_domains, args->write_domain); ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++} ++ ++/** ++ * Called when user space has done writes to this buffer ++ */ ++int ++i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_sw_finish *args = data; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret = 0; ++ ++ if (!(dev->driver->driver_features & DRIVER_GEM)) ++ return -ENODEV; ++ ++ mutex_lock(&dev->struct_mutex); ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) { ++ mutex_unlock(&dev->struct_mutex); ++ return -EBADF; ++ } ++ ++#if WATCH_BUF ++ DRM_INFO("%s: sw_finish %d (%p %d)\n", ++ __func__, args->handle, obj, obj->size); ++#endif ++ obj_priv = obj->driver_private; ++ ++ /* Pinned buffers may be scanout, so flush the cache */ ++ if ((obj->write_domain & 
I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
++ i915_gem_clflush_object(obj);
++ drm_agp_chipset_flush(dev);
++ }
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++}
++
++/**
++ * Maps the contents of an object, returning the address it is mapped
++ * into.
++ *
++ * While the mapping holds a reference on the contents of the object, it doesn't
++ * imply a ref on the object itself.
++ */
++int
++i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_mmap *args = data;
++ struct drm_gem_object *obj;
++ loff_t offset;
++ unsigned long addr;
++
++ if (!(dev->driver->driver_features & DRIVER_GEM))
++ return -ENODEV;
++
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL)
++ return -EBADF;
++
++ offset = args->offset;
++
++ down_write(&current->mm->mmap_sem);
++ addr = do_mmap(obj->filp, 0, args->size,
++ PROT_READ | PROT_WRITE, MAP_SHARED,
++ args->offset);
++ up_write(&current->mm->mmap_sem);
++ mutex_lock(&dev->struct_mutex);
++ drm_gem_object_unreference(obj);
++ mutex_unlock(&dev->struct_mutex);
++ if (IS_ERR((void *)addr))
++ return addr;
++
++ args->addr_ptr = (uint64_t) addr;
++
++ return 0;
++}
++
++static void
++i915_gem_object_free_page_list(struct drm_gem_object *obj)
++{
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int page_count = obj->size / PAGE_SIZE;
++ int i;
++
++ if (obj_priv->page_list == NULL)
++ return;
++
++
++ for (i = 0; i < page_count; i++)
++ if (obj_priv->page_list[i] != NULL) {
++ if (obj_priv->dirty)
++ set_page_dirty(obj_priv->page_list[i]);
++ mark_page_accessed(obj_priv->page_list[i]);
++ page_cache_release(obj_priv->page_list[i]);
++ }
++ obj_priv->dirty = 0;
++
++ drm_free(obj_priv->page_list,
++ page_count * sizeof(struct page *),
++ DRM_MEM_DRIVER);
++ obj_priv->page_list = NULL;
++}
++
++static void
++i915_gem_object_move_to_active(struct drm_gem_object *obj)
++{
++ struct drm_device *dev = obj->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++ /* Add a reference if we're newly entering the active list. */
++ if (!obj_priv->active) {
++ drm_gem_object_reference(obj);
++ obj_priv->active = 1;
++ }
++ /* Move from whatever list we were on to the tail of execution. */
++ list_move_tail(&obj_priv->list,
++ &dev_priv->mm.active_list);
++}
++
++
++static void
++i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
++{
++ struct drm_device *dev = obj->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++ if (obj_priv->pin_count != 0)
++ list_del_init(&obj_priv->list);
++ else
++ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++
++ if (obj_priv->active) {
++ obj_priv->active = 0;
++ drm_gem_object_unreference(obj);
++ }
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++}
++
++/**
++ * Creates a new sequence number, emitting a write of it to the status page
++ * plus an interrupt, which will trigger i915_user_interrupt_handler.
++ *
++ * Must be called with struct_lock held.
++ *
++ * Returned sequence numbers are nonzero on success.
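++ *
++ * The request becomes visible in two ways, as the ring emission
++ * below shows: MI_STORE_DWORD_INDEX writes the seqno into the
++ * hardware status page at I915_GEM_HWS_INDEX, where
++ * i915_get_gem_seqno() reads it back, and MI_USER_INTERRUPT raises
++ * an interrupt so that sleepers in i915_wait_request() are woken.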
++ */ ++static uint32_t ++i915_add_request(struct drm_device *dev, uint32_t flush_domains) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_request *request; ++ uint32_t seqno; ++ int was_empty; ++ RING_LOCALS; ++ ++ request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); ++ if (request == NULL) ++ return 0; ++ ++ /* Grab the seqno we're going to make this request be, and bump the ++ * next (skipping 0 so it can be the reserved no-seqno value). ++ */ ++ seqno = dev_priv->mm.next_gem_seqno; ++ dev_priv->mm.next_gem_seqno++; ++ if (dev_priv->mm.next_gem_seqno == 0) ++ dev_priv->mm.next_gem_seqno++; ++ ++ BEGIN_LP_RING(4); ++ OUT_RING(MI_STORE_DWORD_INDEX); ++ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); ++ OUT_RING(seqno); ++ ++ OUT_RING(MI_USER_INTERRUPT); ++ ADVANCE_LP_RING(); ++ ++ DRM_DEBUG("%d\n", seqno); ++ ++ request->seqno = seqno; ++ request->emitted_jiffies = jiffies; ++ request->flush_domains = flush_domains; ++ was_empty = list_empty(&dev_priv->mm.request_list); ++ list_add_tail(&request->list, &dev_priv->mm.request_list); ++ ++ if (was_empty) ++ schedule_delayed_work(&dev_priv->mm.retire_work, HZ); ++ return seqno; ++} ++ ++/** ++ * Command execution barrier ++ * ++ * Ensures that all commands in the ring are finished ++ * before signalling the CPU ++ */ ++uint32_t ++i915_retire_commands(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; ++ uint32_t flush_domains = 0; ++ RING_LOCALS; ++ ++ /* The sampler always gets flushed on i965 (sigh) */ ++ if (IS_I965G(dev)) ++ flush_domains |= I915_GEM_DOMAIN_SAMPLER; ++ BEGIN_LP_RING(2); ++ OUT_RING(cmd); ++ OUT_RING(0); /* noop */ ++ ADVANCE_LP_RING(); ++ return flush_domains; ++} ++ ++/** ++ * Moves buffers associated only with the given active seqno from the active ++ * to inactive list, potentially freeing them. ++ */ ++static void ++i915_gem_retire_request(struct drm_device *dev, ++ struct drm_i915_gem_request *request) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ if (request->flush_domains != 0) { ++ struct drm_i915_gem_object *obj_priv, *next; ++ ++ /* First clear any buffers that were only waiting for a flush ++ * matching the one just retired. ++ */ ++ ++ list_for_each_entry_safe(obj_priv, next, ++ &dev_priv->mm.flushing_list, list) { ++ struct drm_gem_object *obj = obj_priv->obj; ++ ++ if (obj->write_domain & request->flush_domains) { ++ obj->write_domain = 0; ++ i915_gem_object_move_to_inactive(obj); ++ } ++ } ++ ++ } ++ ++ /* Move any buffers on the active list that are no longer referenced ++ * by the ringbuffer to the flushing/inactive lists as appropriate. ++ */ ++ while (!list_empty(&dev_priv->mm.active_list)) { ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ++ obj_priv = list_first_entry(&dev_priv->mm.active_list, ++ struct drm_i915_gem_object, ++ list); ++ obj = obj_priv->obj; ++ ++ /* If the seqno being retired doesn't match the oldest in the ++ * list, then the oldest in the list must still be newer than ++ * this seqno. 
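++ * ("Newer" here means newer in the wrap-safe sense of
++ * i915_seqno_passed() below, i.e. (int32_t)(seq1 - seq2) >= 0;
++ * for example seqno 2 counts as passing 0xfffffffe even though it
++ * is numerically smaller.)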
++ */ ++ if (obj_priv->last_rendering_seqno != request->seqno) ++ return; ++#if WATCH_LRU ++ DRM_INFO("%s: retire %d moves to inactive list %p\n", ++ __func__, request->seqno, obj); ++#endif ++ ++ /* If this request flushes the write domain, ++ * clear the write domain from the object now ++ */ ++ if (request->flush_domains & obj->write_domain) ++ obj->write_domain = 0; ++ ++ if (obj->write_domain != 0) { ++ list_move_tail(&obj_priv->list, ++ &dev_priv->mm.flushing_list); ++ } else { ++ i915_gem_object_move_to_inactive(obj); ++ } ++ } ++} ++ ++/** ++ * Returns true if seq1 is later than seq2. ++ */ ++static int ++i915_seqno_passed(uint32_t seq1, uint32_t seq2) ++{ ++ return (int32_t)(seq1 - seq2) >= 0; ++} ++ ++uint32_t ++i915_get_gem_seqno(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); ++} ++ ++/** ++ * This function clears the request list as sequence numbers are passed. ++ */ ++void ++i915_gem_retire_requests(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ uint32_t seqno; ++ ++ seqno = i915_get_gem_seqno(dev); ++ ++ while (!list_empty(&dev_priv->mm.request_list)) { ++ struct drm_i915_gem_request *request; ++ uint32_t retiring_seqno; ++ ++ request = list_first_entry(&dev_priv->mm.request_list, ++ struct drm_i915_gem_request, ++ list); ++ retiring_seqno = request->seqno; ++ ++ if (i915_seqno_passed(seqno, retiring_seqno) || ++ dev_priv->mm.wedged) { ++ i915_gem_retire_request(dev, request); ++ ++ list_del(&request->list); ++ drm_free(request, sizeof(*request), DRM_MEM_DRIVER); ++ } else ++ break; ++ } ++} ++ ++void ++i915_gem_retire_work_handler(struct work_struct *work) ++{ ++ drm_i915_private_t *dev_priv; ++ struct drm_device *dev; ++ ++ dev_priv = container_of(work, drm_i915_private_t, ++ mm.retire_work.work); ++ dev = dev_priv->dev; ++ ++ mutex_lock(&dev->struct_mutex); ++ i915_gem_retire_requests(dev); ++ if (!list_empty(&dev_priv->mm.request_list)) ++ schedule_delayed_work(&dev_priv->mm.retire_work, HZ); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++/** ++ * Waits for a sequence number to be signaled, and cleans up the ++ * request and object lists appropriately for that event. ++ */ ++int ++i915_wait_request(struct drm_device *dev, uint32_t seqno) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int ret = 0; ++ ++ BUG_ON(seqno == 0); ++ ++ if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { ++ dev_priv->mm.waiting_gem_seqno = seqno; ++ i915_user_irq_on(dev_priv); ++ ret = wait_event_interruptible(dev_priv->irq_queue, ++ i915_seqno_passed(i915_get_gem_seqno(dev), ++ seqno) || ++ dev_priv->mm.wedged); ++ i915_user_irq_off(dev_priv); ++ dev_priv->mm.waiting_gem_seqno = 0; ++ } ++ if (dev_priv->mm.wedged) ++ ret = -EIO; ++ ++ if (ret && ret != -ERESTARTSYS) ++ DRM_ERROR("%s returns %d (awaiting %d at %d)\n", ++ __func__, ret, seqno, i915_get_gem_seqno(dev)); ++ ++ /* Directly dispatch request retiring. While we have the work queue ++ * to handle this, the waiter on a request often wants an associated ++ * buffer to have made it to the inactive list, and we would need ++ * a separate wait queue to handle that. 
++ */
++ if (ret == 0)
++ i915_gem_retire_requests(dev);
++
++ return ret;
++}
++
++static void
++i915_gem_flush(struct drm_device *dev,
++ uint32_t invalidate_domains,
++ uint32_t flush_domains)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ uint32_t cmd;
++ RING_LOCALS;
++
++#if WATCH_EXEC
++ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
++ invalidate_domains, flush_domains);
++#endif
++
++ if (flush_domains & I915_GEM_DOMAIN_CPU)
++ drm_agp_chipset_flush(dev);
++
++ if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
++ I915_GEM_DOMAIN_GTT)) {
++ /*
++ * read/write caches:
++ *
++ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
++ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
++ * also flushed at 2d versus 3d pipeline switches.
++ *
++ * read-only caches:
++ *
++ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
++ * MI_READ_FLUSH is set, and is always flushed on 965.
++ *
++ * I915_GEM_DOMAIN_COMMAND may not exist?
++ *
++ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
++ * invalidated when MI_EXE_FLUSH is set.
++ *
++ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
++ * invalidated with every MI_FLUSH.
++ *
++ * TLBs:
++ *
++ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
++ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
++ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
++ * are flushed at any MI_FLUSH.
++ */
++
++ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
++ if ((invalidate_domains|flush_domains) &
++ I915_GEM_DOMAIN_RENDER)
++ cmd &= ~MI_NO_WRITE_FLUSH;
++ if (!IS_I965G(dev)) {
++ /*
++ * On the 965, the sampler cache always gets flushed
++ * and this bit is reserved.
++ */
++ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
++ cmd |= MI_READ_FLUSH;
++ }
++ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
++ cmd |= MI_EXE_FLUSH;
++
++#if WATCH_EXEC
++ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
++#endif
++ BEGIN_LP_RING(2);
++ OUT_RING(cmd);
++ OUT_RING(0); /* noop */
++ ADVANCE_LP_RING();
++ }
++}
++
++/**
++ * Ensures that all rendering to the object has completed and the object is
++ * safe to unbind from the GTT or access from the CPU.
++ */
++static int
++i915_gem_object_wait_rendering(struct drm_gem_object *obj)
++{
++ struct drm_device *dev = obj->dev;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int ret;
++ uint32_t write_domain;
++
++ /* If there are writes queued to the buffer, flush and
++ * create a new seqno to wait for.
++ */
++ write_domain = obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT);
++ if (write_domain) {
++#if WATCH_BUF
++ DRM_INFO("%s: flushing object %p from write domain %08x\n",
++ __func__, obj, write_domain);
++#endif
++ i915_gem_flush(dev, 0, write_domain);
++
++ i915_gem_object_move_to_active(obj);
++ obj_priv->last_rendering_seqno = i915_add_request(dev,
++ write_domain);
++ BUG_ON(obj_priv->last_rendering_seqno == 0);
++#if WATCH_LRU
++ DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
++#endif
++ }
++
++ /* If there is rendering queued on the buffer being evicted, wait for
++ * it.
++ */
++ if (obj_priv->active) {
++#if WATCH_BUF
++ DRM_INFO("%s: object %p wait for seqno %08x\n",
++ __func__, obj, obj_priv->last_rendering_seqno);
++#endif
++ ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
++ if (ret != 0)
++ return ret;
++ }
++
++ return 0;
++}
++
++/**
++ * Unbinds an object from the GTT aperture.
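++ *
++ * In outline the steps below are: refuse if the object is pinned,
++ * wait for outstanding rendering, force the object back to the CPU
++ * domain so pending GPU writes are flushed, unbind and free the AGP
++ * memory, release the backing pages, give the GTT node back, and
++ * drop the object from whatever LRU list it was on.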
++ */ ++static int ++i915_gem_object_unbind(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int ret = 0; ++ ++#if WATCH_BUF ++ DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj); ++ DRM_INFO("gtt_space %p\n", obj_priv->gtt_space); ++#endif ++ if (obj_priv->gtt_space == NULL) ++ return 0; ++ ++ if (obj_priv->pin_count != 0) { ++ DRM_ERROR("Attempting to unbind pinned buffer\n"); ++ return -EINVAL; ++ } ++ ++ /* Wait for any rendering to complete ++ */ ++ ret = i915_gem_object_wait_rendering(obj); ++ if (ret) { ++ DRM_ERROR("wait_rendering failed: %d\n", ret); ++ return ret; ++ } ++ ++ /* Move the object to the CPU domain to ensure that ++ * any possible CPU writes while it's not in the GTT ++ * are flushed when we go to remap it. This will ++ * also ensure that all pending GPU writes are finished ++ * before we unbind. ++ */ ++ ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, ++ I915_GEM_DOMAIN_CPU); ++ if (ret) { ++ DRM_ERROR("set_domain failed: %d\n", ret); ++ return ret; ++ } ++ ++ if (obj_priv->agp_mem != NULL) { ++ drm_unbind_agp(obj_priv->agp_mem); ++ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); ++ obj_priv->agp_mem = NULL; ++ } ++ ++ BUG_ON(obj_priv->active); ++ ++ i915_gem_object_free_page_list(obj); ++ ++ if (obj_priv->gtt_space) { ++ atomic_dec(&dev->gtt_count); ++ atomic_sub(obj->size, &dev->gtt_memory); ++ ++ drm_mm_put_block(obj_priv->gtt_space); ++ obj_priv->gtt_space = NULL; ++ } ++ ++ /* Remove ourselves from the LRU list if present. */ ++ if (!list_empty(&obj_priv->list)) ++ list_del_init(&obj_priv->list); ++ ++ return 0; ++} ++ ++static int ++i915_gem_evict_something(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret = 0; ++ ++ for (;;) { ++ /* If there's an inactive buffer available now, grab it ++ * and be done. ++ */ ++ if (!list_empty(&dev_priv->mm.inactive_list)) { ++ obj_priv = list_first_entry(&dev_priv->mm.inactive_list, ++ struct drm_i915_gem_object, ++ list); ++ obj = obj_priv->obj; ++ BUG_ON(obj_priv->pin_count != 0); ++#if WATCH_LRU ++ DRM_INFO("%s: evicting %p\n", __func__, obj); ++#endif ++ BUG_ON(obj_priv->active); ++ ++ /* Wait on the rendering and unbind the buffer. */ ++ ret = i915_gem_object_unbind(obj); ++ break; ++ } ++ ++ /* If we didn't get anything, but the ring is still processing ++ * things, wait for one of those things to finish and hopefully ++ * leave us a buffer to evict. ++ */ ++ if (!list_empty(&dev_priv->mm.request_list)) { ++ struct drm_i915_gem_request *request; ++ ++ request = list_first_entry(&dev_priv->mm.request_list, ++ struct drm_i915_gem_request, ++ list); ++ ++ ret = i915_wait_request(dev, request->seqno); ++ if (ret) ++ break; ++ ++ /* if waiting caused an object to become inactive, ++ * then loop around and wait for it. Otherwise, we ++ * assume that waiting freed and unbound something, ++ * so there should now be some space in the GTT ++ */ ++ if (!list_empty(&dev_priv->mm.inactive_list)) ++ continue; ++ break; ++ } ++ ++ /* If we didn't have anything on the request list but there ++ * are buffers awaiting a flush, emit one and try again. ++ * When we wait on it, those buffers waiting for that flush ++ * will get moved to inactive. 
++ */
++ if (!list_empty(&dev_priv->mm.flushing_list)) {
++ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
++ struct drm_i915_gem_object,
++ list);
++ obj = obj_priv->obj;
++
++ i915_gem_flush(dev,
++ obj->write_domain,
++ obj->write_domain);
++ i915_add_request(dev, obj->write_domain);
++
++ obj = NULL;
++ continue;
++ }
++
++ DRM_ERROR("inactive empty %d request empty %d "
++ "flushing empty %d\n",
++ list_empty(&dev_priv->mm.inactive_list),
++ list_empty(&dev_priv->mm.request_list),
++ list_empty(&dev_priv->mm.flushing_list));
++ /* If we didn't do any of the above, there's nothing to be done
++ * and we just can't fit it in.
++ */
++ return -ENOMEM;
++ }
++ return ret;
++}
++
++static int
++i915_gem_object_get_page_list(struct drm_gem_object *obj)
++{
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int page_count, i;
++ struct address_space *mapping;
++ struct inode *inode;
++ struct page *page;
++ int ret;
++
++ if (obj_priv->page_list)
++ return 0;
++
++ /* Get the list of pages out of our struct file. They'll be pinned
++ * at this point until we release them.
++ */
++ page_count = obj->size / PAGE_SIZE;
++ BUG_ON(obj_priv->page_list != NULL);
++ obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
++ DRM_MEM_DRIVER);
++ if (obj_priv->page_list == NULL) {
++ DRM_ERROR("Failed to allocate page list\n");
++ return -ENOMEM;
++ }
++
++ inode = obj->filp->f_path.dentry->d_inode;
++ mapping = inode->i_mapping;
++ for (i = 0; i < page_count; i++) {
++ page = read_mapping_page(mapping, i, NULL);
++ if (IS_ERR(page)) {
++ ret = PTR_ERR(page);
++ DRM_ERROR("read_mapping_page failed: %d\n", ret);
++ i915_gem_object_free_page_list(obj);
++ return ret;
++ }
++ obj_priv->page_list[i] = page;
++ }
++ return 0;
++}
++
++/**
++ * Finds free space in the GTT aperture and binds the object there.
++ */
++static int
++i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
++{
++ struct drm_device *dev = obj->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ struct drm_mm_node *free_space;
++ int page_count, ret;
++
++ if (alignment == 0)
++ alignment = PAGE_SIZE;
++ if (alignment & (PAGE_SIZE - 1)) {
++ DRM_ERROR("Invalid object alignment requested %u\n", alignment);
++ return -EINVAL;
++ }
++
++ search_free:
++ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
++ obj->size, alignment, 0);
++ if (free_space != NULL) {
++ obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
++ alignment);
++ if (obj_priv->gtt_space != NULL) {
++ obj_priv->gtt_space->private = obj;
++ obj_priv->gtt_offset = obj_priv->gtt_space->start;
++ }
++ }
++ if (obj_priv->gtt_space == NULL) {
++ /* If the gtt is empty and we're still having trouble
++ * fitting our object in, we're out of memory.
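++ * Otherwise evict something and retry: the goto search_free loop
++ * below alternates between drm_mm_search_free() and
++ * i915_gem_evict_something() until either the allocation succeeds
++ * or eviction itself fails.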
++ */
++#if WATCH_LRU
++ DRM_INFO("%s: GTT full, evicting something\n", __func__);
++#endif
++ if (list_empty(&dev_priv->mm.inactive_list) &&
++ list_empty(&dev_priv->mm.flushing_list) &&
++ list_empty(&dev_priv->mm.active_list)) {
++ DRM_ERROR("GTT full, but LRU list empty\n");
++ return -ENOMEM;
++ }
++
++ ret = i915_gem_evict_something(dev);
++ if (ret != 0) {
++ DRM_ERROR("Failed to evict a buffer %d\n", ret);
++ return ret;
++ }
++ goto search_free;
++ }
++
++#if WATCH_BUF
++ DRM_INFO("Binding object of size %d at 0x%08x\n",
++ obj->size, obj_priv->gtt_offset);
++#endif
++ ret = i915_gem_object_get_page_list(obj);
++ if (ret) {
++ drm_mm_put_block(obj_priv->gtt_space);
++ obj_priv->gtt_space = NULL;
++ return ret;
++ }
++
++ page_count = obj->size / PAGE_SIZE;
++ /* Create an AGP memory structure pointing at our pages, and bind it
++ * into the GTT.
++ */
++ obj_priv->agp_mem = drm_agp_bind_pages(dev,
++ obj_priv->page_list,
++ page_count,
++ obj_priv->gtt_offset);
++ if (obj_priv->agp_mem == NULL) {
++ i915_gem_object_free_page_list(obj);
++ drm_mm_put_block(obj_priv->gtt_space);
++ obj_priv->gtt_space = NULL;
++ return -ENOMEM;
++ }
++ atomic_inc(&dev->gtt_count);
++ atomic_add(obj->size, &dev->gtt_memory);
++
++ /* Assert that the object is not currently in any GPU domain. As it
++ * wasn't in the GTT, there shouldn't be any way it could have been in
++ * a GPU cache
++ */
++ BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
++ BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
++
++ return 0;
++}
++
++void
++i915_gem_clflush_object(struct drm_gem_object *obj)
++{
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++ /* If we don't have a page list set up, then we're not pinned
++ * to GPU, and we can ignore the cache flush because it'll happen
++ * again at bind time.
++ */
++ if (obj_priv->page_list == NULL)
++ return;
++
++ drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
++}
++
++/*
++ * Set the next domain for the specified object. This
++ * may not actually perform the necessary flushing/invalidating though,
++ * as that may want to be batched with other set_domain operations
++ *
++ * This is (we hope) the only really tricky part of gem. The goal
++ * is fairly simple -- track which caches hold bits of the object
++ * and make sure they remain coherent. A few concrete examples may
++ * help to explain how it works. For shorthand, we use the notation
++ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
++ * a pair of read and write domain masks.
++ *
++ * Case 1: the batch buffer
++ *
++ * 1. Allocated
++ * 2. Written by CPU
++ * 3. Mapped to GTT
++ * 4. Read by GPU
++ * 5. Unmapped from GTT
++ * 6. Freed
++ *
++ * Let's take these a step at a time
++ *
++ * 1. Allocated
++ * Pages allocated from the kernel may still have
++ * cache contents, so we set them to (CPU, CPU) always.
++ * 2. Written by CPU (using pwrite)
++ * The pwrite function calls set_domain (CPU, CPU) and
++ * this function does nothing (as nothing changes)
++ * 3. Mapped by GTT
++ * This function asserts that the object is not
++ * currently in any GPU-based read or write domains
++ * 4. Read by GPU
++ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
++ * As write_domain is zero, this function adds in the
++ * current read domains (CPU+COMMAND, 0).
++ * flush_domains is set to CPU.
++ * invalidate_domains is set to COMMAND
++ * clflush is run to get data out of the CPU caches
++ * then i915_dev_set_domain calls i915_gem_flush to
++ * emit an MI_FLUSH and drm_agp_chipset_flush
++ * 5. Unmapped from GTT
++ * i915_gem_object_unbind calls set_domain (CPU, CPU)
++ * flush_domains and invalidate_domains end up both zero
++ * so no flushing/invalidating happens
++ * 6. Freed
++ * yay, done
++ *
++ * Case 2: The shared render buffer
++ *
++ * 1. Allocated
++ * 2. Mapped to GTT
++ * 3. Read/written by GPU
++ * 4. set_domain to (CPU,CPU)
++ * 5. Read/written by CPU
++ * 6. Read/written by GPU
++ *
++ * 1. Allocated
++ * Same as last example, (CPU, CPU)
++ * 2. Mapped to GTT
++ * Nothing changes (assertions find that it is not in the GPU)
++ * 3. Read/written by GPU
++ * execbuffer calls set_domain (RENDER, RENDER)
++ * flush_domains gets CPU
++ * invalidate_domains gets GPU
++ * clflush (obj)
++ * MI_FLUSH and drm_agp_chipset_flush
++ * 4. set_domain (CPU, CPU)
++ * flush_domains gets GPU
++ * invalidate_domains gets CPU
++ * wait_rendering (obj) to make sure all drawing is complete.
++ * This will include an MI_FLUSH to get the data from GPU
++ * to memory
++ * clflush (obj) to invalidate the CPU cache
++ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
++ * 5. Read/written by CPU
++ * cache lines are loaded and dirtied
++ * 6. Read/written by GPU
++ * Same as last GPU access
++ *
++ * Case 3: The constant buffer
++ *
++ * 1. Allocated
++ * 2. Written by CPU
++ * 3. Read by GPU
++ * 4. Updated (written) by CPU again
++ * 5. Read by GPU
++ *
++ * 1. Allocated
++ * (CPU, CPU)
++ * 2. Written by CPU
++ * (CPU, CPU)
++ * 3. Read by GPU
++ * (CPU+RENDER, 0)
++ * flush_domains = CPU
++ * invalidate_domains = RENDER
++ * clflush (obj)
++ * MI_FLUSH
++ * drm_agp_chipset_flush
++ * 4. Updated (written) by CPU again
++ * (CPU, CPU)
++ * flush_domains = 0 (no previous write domain)
++ * invalidate_domains = 0 (no new read domains)
++ * 5. Read by GPU
++ * (CPU+RENDER, 0)
++ * flush_domains = CPU
++ * invalidate_domains = RENDER
++ * clflush (obj)
++ * MI_FLUSH
++ * drm_agp_chipset_flush
++ */
++static int
++i915_gem_object_set_domain(struct drm_gem_object *obj,
++ uint32_t read_domains,
++ uint32_t write_domain)
++{
++ struct drm_device *dev = obj->dev;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ uint32_t invalidate_domains = 0;
++ uint32_t flush_domains = 0;
++ int ret;
++
++#if WATCH_BUF
++ DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
++ __func__, obj,
++ obj->read_domains, read_domains,
++ obj->write_domain, write_domain);
++#endif
++ /*
++ * If the object isn't moving to a new write domain,
++ * let the object stay in multiple read domains
++ */
++ if (write_domain == 0)
++ read_domains |= obj->read_domains;
++ else
++ obj_priv->dirty = 1;
++
++ /*
++ * Flush the current write domain if
++ * the new read domains don't match. Invalidate
++ * any read domains which differ from the old
++ * write domain
++ */
++ if (obj->write_domain && obj->write_domain != read_domains) {
++ flush_domains |= obj->write_domain;
++ invalidate_domains |= read_domains & ~obj->write_domain;
++ }
++ /*
++ * Invalidate any read caches which may have
++ * stale data. That is, any new read domains.
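++ * For example, moving a buffer from (CPU, CPU) to (RENDER, 0)
++ * makes flush_domains pick up CPU from the old write domain and
++ * invalidate_domains pick up RENDER as the new read domain --
++ * exactly the clflush + MI_FLUSH sequence walked through in the
++ * batch-buffer case above.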
++ */
++ invalidate_domains |= read_domains & ~obj->read_domains;
++ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
++#if WATCH_BUF
++ DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
++ __func__, flush_domains, invalidate_domains);
++#endif
++ /*
++ * If we're invalidating the CPU cache and flushing a GPU cache,
++ * then pause for rendering so that the GPU caches will be
++ * flushed before the CPU cache is invalidated
++ */
++ if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
++ (flush_domains & ~(I915_GEM_DOMAIN_CPU |
++ I915_GEM_DOMAIN_GTT))) {
++ ret = i915_gem_object_wait_rendering(obj);
++ if (ret)
++ return ret;
++ }
++ i915_gem_clflush_object(obj);
++ }
++
++ if ((write_domain | flush_domains) != 0)
++ obj->write_domain = write_domain;
++
++ /* If we're invalidating the CPU domain, clear the per-page CPU
++ * domain list as well.
++ */
++ if (obj_priv->page_cpu_valid != NULL &&
++ (obj->read_domains & I915_GEM_DOMAIN_CPU) &&
++ ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) {
++ memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
++ }
++ obj->read_domains = read_domains;
++
++ dev->invalidate_domains |= invalidate_domains;
++ dev->flush_domains |= flush_domains;
++#if WATCH_BUF
++ DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
++ __func__,
++ obj->read_domains, obj->write_domain,
++ dev->invalidate_domains, dev->flush_domains);
++#endif
++ return 0;
++}
++
++/**
++ * Set the read/write domain on a range of the object.
++ *
++ * Currently only implemented for CPU reads, otherwise drops to normal
++ * i915_gem_object_set_domain().
++ */
++static int
++i915_gem_object_set_domain_range(struct drm_gem_object *obj,
++ uint64_t offset,
++ uint64_t size,
++ uint32_t read_domains,
++ uint32_t write_domain)
++{
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ int ret, i;
++
++ if (obj->read_domains & I915_GEM_DOMAIN_CPU)
++ return 0;
++
++ if (read_domains != I915_GEM_DOMAIN_CPU ||
++ write_domain != 0)
++ return i915_gem_object_set_domain(obj,
++ read_domains, write_domain);
++
++ /* Wait on any GPU rendering to the object to be flushed. */
++ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
++ ret = i915_gem_object_wait_rendering(obj);
++ if (ret)
++ return ret;
++ }
++
++ if (obj_priv->page_cpu_valid == NULL) {
++ obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
++ DRM_MEM_DRIVER);
++ }
++
++ /* Flush the cache on any pages that are still invalid from the CPU's
++ * perspective.
++ */
++ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
++ if (obj_priv->page_cpu_valid[i])
++ continue;
++
++ drm_ttm_cache_flush(obj_priv->page_list + i, 1);
++
++ obj_priv->page_cpu_valid[i] = 1;
++ }
++
++ return 0;
++}
++
++/**
++ * Once all of the objects have been set in the proper domain,
++ * perform the necessary flush and invalidate operations.
++ *
++ * Returns the write domains flushed, for use in flush tracking.
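++ *
++ * This is the batching half of the scheme described above
++ * i915_gem_object_set_domain(): the per-object calls only
++ * accumulate dev->invalidate_domains and dev->flush_domains, and
++ * this single call then emits one combined i915_gem_flush() for
++ * all of them.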
++ */ ++static uint32_t ++i915_gem_dev_set_domain(struct drm_device *dev) ++{ ++ uint32_t flush_domains = dev->flush_domains; ++ ++ /* ++ * Now that all the buffers are synced to the proper domains, ++ * flush and invalidate the collected domains ++ */ ++ if (dev->invalidate_domains | dev->flush_domains) { ++#if WATCH_EXEC ++ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", ++ __func__, ++ dev->invalidate_domains, ++ dev->flush_domains); ++#endif ++ i915_gem_flush(dev, ++ dev->invalidate_domains, ++ dev->flush_domains); ++ dev->invalidate_domains = 0; ++ dev->flush_domains = 0; ++ } ++ ++ return flush_domains; ++} ++ ++/** ++ * Pin an object to the GTT and evaluate the relocations landing in it. ++ */ ++static int ++i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, ++ struct drm_file *file_priv, ++ struct drm_i915_gem_exec_object *entry) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_gem_relocation_entry reloc; ++ struct drm_i915_gem_relocation_entry __user *relocs; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int i, ret; ++ uint32_t last_reloc_offset = -1; ++ void *reloc_page = NULL; ++ ++ /* Choose the GTT offset for our buffer and put it there. */ ++ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); ++ if (ret) ++ return ret; ++ ++ entry->offset = obj_priv->gtt_offset; ++ ++ relocs = (struct drm_i915_gem_relocation_entry __user *) ++ (uintptr_t) entry->relocs_ptr; ++ /* Apply the relocations, using the GTT aperture to avoid cache ++ * flushing requirements. ++ */ ++ for (i = 0; i < entry->relocation_count; i++) { ++ struct drm_gem_object *target_obj; ++ struct drm_i915_gem_object *target_obj_priv; ++ uint32_t reloc_val, reloc_offset, *reloc_entry; ++ int ret; ++ ++ ret = copy_from_user(&reloc, relocs + i, sizeof(reloc)); ++ if (ret != 0) { ++ i915_gem_object_unpin(obj); ++ return ret; ++ } ++ ++ target_obj = drm_gem_object_lookup(obj->dev, file_priv, ++ reloc.target_handle); ++ if (target_obj == NULL) { ++ i915_gem_object_unpin(obj); ++ return -EBADF; ++ } ++ target_obj_priv = target_obj->driver_private; ++ ++ /* The target buffer should have appeared before us in the ++ * exec_object list, so it should have a GTT space bound by now. 
++ */ ++ if (target_obj_priv->gtt_space == NULL) { ++ DRM_ERROR("No GTT space found for object %d\n", ++ reloc.target_handle); ++ drm_gem_object_unreference(target_obj); ++ i915_gem_object_unpin(obj); ++ return -EINVAL; ++ } ++ ++ if (reloc.offset > obj->size - 4) { ++ DRM_ERROR("Relocation beyond object bounds: " ++ "obj %p target %d offset %d size %d.\n", ++ obj, reloc.target_handle, ++ (int) reloc.offset, (int) obj->size); ++ drm_gem_object_unreference(target_obj); ++ i915_gem_object_unpin(obj); ++ return -EINVAL; ++ } ++ if (reloc.offset & 3) { ++ DRM_ERROR("Relocation not 4-byte aligned: " ++ "obj %p target %d offset %d.\n", ++ obj, reloc.target_handle, ++ (int) reloc.offset); ++ drm_gem_object_unreference(target_obj); ++ i915_gem_object_unpin(obj); ++ return -EINVAL; ++ } ++ ++ if (reloc.write_domain && target_obj->pending_write_domain && ++ reloc.write_domain != target_obj->pending_write_domain) { ++ DRM_ERROR("Write domain conflict: " ++ "obj %p target %d offset %d " ++ "new %08x old %08x\n", ++ obj, reloc.target_handle, ++ (int) reloc.offset, ++ reloc.write_domain, ++ target_obj->pending_write_domain); ++ drm_gem_object_unreference(target_obj); ++ i915_gem_object_unpin(obj); ++ return -EINVAL; ++ } ++ ++#if WATCH_RELOC ++ DRM_INFO("%s: obj %p offset %08x target %d " ++ "read %08x write %08x gtt %08x " ++ "presumed %08x delta %08x\n", ++ __func__, ++ obj, ++ (int) reloc.offset, ++ (int) reloc.target_handle, ++ (int) reloc.read_domains, ++ (int) reloc.write_domain, ++ (int) target_obj_priv->gtt_offset, ++ (int) reloc.presumed_offset, ++ reloc.delta); ++#endif ++ ++ target_obj->pending_read_domains |= reloc.read_domains; ++ target_obj->pending_write_domain |= reloc.write_domain; ++ ++ /* If the relocation already has the right value in it, no ++ * more work needs to be done. ++ */ ++ if (target_obj_priv->gtt_offset == reloc.presumed_offset) { ++ drm_gem_object_unreference(target_obj); ++ continue; ++ } ++ ++ /* Now that we're going to actually write some data in, ++ * make sure that any rendering using this buffer's contents ++ * is completed. ++ */ ++ i915_gem_object_wait_rendering(obj); ++ ++ /* As we're writing through the gtt, flush ++ * any CPU writes before we write the relocations ++ */ ++ if (obj->write_domain & I915_GEM_DOMAIN_CPU) { ++ i915_gem_clflush_object(obj); ++ drm_agp_chipset_flush(dev); ++ obj->write_domain = 0; ++ } ++ ++ /* Map the page containing the relocation we're going to ++ * perform. ++ */ ++ reloc_offset = obj_priv->gtt_offset + reloc.offset; ++ if (reloc_page == NULL || ++ (last_reloc_offset & ~(PAGE_SIZE - 1)) != ++ (reloc_offset & ~(PAGE_SIZE - 1))) { ++ if (reloc_page != NULL) ++ iounmap(reloc_page); ++ ++ reloc_page = ioremap(dev->agp->base + ++ (reloc_offset & ~(PAGE_SIZE - 1)), ++ PAGE_SIZE); ++ last_reloc_offset = reloc_offset; ++ if (reloc_page == NULL) { ++ drm_gem_object_unreference(target_obj); ++ i915_gem_object_unpin(obj); ++ return -ENOMEM; ++ } ++ } ++ ++ reloc_entry = (uint32_t *)((char *)reloc_page + ++ (reloc_offset & (PAGE_SIZE - 1))); ++ reloc_val = target_obj_priv->gtt_offset + reloc.delta; ++ ++#if WATCH_BUF ++ DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", ++ obj, (unsigned int) reloc.offset, ++ readl(reloc_entry), reloc_val); ++#endif ++ writel(reloc_val, reloc_entry); ++ ++ /* Write the updated presumed offset for this entry back out ++ * to the user. 
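++ * That way a later execbuffer of the same batch can take the
++ * short-circuit above, where a relocation whose presumed_offset
++ * still matches the target object's GTT offset is skipped without
++ * touching the GTT at all.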
++ */ ++ reloc.presumed_offset = target_obj_priv->gtt_offset; ++ ret = copy_to_user(relocs + i, &reloc, sizeof(reloc)); ++ if (ret != 0) { ++ drm_gem_object_unreference(target_obj); ++ i915_gem_object_unpin(obj); ++ return ret; ++ } ++ ++ drm_gem_object_unreference(target_obj); ++ } ++ ++ if (reloc_page != NULL) ++ iounmap(reloc_page); ++ ++#if WATCH_BUF ++ if (0) ++ i915_gem_dump_object(obj, 128, __func__, ~0); ++#endif ++ return 0; ++} ++ ++/** Dispatch a batchbuffer to the ring ++ */ ++static int ++i915_dispatch_gem_execbuffer(struct drm_device *dev, ++ struct drm_i915_gem_execbuffer *exec, ++ uint64_t exec_offset) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *) ++ (uintptr_t) exec->cliprects_ptr; ++ int nbox = exec->num_cliprects; ++ int i = 0, count; ++ uint32_t exec_start, exec_len; ++ RING_LOCALS; ++ ++ exec_start = (uint32_t) exec_offset + exec->batch_start_offset; ++ exec_len = (uint32_t) exec->batch_len; ++ ++ if ((exec_start | exec_len) & 0x7) { ++ DRM_ERROR("alignment\n"); ++ return -EINVAL; ++ } ++ ++ if (!exec_start) ++ return -EINVAL; ++ ++ count = nbox ? nbox : 1; ++ ++ for (i = 0; i < count; i++) { ++ if (i < nbox) { ++ int ret = i915_emit_box(dev, boxes, i, ++ exec->DR1, exec->DR4); ++ if (ret) ++ return ret; ++ } ++ ++ if (IS_I830(dev) || IS_845G(dev)) { ++ BEGIN_LP_RING(4); ++ OUT_RING(MI_BATCH_BUFFER); ++ OUT_RING(exec_start | MI_BATCH_NON_SECURE); ++ OUT_RING(exec_start + exec_len - 4); ++ OUT_RING(0); ++ ADVANCE_LP_RING(); ++ } else { ++ BEGIN_LP_RING(2); ++ if (IS_I965G(dev)) { ++ OUT_RING(MI_BATCH_BUFFER_START | ++ (2 << 6) | ++ MI_BATCH_NON_SECURE_I965); ++ OUT_RING(exec_start); ++ } else { ++ OUT_RING(MI_BATCH_BUFFER_START | ++ (2 << 6)); ++ OUT_RING(exec_start | MI_BATCH_NON_SECURE); ++ } ++ ADVANCE_LP_RING(); ++ } ++ } ++ ++ /* XXX breadcrumb */ ++ return 0; ++} ++ ++/* Throttle our rendering by waiting until the ring has completed our requests ++ * emitted over 20 msec ago. ++ * ++ * This should get us reasonable parallelism between CPU and GPU but also ++ * relatively low latency when blocking on a particular request to finish. 
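++ * (Rough illustration: a client submitting at 50Hz hands the GPU one
++ * request every 20 msec, so it should only block here once it gets
++ * more than about a frame ahead of the hardware.)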
++ */
++static int
++i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
++{
++ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++ int ret = 0;
++ uint32_t seqno;
++
++ mutex_lock(&dev->struct_mutex);
++ seqno = i915_file_priv->mm.last_gem_throttle_seqno;
++ i915_file_priv->mm.last_gem_throttle_seqno =
++ i915_file_priv->mm.last_gem_seqno;
++ if (seqno)
++ ret = i915_wait_request(dev, seqno);
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++}
++
++int
++i915_gem_execbuffer(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++ struct drm_i915_gem_execbuffer *args = data;
++ struct drm_i915_gem_exec_object *exec_list = NULL;
++ struct drm_gem_object **object_list = NULL;
++ struct drm_gem_object *batch_obj;
++ int ret, i, pinned = 0;
++ uint64_t exec_offset;
++ uint32_t seqno, flush_domains;
++
++#if WATCH_EXEC
++ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
++ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
++#endif
++
++ /* Copy in the exec list from userland */
++ exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
++ DRM_MEM_DRIVER);
++ object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
++ DRM_MEM_DRIVER);
++ if (exec_list == NULL || object_list == NULL) {
++ DRM_ERROR("Failed to allocate exec or object list "
++ "for %d buffers\n",
++ args->buffer_count);
++ ret = -ENOMEM;
++ goto pre_mutex_err;
++ }
++ ret = copy_from_user(exec_list,
++ (struct drm_i915_relocation_entry __user *)
++ (uintptr_t) args->buffers_ptr,
++ sizeof(*exec_list) * args->buffer_count);
++ if (ret != 0) {
++ DRM_ERROR("copy %d exec entries failed %d\n",
++ args->buffer_count, ret);
++ goto pre_mutex_err;
++ }
++
++ mutex_lock(&dev->struct_mutex);
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++
++ if (dev_priv->mm.wedged) {
++ DRM_ERROR("Execbuf while wedged\n");
++ mutex_unlock(&dev->struct_mutex);
++ return -EIO;
++ }
++
++ if (dev_priv->mm.suspended) {
++ DRM_ERROR("Execbuf while VT-switched.\n");
++ mutex_unlock(&dev->struct_mutex);
++ return -EBUSY;
++ }
++
++ /* Zero the global flush/invalidate flags.  These
++ * will be modified as each object is bound to the
++ * gtt
++ */
++ dev->invalidate_domains = 0;
++ dev->flush_domains = 0;
++
++ /* Look up object handles and perform the relocations */
++ for (i = 0; i < args->buffer_count; i++) {
++ object_list[i] = drm_gem_object_lookup(dev, file_priv,
++ exec_list[i].handle);
++ if (object_list[i] == NULL) {
++ DRM_ERROR("Invalid object handle %d at index %d\n",
++ exec_list[i].handle, i);
++ ret = -EBADF;
++ goto err;
++ }
++
++ object_list[i]->pending_read_domains = 0;
++ object_list[i]->pending_write_domain = 0;
++ ret = i915_gem_object_pin_and_relocate(object_list[i],
++ file_priv,
++ &exec_list[i]);
++ if (ret) {
++ DRM_ERROR("object bind and relocate failed %d\n", ret);
++ goto err;
++ }
++ pinned = i + 1;
++ }
++
++ /* Set the pending read domains for the batch buffer to COMMAND */
++ batch_obj = object_list[args->buffer_count-1];
++ batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
++ batch_obj->pending_write_domain = 0;
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++
++ for (i = 0; i < args->buffer_count; i++) {
++ struct drm_gem_object *obj = object_list[i];
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++ if (obj_priv->gtt_space == NULL) {
++ /* We evicted the buffer in the process of validating
++ * our set of buffers. We could try to recover by
++ * kicking everything out and trying again from
++ * the start.
++ */
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ /* make sure all previous memory operations have passed */
++ ret = i915_gem_object_set_domain(obj,
++ obj->pending_read_domains,
++ obj->pending_write_domain);
++ if (ret)
++ goto err;
++ }
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++
++ /* Flush/invalidate caches and chipset buffer */
++ flush_domains = i915_gem_dev_set_domain(dev);
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++
++#if WATCH_COHERENCY
++ for (i = 0; i < args->buffer_count; i++) {
++ i915_gem_object_check_coherency(object_list[i],
++ exec_list[i].handle);
++ }
++#endif
++
++ exec_offset = exec_list[args->buffer_count - 1].offset;
++
++#if WATCH_EXEC
++ i915_gem_dump_object(object_list[args->buffer_count - 1],
++ args->batch_len,
++ __func__,
++ ~0);
++#endif
++
++ /* Exec the batchbuffer */
++ ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
++ if (ret) {
++ DRM_ERROR("dispatch failed %d\n", ret);
++ goto err;
++ }
++
++ /*
++ * Ensure that the commands in the batch buffer are
++ * finished before the interrupt fires
++ */
++ flush_domains |= i915_retire_commands(dev);
++
++ i915_verify_inactive(dev, __FILE__, __LINE__);
++
++ /*
++ * Get a seqno representing the execution of the current buffer,
++ * which we can wait on. We would like to mitigate these interrupts,
++ * likely by only creating seqnos occasionally (so that we have
++ * *some* interrupts representing completion of buffers that we can
++ * wait on when trying to clear up gtt space).
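++ * (One hypothetical scheme: emit an interrupting request only every
++ * Nth execbuffer and have waiters sleep until the next such seqno
++ * passes; as written, this code adds one request per execbuffer.)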
++ */ ++ seqno = i915_add_request(dev, flush_domains); ++ BUG_ON(seqno == 0); ++ i915_file_priv->mm.last_gem_seqno = seqno; ++ for (i = 0; i < args->buffer_count; i++) { ++ struct drm_gem_object *obj = object_list[i]; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ ++ i915_gem_object_move_to_active(obj); ++ obj_priv->last_rendering_seqno = seqno; ++#if WATCH_LRU ++ DRM_INFO("%s: move to exec list %p\n", __func__, obj); ++#endif ++ } ++#if WATCH_LRU ++ i915_dump_lru(dev, __func__); ++#endif ++ ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ ++ /* Copy the new buffer offsets back to the user's exec list. */ ++ ret = copy_to_user((struct drm_i915_relocation_entry __user *) ++ (uintptr_t) args->buffers_ptr, ++ exec_list, ++ sizeof(*exec_list) * args->buffer_count); ++ if (ret) ++ DRM_ERROR("failed to copy %d exec entries " ++ "back to user (%d)\n", ++ args->buffer_count, ret); ++err: ++ if (object_list != NULL) { ++ for (i = 0; i < pinned; i++) ++ i915_gem_object_unpin(object_list[i]); ++ ++ for (i = 0; i < args->buffer_count; i++) ++ drm_gem_object_unreference(object_list[i]); ++ } ++ mutex_unlock(&dev->struct_mutex); ++ ++pre_mutex_err: ++ drm_free(object_list, sizeof(*object_list) * args->buffer_count, ++ DRM_MEM_DRIVER); ++ drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, ++ DRM_MEM_DRIVER); ++ ++ return ret; ++} ++ ++int ++i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int ret; ++ ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ if (obj_priv->gtt_space == NULL) { ++ ret = i915_gem_object_bind_to_gtt(obj, alignment); ++ if (ret != 0) { ++ DRM_ERROR("Failure to bind: %d", ret); ++ return ret; ++ } ++ } ++ obj_priv->pin_count++; ++ ++ /* If the object is not active and not pending a flush, ++ * remove it from the inactive list ++ */ ++ if (obj_priv->pin_count == 1) { ++ atomic_inc(&dev->pin_count); ++ atomic_add(obj->size, &dev->pin_memory); ++ if (!obj_priv->active && ++ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | ++ I915_GEM_DOMAIN_GTT)) == 0 && ++ !list_empty(&obj_priv->list)) ++ list_del_init(&obj_priv->list); ++ } ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ ++ return 0; ++} ++ ++void ++i915_gem_object_unpin(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++ obj_priv->pin_count--; ++ BUG_ON(obj_priv->pin_count < 0); ++ BUG_ON(obj_priv->gtt_space == NULL); ++ ++ /* If the object is no longer pinned, and is ++ * neither active nor being flushed, then stick it on ++ * the inactive list ++ */ ++ if (obj_priv->pin_count == 0) { ++ if (!obj_priv->active && ++ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | ++ I915_GEM_DOMAIN_GTT)) == 0) ++ list_move_tail(&obj_priv->list, ++ &dev_priv->mm.inactive_list); ++ atomic_dec(&dev->pin_count); ++ atomic_sub(obj->size, &dev->pin_memory); ++ } ++ i915_verify_inactive(dev, __FILE__, __LINE__); ++} ++ ++int ++i915_gem_pin_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_pin *args = data; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) { ++ DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", 
++ args->handle); ++ mutex_unlock(&dev->struct_mutex); ++ return -EBADF; ++ } ++ obj_priv = obj->driver_private; ++ ++ ret = i915_gem_object_pin(obj, args->alignment); ++ if (ret != 0) { ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ ++ /* XXX - flush the CPU caches for pinned objects ++ * as the X server doesn't manage domains yet ++ */ ++ if (obj->write_domain & I915_GEM_DOMAIN_CPU) { ++ i915_gem_clflush_object(obj); ++ drm_agp_chipset_flush(dev); ++ obj->write_domain = 0; ++ } ++ args->offset = obj_priv->gtt_offset; ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++int ++i915_gem_unpin_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_pin *args = data; ++ struct drm_gem_object *obj; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) { ++ DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", ++ args->handle); ++ mutex_unlock(&dev->struct_mutex); ++ return -EBADF; ++ } ++ ++ i915_gem_object_unpin(obj); ++ ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return 0; ++} ++ ++int ++i915_gem_busy_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_busy *args = data; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ++ mutex_lock(&dev->struct_mutex); ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) { ++ DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", ++ args->handle); ++ mutex_unlock(&dev->struct_mutex); ++ return -EBADF; ++ } ++ ++ obj_priv = obj->driver_private; ++ args->busy = obj_priv->active; ++ ++ drm_gem_object_unreference(obj); ++ mutex_unlock(&dev->struct_mutex); ++ return 0; ++} ++ ++int ++i915_gem_throttle_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ return i915_gem_ring_throttle(dev, file_priv); ++} ++ ++int i915_gem_init_object(struct drm_gem_object *obj) ++{ ++ struct drm_i915_gem_object *obj_priv; ++ ++ obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER); ++ if (obj_priv == NULL) ++ return -ENOMEM; ++ ++ /* ++ * We've just allocated pages from the kernel, ++ * so they've just been written by the CPU with ++ * zeros. They'll need to be clflushed before we ++ * use them with the GPU. 
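++ * Hence both read_domains and write_domain start out as
++ * I915_GEM_DOMAIN_CPU below.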
++ */ ++ obj->write_domain = I915_GEM_DOMAIN_CPU; ++ obj->read_domains = I915_GEM_DOMAIN_CPU; ++ ++ obj->driver_private = obj_priv; ++ obj_priv->obj = obj; ++ INIT_LIST_HEAD(&obj_priv->list); ++ return 0; ++} ++ ++void i915_gem_free_object(struct drm_gem_object *obj) ++{ ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ ++ while (obj_priv->pin_count > 0) ++ i915_gem_object_unpin(obj); ++ ++ i915_gem_object_unbind(obj); ++ ++ drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); ++ drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); ++} ++ ++int ++i915_gem_set_domain(struct drm_gem_object *obj, ++ struct drm_file *file_priv, ++ uint32_t read_domains, ++ uint32_t write_domain) ++{ ++ struct drm_device *dev = obj->dev; ++ int ret; ++ uint32_t flush_domains; ++ ++ BUG_ON(!mutex_is_locked(&dev->struct_mutex)); ++ ++ ret = i915_gem_object_set_domain(obj, read_domains, write_domain); ++ if (ret) ++ return ret; ++ flush_domains = i915_gem_dev_set_domain(obj->dev); ++ ++ if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) ++ (void) i915_add_request(dev, flush_domains); ++ ++ return 0; ++} ++ ++/** Unbinds all objects that are on the given buffer list. */ ++static int ++i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) ++{ ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret; ++ ++ while (!list_empty(head)) { ++ obj_priv = list_first_entry(head, ++ struct drm_i915_gem_object, ++ list); ++ obj = obj_priv->obj; ++ ++ if (obj_priv->pin_count != 0) { ++ DRM_ERROR("Pinned object in unbind list\n"); ++ mutex_unlock(&dev->struct_mutex); ++ return -EINVAL; ++ } ++ ++ ret = i915_gem_object_unbind(obj); ++ if (ret != 0) { ++ DRM_ERROR("Error unbinding object in LeaveVT: %d\n", ++ ret); ++ mutex_unlock(&dev->struct_mutex); ++ return ret; ++ } ++ } ++ ++ ++ return 0; ++} ++ ++static int ++i915_gem_idle(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ uint32_t seqno, cur_seqno, last_seqno; ++ int stuck; ++ ++ if (dev_priv->mm.suspended) ++ return 0; ++ ++ /* Hack! Don't let anybody do execbuf while we don't control the chip. ++ * We need to replace this with a semaphore, or something. ++ */ ++ dev_priv->mm.suspended = 1; ++ ++ i915_kernel_lost_context(dev); ++ ++ /* Flush the GPU along with all non-CPU write domains ++ */ ++ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), ++ ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); ++ seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU | ++ I915_GEM_DOMAIN_GTT)); ++ ++ if (seqno == 0) { ++ mutex_unlock(&dev->struct_mutex); ++ return -ENOMEM; ++ } ++ ++ dev_priv->mm.waiting_gem_seqno = seqno; ++ last_seqno = 0; ++ stuck = 0; ++ for (;;) { ++ cur_seqno = i915_get_gem_seqno(dev); ++ if (i915_seqno_passed(cur_seqno, seqno)) ++ break; ++ if (last_seqno == cur_seqno) { ++ if (stuck++ > 100) { ++ DRM_ERROR("hardware wedged\n"); ++ dev_priv->mm.wedged = 1; ++ DRM_WAKEUP(&dev_priv->irq_queue); ++ break; ++ } ++ } ++ msleep(10); ++ last_seqno = cur_seqno; ++ } ++ dev_priv->mm.waiting_gem_seqno = 0; ++ ++ i915_gem_retire_requests(dev); ++ ++ /* Active and flushing should now be empty as we've ++ * waited for a sequence higher than any pending execbuffer ++ */ ++ BUG_ON(!list_empty(&dev_priv->mm.active_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); ++ ++ /* Request should now be empty as we've also waited ++ * for the last request in the list ++ */ ++ BUG_ON(!list_empty(&dev_priv->mm.request_list)); ++ ++ /* Move all buffers out of the GTT. 
*/ ++ i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); ++ ++ BUG_ON(!list_empty(&dev_priv->mm.active_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.request_list)); ++ return 0; ++} ++ ++static int ++i915_gem_init_hws(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret; ++ ++ /* If we need a physical address for the status page, it's already ++ * initialized at driver load time. ++ */ ++ if (!I915_NEED_GFX_HWS(dev)) ++ return 0; ++ ++ obj = drm_gem_object_alloc(dev, 4096); ++ if (obj == NULL) { ++ DRM_ERROR("Failed to allocate status page\n"); ++ return -ENOMEM; ++ } ++ obj_priv = obj->driver_private; ++ ++ ret = i915_gem_object_pin(obj, 4096); ++ if (ret != 0) { ++ drm_gem_object_unreference(obj); ++ return ret; ++ } ++ ++ dev_priv->status_gfx_addr = obj_priv->gtt_offset; ++ dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset; ++ dev_priv->hws_map.size = 4096; ++ dev_priv->hws_map.type = 0; ++ dev_priv->hws_map.flags = 0; ++ dev_priv->hws_map.mtrr = 0; ++ ++ drm_core_ioremap(&dev_priv->hws_map, dev); ++ if (dev_priv->hws_map.handle == NULL) { ++ DRM_ERROR("Failed to map status page.\n"); ++ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); ++ drm_gem_object_unreference(obj); ++ return -EINVAL; ++ } ++ dev_priv->hws_obj = obj; ++ dev_priv->hw_status_page = dev_priv->hws_map.handle; ++ memset(dev_priv->hw_status_page, 0, PAGE_SIZE); ++ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); ++ DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); ++ ++ return 0; ++} ++ ++static int ++i915_gem_init_ringbuffer(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ int ret; ++ ++ ret = i915_gem_init_hws(dev); ++ if (ret != 0) ++ return ret; ++ ++ obj = drm_gem_object_alloc(dev, 128 * 1024); ++ if (obj == NULL) { ++ DRM_ERROR("Failed to allocate ringbuffer\n"); ++ return -ENOMEM; ++ } ++ obj_priv = obj->driver_private; ++ ++ ret = i915_gem_object_pin(obj, 4096); ++ if (ret != 0) { ++ drm_gem_object_unreference(obj); ++ return ret; ++ } ++ ++ /* Set up the kernel mapping for the ring. */ ++ dev_priv->ring.Size = obj->size; ++ dev_priv->ring.tail_mask = obj->size - 1; ++ ++ dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset; ++ dev_priv->ring.map.size = obj->size; ++ dev_priv->ring.map.type = 0; ++ dev_priv->ring.map.flags = 0; ++ dev_priv->ring.map.mtrr = 0; ++ ++ drm_core_ioremap(&dev_priv->ring.map, dev); ++ if (dev_priv->ring.map.handle == NULL) { ++ DRM_ERROR("Failed to map ringbuffer.\n"); ++ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); ++ drm_gem_object_unreference(obj); ++ return -EINVAL; ++ } ++ dev_priv->ring.ring_obj = obj; ++ dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ++ ++ /* Stop the ring if it's running. */ ++ I915_WRITE(PRB0_CTL, 0); ++ I915_WRITE(PRB0_HEAD, 0); ++ I915_WRITE(PRB0_TAIL, 0); ++ I915_WRITE(PRB0_START, 0); ++ ++ /* Initialize the ring. 
*/ ++ I915_WRITE(PRB0_START, obj_priv->gtt_offset); ++ I915_WRITE(PRB0_CTL, ++ ((obj->size - 4096) & RING_NR_PAGES) | ++ RING_NO_REPORT | ++ RING_VALID); ++ ++ /* Update our cache of the ring state */ ++ i915_kernel_lost_context(dev); ++ ++ return 0; ++} ++ ++static void ++i915_gem_cleanup_ringbuffer(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ if (dev_priv->ring.ring_obj == NULL) ++ return; ++ ++ drm_core_ioremapfree(&dev_priv->ring.map, dev); ++ ++ i915_gem_object_unpin(dev_priv->ring.ring_obj); ++ drm_gem_object_unreference(dev_priv->ring.ring_obj); ++ dev_priv->ring.ring_obj = NULL; ++ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); ++ ++ if (dev_priv->hws_obj != NULL) { ++ i915_gem_object_unpin(dev_priv->hws_obj); ++ drm_gem_object_unreference(dev_priv->hws_obj); ++ dev_priv->hws_obj = NULL; ++ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); ++ ++ /* Write high address into HWS_PGA when disabling. */ ++ I915_WRITE(HWS_PGA, 0x1ffff000); ++ } ++} ++ ++int ++i915_gem_entervt_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int ret; ++ ++ if (dev_priv->mm.wedged) { ++ DRM_ERROR("Reenabling wedged hardware, good luck\n"); ++ dev_priv->mm.wedged = 0; ++ } ++ ++ ret = i915_gem_init_ringbuffer(dev); ++ if (ret != 0) ++ return ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ BUG_ON(!list_empty(&dev_priv->mm.active_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); ++ BUG_ON(!list_empty(&dev_priv->mm.request_list)); ++ dev_priv->mm.suspended = 0; ++ mutex_unlock(&dev->struct_mutex); ++ return 0; ++} ++ ++int ++i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ int ret; ++ ++ mutex_lock(&dev->struct_mutex); ++ ret = i915_gem_idle(dev); ++ if (ret == 0) ++ i915_gem_cleanup_ringbuffer(dev); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return 0; ++} ++ ++void ++i915_gem_lastclose(struct drm_device *dev) ++{ ++ int ret; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (dev_priv->ring.ring_obj != NULL) { ++ ret = i915_gem_idle(dev); ++ if (ret) ++ DRM_ERROR("failed to idle hardware: %d\n", ret); ++ ++ i915_gem_cleanup_ringbuffer(dev); ++ } ++ ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++void i915_gem_load(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ INIT_LIST_HEAD(&dev_priv->mm.active_list); ++ INIT_LIST_HEAD(&dev_priv->mm.flushing_list); ++ INIT_LIST_HEAD(&dev_priv->mm.inactive_list); ++ INIT_LIST_HEAD(&dev_priv->mm.request_list); ++ INIT_DELAYED_WORK(&dev_priv->mm.retire_work, ++ i915_gem_retire_work_handler); ++ dev_priv->mm.next_gem_seqno = 1; ++ ++ i915_gem_detect_bit_6_swizzle(dev); ++} +diff -Nurd git/drivers/gpu/drm-tungsten/i915_gem_debug.c git-nokia/drivers/gpu/drm-tungsten/i915_gem_debug.c +--- git/drivers/gpu/drm-tungsten/i915_gem_debug.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_gem_debug.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,202 @@ ++/* ++ * Copyright © 2008 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell 
copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ * ++ * Authors: ++ * Keith Packard ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_compat.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#if WATCH_INACTIVE ++void ++i915_verify_inactive(struct drm_device *dev, char *file, int line) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ++ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { ++ obj = obj_priv->obj; ++ if (obj_priv->pin_count || obj_priv->active || ++ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | ++ I915_GEM_DOMAIN_GTT))) ++ DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n", ++ obj, ++ obj_priv->pin_count, obj_priv->active, ++ obj->write_domain, file, line); ++ } ++} ++#endif /* WATCH_INACTIVE */ ++ ++ ++#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE ++static void ++i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, ++ uint32_t bias, uint32_t mark) ++{ ++ uint32_t *mem = kmap_atomic(page, KM_USER0); ++ int i; ++ for (i = start; i < end; i += 4) ++ DRM_INFO("%08x: %08x%s\n", ++ (int) (bias + i), mem[i / 4], ++ (bias + i == mark) ? 
" ********" : ""); ++ kunmap_atomic(mem, KM_USER0); ++ /* give syslog time to catch up */ ++ msleep(1); ++} ++ ++void ++i915_gem_dump_object(struct drm_gem_object *obj, int len, ++ const char *where, uint32_t mark) ++{ ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int page; ++ ++ DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); ++ for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { ++ int page_len, chunk, chunk_len; ++ ++ page_len = len - page * PAGE_SIZE; ++ if (page_len > PAGE_SIZE) ++ page_len = PAGE_SIZE; ++ ++ for (chunk = 0; chunk < page_len; chunk += 128) { ++ chunk_len = page_len - chunk; ++ if (chunk_len > 128) ++ chunk_len = 128; ++ i915_gem_dump_page(obj_priv->page_list[page], ++ chunk, chunk + chunk_len, ++ obj_priv->gtt_offset + ++ page * PAGE_SIZE, ++ mark); ++ } ++ } ++} ++#endif ++ ++#if WATCH_LRU ++void ++i915_dump_lru(struct drm_device *dev, const char *where) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv; ++ ++ DRM_INFO("active list %s {\n", where); ++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, ++ list) ++ { ++ DRM_INFO(" %p: %08x\n", obj_priv, ++ obj_priv->last_rendering_seqno); ++ } ++ DRM_INFO("}\n"); ++ DRM_INFO("flushing list %s {\n", where); ++ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, ++ list) ++ { ++ DRM_INFO(" %p: %08x\n", obj_priv, ++ obj_priv->last_rendering_seqno); ++ } ++ DRM_INFO("}\n"); ++ DRM_INFO("inactive %s {\n", where); ++ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { ++ DRM_INFO(" %p: %08x\n", obj_priv, ++ obj_priv->last_rendering_seqno); ++ } ++ DRM_INFO("}\n"); ++} ++#endif ++ ++ ++#if WATCH_COHERENCY ++void ++i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) ++{ ++ struct drm_device *dev = obj->dev; ++ struct drm_i915_gem_object *obj_priv = obj->driver_private; ++ int page; ++ uint32_t *gtt_mapping; ++ uint32_t *backing_map = NULL; ++ int bad_count = 0; ++ ++ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", ++ __func__, obj, obj_priv->gtt_offset, handle, ++ obj->size / 1024); ++ ++ gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, ++ obj->size); ++ if (gtt_mapping == NULL) { ++ DRM_ERROR("failed to map GTT space\n"); ++ return; ++ } ++ ++ for (page = 0; page < obj->size / PAGE_SIZE; page++) { ++ int i; ++ ++ backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); ++ ++ if (backing_map == NULL) { ++ DRM_ERROR("failed to map backing page\n"); ++ goto out; ++ } ++ ++ for (i = 0; i < PAGE_SIZE / 4; i++) { ++ uint32_t cpuval = backing_map[i]; ++ uint32_t gttval = readl(gtt_mapping + ++ page * 1024 + i); ++ ++ if (cpuval != gttval) { ++ DRM_INFO("incoherent CPU vs GPU at 0x%08x: " ++ "0x%08x vs 0x%08x\n", ++ (int)(obj_priv->gtt_offset + ++ page * PAGE_SIZE + i * 4), ++ cpuval, gttval); ++ if (bad_count++ >= 8) { ++ DRM_INFO("...\n"); ++ goto out; ++ } ++ } ++ } ++ kunmap_atomic(backing_map, KM_USER0); ++ backing_map = NULL; ++ } ++ ++ out: ++ if (backing_map != NULL) ++ kunmap_atomic(backing_map, KM_USER0); ++ iounmap(gtt_mapping); ++ ++ /* give syslog time to catch up */ ++ msleep(1); ++ ++ /* Directly flush the object, since we just loaded values with the CPU ++ * from the backing pages and we don't want to disturb the cache ++ * management that we're trying to observe. 
++ */ ++ ++ i915_gem_clflush_object(obj); ++} ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/i915_gem_proc.c git-nokia/drivers/gpu/drm-tungsten/i915_gem_proc.c +--- git/drivers/gpu/drm-tungsten/i915_gem_proc.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_gem_proc.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,293 @@ ++/* ++ * Copyright © 2008 Intel Corporation ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ * ++ * Authors: ++ * Eric Anholt ++ * Keith Packard ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_compat.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++static int i915_gem_active_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Active:\n"); ++ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, ++ list) ++ { ++ struct drm_gem_object *obj = obj_priv->obj; ++ if (obj->name) { ++ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", ++ obj, obj->name, ++ obj->read_domains, obj->write_domain, ++ obj_priv->last_rendering_seqno); ++ } else { ++ DRM_PROC_PRINT(" %p: %08x %08x %d\n", ++ obj, ++ obj->read_domains, obj->write_domain, ++ obj_priv->last_rendering_seqno); ++ } ++ } ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static int i915_gem_flushing_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Flushing:\n"); ++ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, ++ list) ++ { ++ struct drm_gem_object *obj = obj_priv->obj; ++ if (obj->name) { ++ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", ++ obj, obj->name, ++ obj->read_domains, obj->write_domain, ++ obj_priv->last_rendering_seqno); ++ } else { ++ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, ++ obj->read_domains, 
obj->write_domain, ++ obj_priv->last_rendering_seqno); ++ } ++ } ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static int i915_gem_inactive_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_object *obj_priv; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Inactive:\n"); ++ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, ++ list) ++ { ++ struct drm_gem_object *obj = obj_priv->obj; ++ if (obj->name) { ++ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", ++ obj, obj->name, ++ obj->read_domains, obj->write_domain, ++ obj_priv->last_rendering_seqno); ++ } else { ++ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, ++ obj->read_domains, obj->write_domain, ++ obj_priv->last_rendering_seqno); ++ } ++ } ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static int i915_gem_request_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_i915_gem_request *gem_request; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Request:\n"); ++ list_for_each_entry(gem_request, &dev_priv->mm.request_list, ++ list) ++ { ++ DRM_PROC_PRINT(" %d @ %d %08x\n", ++ gem_request->seqno, ++ (int) (jiffies - gem_request->emitted_jiffies), ++ gem_request->flush_domains); ++ } ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++static int i915_gem_seqno_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev)); ++ DRM_PROC_PRINT("Waiter sequence: %d\n", ++ dev_priv->mm.waiting_gem_seqno); ++ DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); ++ if (len > request + offset) ++ return request; ++ *eof = 1; ++ return len - offset; ++} ++ ++ ++static int i915_interrupt_info(char *buf, char **start, off_t offset, ++ int request, int *eof, void *data) ++{ ++ struct drm_minor *minor = (struct drm_minor *) data; ++ struct drm_device *dev = minor->dev; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ int len = 0; ++ ++ if (offset > DRM_PROC_LIMIT) { ++ *eof = 1; ++ return 0; ++ } ++ ++ *start = &buf[offset]; ++ *eof = 0; ++ DRM_PROC_PRINT("Interrupt enable: %08x\n", ++ I915_READ(IER)); ++ DRM_PROC_PRINT("Interrupt identity: %08x\n", ++ I915_READ(IIR)); ++ DRM_PROC_PRINT("Interrupt mask: %08x\n", ++ I915_READ(IMR)); ++ DRM_PROC_PRINT("Pipe A stat: %08x\n", ++ I915_READ(PIPEASTAT)); ++ DRM_PROC_PRINT("Pipe B stat: %08x\n", ++ I915_READ(PIPEBSTAT)); ++ DRM_PROC_PRINT("Interrupts received: %d\n", ++ atomic_read(&dev_priv->irq_received)); ++ DRM_PROC_PRINT("Current sequence: %d\n", ++ i915_get_gem_seqno(dev)); ++ DRM_PROC_PRINT("Waiter 
sequence: %d\n",
++ dev_priv->mm.waiting_gem_seqno);
++ DRM_PROC_PRINT("IRQ sequence: %d\n",
++ dev_priv->mm.irq_gem_seqno);
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++static struct drm_proc_list {
++ /** file name */
++ const char *name;
++ /** proc callback */
++ int (*f) (char *, char **, off_t, int, int *, void *);
++} i915_gem_proc_list[] = {
++ {"i915_gem_active", i915_gem_active_info},
++ {"i915_gem_flushing", i915_gem_flushing_info},
++ {"i915_gem_inactive", i915_gem_inactive_info},
++ {"i915_gem_request", i915_gem_request_info},
++ {"i915_gem_seqno", i915_gem_seqno_info},
++ {"i915_gem_interrupt", i915_interrupt_info},
++};
++
++#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
++
++int i915_gem_proc_init(struct drm_minor *minor)
++{
++ struct proc_dir_entry *ent;
++ int i, j;
++
++ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
++ ent = create_proc_entry(i915_gem_proc_list[i].name,
++ S_IFREG | S_IRUGO, minor->dev_root);
++ if (!ent) {
++ DRM_ERROR("Cannot create /proc/dri/.../%s\n",
++ i915_gem_proc_list[i].name);
++ for (j = 0; j < i; j++)
++ remove_proc_entry(i915_gem_proc_list[j].name,
++ minor->dev_root);
++ return -1;
++ }
++ ent->read_proc = i915_gem_proc_list[i].f;
++ ent->data = minor;
++ }
++ return 0;
++}
++
++void i915_gem_proc_cleanup(struct drm_minor *minor)
++{
++ int i;
++
++ if (!minor->dev_root)
++ return;
++
++ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
++ remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_gem_tiling.c git-nokia/drivers/gpu/drm-tungsten/i915_gem_tiling.c
+--- git/drivers/gpu/drm-tungsten/i915_gem_tiling.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_gem_tiling.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,309 @@
++/*
++ * Copyright © 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++/** @file i915_gem_tiling.c
++ *
++ * Support for managing tiling state of buffer objects.
++ *
++ * The idea behind tiling is to increase cache hit rates by rearranging
++ * pixel data so that a group of pixel accesses are in the same cacheline.
++ * Performance improvement from doing this on the back/depth buffer is on
++ * the order of 30%.
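++ * (For reference: an X tile is a 4KB block laid out as 8 rows of
++ * 512 bytes, so short vertical walks stay within one tile instead of
++ * striding across the full surface pitch.)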
++ *
++ * Intel architectures make this somewhat more complicated, though, by
++ * adjustments made to addressing of data when the memory is in interleaved
++ * mode (matched pairs of DIMMS) to improve memory bandwidth.
++ * For interleaved memory, the CPU sends every sequential 64 bytes
++ * to an alternate memory channel so it can get the bandwidth from both.
++ *
++ * The GPU also rearranges its accesses for increased bandwidth to interleaved
++ * memory, and it matches what the CPU does for non-tiled. However, when tiled
++ * it does it a little differently, since one walks addresses not just in the
++ * X direction but also Y. So, along with alternating channels when bit
++ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
++ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
++ * are common to both the 915 and 965-class hardware.
++ *
++ * The CPU also sometimes XORs in higher bits as well, to improve
++ * bandwidth doing strided access like we do so frequently in graphics. This
++ * is called "Channel XOR Randomization" in the MCH documentation. The result
++ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
++ * decode.
++ *
++ * All of this bit 6 XORing has an effect on our memory management,
++ * as we need to make sure that the 3d driver can correctly address object
++ * contents.
++ *
++ * If we don't have interleaved memory, all tiling is safe and no swizzling is
++ * required.
++ *
++ * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
++ * 17 is not just a page offset, so as we page an object out and back in,
++ * individual pages in it will have different bit 17 addresses, resulting in
++ * each 64 bytes being swapped with its neighbor!
++ *
++ * Otherwise, if interleaved, we have to tell the 3d driver what address
++ * swizzling it needs to do, since it's writing with the CPU to the pages
++ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
++ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
++ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
++ * to match what the GPU expects.
++ */
++
++/**
++ * Detects bit 6 swizzling of address lookup between IGD access and CPU
++ * access through main memory.
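++ *
++ * For illustration, a 9_10 swizzle means a CPU mapping of a tiled
++ * buffer would be addressed roughly as
++ *
++ *	addr ^= ((addr >> 3) ^ (addr >> 4)) & (1 << 6);
++ *
++ * folding bits 9 and 10 down into bit 6 before each access.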
++ */
++void
++i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct pci_dev *bridge;
++ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
++ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
++ int mchbar_offset;
++ char __iomem *mchbar;
++ int ret;
++
++ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
++ if (bridge == NULL) {
++ DRM_ERROR("Couldn't get bridge device\n");
++ return;
++ }
++
++ ret = pci_enable_device(bridge);
++ if (ret != 0) {
++ DRM_ERROR("pci_enable_device failed: %d\n", ret);
++ return;
++ }
++
++ if (IS_I965G(dev))
++ mchbar_offset = 0x48;
++ else
++ mchbar_offset = 0x44;
++
++ /* Use resource 2 for our BAR that's stashed in a nonstandard location,
++ * since the bridge would only ever use standard BARs 0-1 (though it
++ * doesn't anyway)
++ */
++ ret = pci_read_base(bridge, mchbar_offset, &bridge->resource[2]);
++ if (ret != 0) {
++ DRM_ERROR("pci_read_base failed: %d\n", ret);
++ return;
++ }
++
++ mchbar = ioremap(pci_resource_start(bridge, 2),
++ pci_resource_len(bridge, 2));
++ if (mchbar == NULL) {
++ DRM_ERROR("Couldn't map MCHBAR to determine tile swizzling\n");
++ return;
++ }
++
++ if (IS_I965G(dev) && !IS_I965GM(dev)) {
++ uint32_t chdecmisc;
++
++ /* On the 965, channel interleave appears to be determined by
++ * the flex bit.  If flex is set, then the ranks (sides of a
++ * DIMM) of memory will be "stacked" (physical addresses walk
++ * through one rank then move on to the next, flipping channels
++ * or not depending on rank configuration).  The GPU in this
++ * case does exactly the same addressing as the CPU.
++ *
++ * Unlike the 945, channel randomization does not
++ * appear to be available.
++ *
++ * XXX: While the G965 doesn't appear to do any interleaving
++ * when the DIMMs are not exactly matched, the G4x chipsets
++ * might do so for "L-shaped" configurations, and will need
++ * to be detected.
++ *
++ * L-shaped configuration:
++ *
++ * +-----+
++ * | |
++ * |DIMM2| <-- non-interleaved
++ * +-----+
++ * +-----+ +-----+
++ * | | | |
++ * |DIMM0| |DIMM1| <-- interleaved area
++ * +-----+ +-----+
++ */
++ chdecmisc = readb(mchbar + CHDECMISC);
++
++ if (chdecmisc == 0xff) {
++ DRM_ERROR("Couldn't read from MCHBAR.  "
++ "Disabling tiling.\n");
++ } else if (chdecmisc & CHDECMISC_FLEXMEMORY) {
++ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
++ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
++ } else {
++ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
++ swizzle_y = I915_BIT_6_SWIZZLE_9;
++ }
++ } else if (IS_I9XX(dev)) {
++ uint32_t dcc;
++
++ /* On 915-945 and GM965, channel interleave by the CPU is
++ * determined by DCC.  The CPU will alternate based on bit 6
++ * in interleaved mode, and the GPU will then also alternate
++ * on bit 6, 9, and 10 for X, but the CPU may also optionally
++ * alternate based on bit 17 (XOR not disabled and XOR
++ * bit == 17).
++ */ ++ dcc = readl(mchbar + DCC); ++ switch (dcc & DCC_ADDRESSING_MODE_MASK) { ++ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL: ++ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC: ++ swizzle_x = I915_BIT_6_SWIZZLE_NONE; ++ swizzle_y = I915_BIT_6_SWIZZLE_NONE; ++ break; ++ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: ++ if (IS_I915G(dev) || IS_I915GM(dev) || ++ dcc & DCC_CHANNEL_XOR_DISABLE) { ++ swizzle_x = I915_BIT_6_SWIZZLE_9_10; ++ swizzle_y = I915_BIT_6_SWIZZLE_9; ++ } else if (IS_I965GM(dev)) { ++ /* GM965 only does bit 11-based channel ++ * randomization ++ */ ++ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; ++ swizzle_y = I915_BIT_6_SWIZZLE_9_11; ++ } else { ++ /* Bit 17 or perhaps other swizzling */ ++ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; ++ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; ++ } ++ break; ++ } ++ if (dcc == 0xffffffff) { ++ DRM_ERROR("Couldn't read from MCHBAR. " ++ "Disabling tiling.\n"); ++ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; ++ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; ++ } ++ } else { ++ /* As far as we know, the 865 doesn't have these bit 6 ++ * swizzling issues. ++ */ ++ swizzle_x = I915_BIT_6_SWIZZLE_NONE; ++ swizzle_y = I915_BIT_6_SWIZZLE_NONE; ++ } ++ ++ iounmap(mchbar); ++ ++ dev_priv->mm.bit_6_swizzle_x = swizzle_x; ++ dev_priv->mm.bit_6_swizzle_y = swizzle_y; ++} ++ ++/** ++ * Sets the tiling mode of an object, returning the required swizzling of ++ * bit 6 of addresses in the object. ++ */ ++int ++i915_gem_set_tiling(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_i915_gem_set_tiling *args = data; ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct drm_i915_gem_object *obj_priv; ++ ++ obj = drm_gem_object_lookup(dev, file_priv, args->handle); ++ if (obj == NULL) ++ return -EINVAL; ++ obj_priv = obj->driver_private; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ if (args->tiling_mode == I915_TILING_NONE) { ++ obj_priv->tiling_mode = I915_TILING_NONE; ++ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; ++ } else { ++ if (args->tiling_mode == I915_TILING_X) ++ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; ++ else ++ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; ++ /* If we can't handle the swizzling, make it untiled. */ ++ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { ++ args->tiling_mode = I915_TILING_NONE; ++ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; ++ } ++ } ++ obj_priv->tiling_mode = args->tiling_mode; ++ ++ mutex_unlock(&dev->struct_mutex); ++ ++ drm_gem_object_unreference(obj); ++ ++ return 0; ++} ++ ++/** ++ * Returns the current tiling mode and required bit 6 swizzling for the object. 
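++ * Userspace is expected to apply the returned swizzle itself whenever
++ * it touches the object's pages through a CPU mapping.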
++ */
++int
++i915_gem_get_tiling(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_i915_gem_get_tiling *args = data;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ struct drm_gem_object *obj;
++ struct drm_i915_gem_object *obj_priv;
++
++ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++ if (obj == NULL)
++ return -EINVAL;
++ obj_priv = obj->driver_private;
++
++ mutex_lock(&dev->struct_mutex);
++
++ args->tiling_mode = obj_priv->tiling_mode;
++ switch (obj_priv->tiling_mode) {
++ case I915_TILING_X:
++ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
++ break;
++ case I915_TILING_Y:
++ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
++ break;
++ case I915_TILING_NONE:
++ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
++ break;
++ default:
++ DRM_ERROR("unknown tiling mode\n");
++ }
++
++ mutex_unlock(&dev->struct_mutex);
++
++ drm_gem_object_unreference(obj);
++
++ return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_ioc32.c git-nokia/drivers/gpu/drm-tungsten/i915_ioc32.c
+--- git/drivers/gpu/drm-tungsten/i915_ioc32.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_ioc32.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,284 @@
++/**
++ * \file i915_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the i915 DRM.
++ *
++ * \author Alan Hourihane
++ *
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * Copyright (C) Alan Hourihane 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++typedef struct _drm_i915_batchbuffer32 {
++ int start; /* agp offset */
++ int used; /* nr bytes in use */
++ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
++ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
++ int num_cliprects; /* multipass with multiple cliprects? */
++ u32 cliprects; /* pointer to userspace cliprects */
++} drm_i915_batchbuffer32_t;
++
++static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ drm_i915_batchbuffer32_t batchbuffer32;
++ drm_i915_batchbuffer_t __user *batchbuffer;
++
++ if (copy_from_user
++ (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
++ return -EFAULT;
++
++ batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
++ if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
++ || __put_user(batchbuffer32.start, &batchbuffer->start)
++ || __put_user(batchbuffer32.used, &batchbuffer->used)
++ || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
++ || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
++ || __put_user(batchbuffer32.num_cliprects,
++ &batchbuffer->num_cliprects)
++ || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
++ &batchbuffer->cliprects))
++ return -EFAULT;
++
++ return drm_ioctl(file->f_dentry->d_inode, file,
++ DRM_IOCTL_I915_BATCHBUFFER,
++ (unsigned long) batchbuffer);
++}
++
++typedef struct _drm_i915_cmdbuffer32 {
++ u32 buf; /* pointer to userspace command buffer */
++ int sz; /* nr bytes in buf */
++ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
++ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
++ int num_cliprects; /* multipass with multiple cliprects? */
++ u32 cliprects; /* pointer to userspace cliprects */
++} drm_i915_cmdbuffer32_t;
++
++static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ drm_i915_cmdbuffer32_t cmdbuffer32;
++ drm_i915_cmdbuffer_t __user *cmdbuffer;
++
++ if (copy_from_user
++ (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
++ return -EFAULT;
++
++ cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
++ if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
++ || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
++ &cmdbuffer->buf)
++ || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
++ || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
++ || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
++ || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
++ || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
++ &cmdbuffer->cliprects))
++ return -EFAULT;
++
++ return drm_ioctl(file->f_dentry->d_inode, file,
++ DRM_IOCTL_I915_CMDBUFFER, (unsigned long) cmdbuffer);
++}
++
++typedef struct drm_i915_irq_emit32 {
++ u32 irq_seq;
++} drm_i915_irq_emit32_t;
++
++static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ drm_i915_irq_emit32_t req32;
++ drm_i915_irq_emit_t __user *request;
++
++ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
++ return -EFAULT;
++
++ request = compat_alloc_user_space(sizeof(*request));
++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++ || __put_user((int __user *)(unsigned long)req32.irq_seq,
++ &request->irq_seq))
++ return -EFAULT;
++
++ return drm_ioctl(file->f_dentry->d_inode, file,
++ DRM_IOCTL_I915_IRQ_EMIT, (unsigned long) request);
++}
++typedef struct drm_i915_getparam32 {
++ int param;
++ u32 value;
++} drm_i915_getparam32_t;
++
++static int compat_i915_getparam(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ drm_i915_getparam32_t req32;
++ drm_i915_getparam_t __user *request;
++
++ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
++ return -EFAULT;
++
++ request = compat_alloc_user_space(sizeof(*request));
++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++ || __put_user(req32.param, &request->param) ++ || __put_user((void __user *)(unsigned long)req32.value, ++ &request->value)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_I915_GETPARAM, (unsigned long) request); ++} ++ ++typedef struct drm_i915_mem_alloc32 { ++ int region; ++ int alignment; ++ int size; ++ u32 region_offset; /* offset from start of fb or agp */ ++} drm_i915_mem_alloc32_t; ++ ++static int compat_i915_alloc(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_i915_mem_alloc32_t req32; ++ drm_i915_mem_alloc_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.region, &request->region) ++ || __put_user(req32.alignment, &request->alignment) ++ || __put_user(req32.size, &request->size) ++ || __put_user((void __user *)(unsigned long)req32.region_offset, ++ &request->region_offset)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_I915_ALLOC, (unsigned long) request); ++} ++ ++typedef struct drm_i915_execbuffer32 { ++ uint64_t ops_list; ++ uint32_t num_buffers; ++ struct _drm_i915_batchbuffer32 batch; ++ drm_context_t context; ++ struct drm_fence_arg fence_arg; ++} drm_i915_execbuffer32_t; ++ ++static int compat_i915_execbuffer(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_i915_execbuffer32_t req32; ++ struct drm_i915_execbuffer __user *request; ++ int err; ++ ++ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.ops_list, &request->ops_list) ++ || __put_user(req32.num_buffers, &request->num_buffers) ++ || __put_user(req32.context, &request->context) ++ || __copy_to_user(&request->fence_arg, &req32.fence_arg, ++ sizeof(req32.fence_arg)) ++ || __put_user(req32.batch.start, &request->batch.start) ++ || __put_user(req32.batch.used, &request->batch.used) ++ || __put_user(req32.batch.DR1, &request->batch.DR1) ++ || __put_user(req32.batch.DR4, &request->batch.DR4) ++ || __put_user(req32.batch.num_cliprects, ++ &request->batch.num_cliprects) ++ || __put_user((int __user *)(unsigned long)req32.batch.cliprects, ++ &request->batch.cliprects)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_I915_EXECBUFFER, (unsigned long)request); ++ ++ if (err) ++ return err; ++ ++ if (__get_user(req32.fence_arg.handle, &request->fence_arg.handle) ++ || __get_user(req32.fence_arg.fence_class, &request->fence_arg.fence_class) ++ || __get_user(req32.fence_arg.type, &request->fence_arg.type) ++ || __get_user(req32.fence_arg.flags, &request->fence_arg.flags) ++ || __get_user(req32.fence_arg.signaled, &request->fence_arg.signaled) ++ || __get_user(req32.fence_arg.error, &request->fence_arg.error) ++ || __get_user(req32.fence_arg.sequence, &request->fence_arg.sequence)) ++ return -EFAULT; ++ ++ if (copy_to_user((void __user *)arg, &req32, sizeof(req32))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++ ++drm_ioctl_compat_t *i915_compat_ioctls[] = { ++ [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer, ++ [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer, ++ [DRM_I915_GETPARAM] = compat_i915_getparam, ++ [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit, ++ [DRM_I915_ALLOC] = compat_i915_alloc, ++#ifdef 
I915_HAVE_BUFFER ++ [DRM_I915_EXECBUFFER] = compat_i915_execbuffer, ++#endif ++}; ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/dri/card<n>. ++ * ++ * \param filp file pointer. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. ++ */ ++long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ drm_ioctl_compat_t *fn = NULL; ++ int ret; ++ ++ if (nr < DRM_COMMAND_BASE) ++ return drm_compat_ioctl(filp, cmd, arg); ++ ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) ++ fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE]; ++ ++ lock_kernel(); /* XXX for now */ ++ if (fn != NULL) ++ ret = (*fn)(filp, cmd, arg); ++ else ++ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ unlock_kernel(); ++ ++ return ret; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/i915_irq.c git-nokia/drivers/gpu/drm-tungsten/i915_irq.c +--- git/drivers/gpu/drm-tungsten/i915_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_irq.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1005 @@ ++/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- ++ */ ++/* ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#define MAX_NOPID ((u32)~0) ++ ++/* ++ * These are the interrupts used by the driver ++ */ ++#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \ ++ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ ++ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) ++ ++static inline void ++i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask) ++{ ++ if ((dev_priv->irq_mask_reg & mask) != 0) { ++ dev_priv->irq_mask_reg &= ~mask; ++ I915_WRITE(IMR, dev_priv->irq_mask_reg); ++ (void) I915_READ(IMR); ++ } ++} ++ ++static inline void ++i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask) ++{ ++ if ((dev_priv->irq_mask_reg & mask) != mask) { ++ dev_priv->irq_mask_reg |= mask; ++ I915_WRITE(IMR, dev_priv->irq_mask_reg); ++ (void) I915_READ(IMR); ++ } ++} ++ ++/** ++ * i915_get_pipe - return the pipe associated with a given plane ++ * @dev: DRM device ++ * @plane: plane to look for ++ * ++ * The Intel Mesa & 2D drivers call the vblank routines with a plane number ++ * rather than a pipe number, since they may not always be equal. This routine ++ * maps the given @plane back to a pipe number. ++ */ ++static int ++i915_get_pipe(struct drm_device *dev, int plane) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ u32 dspcntr; ++ ++ dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR); ++ ++ return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0; ++} ++ ++/** ++ * i915_get_plane - return the plane associated with a given pipe ++ * @dev: DRM device ++ * @pipe: pipe to look for ++ * ++ * The Intel Mesa & 2D drivers call the vblank routines with a plane number ++ * rather than a pipe number, since they may not always be equal. This routine ++ * maps the given @pipe back to a plane number. ++ */ ++static int ++i915_get_plane(struct drm_device *dev, int pipe) ++{ ++ if (i915_get_pipe(dev, 0) == pipe) ++ return 0; ++ return 1; ++} ++ ++/** ++ * i915_pipe_enabled - check if a pipe is enabled ++ * @dev: DRM device ++ * @pipe: pipe to check ++ * ++ * Reading certain registers when the pipe is disabled can hang the chip. ++ * Use this routine to make sure the PLL is running and the pipe is active ++ * before reading such registers if unsure. ++ */ ++static int ++i915_pipe_enabled(struct drm_device *dev, int pipe) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF; ++ ++ if (I915_READ(pipeconf) & PIPEACONF_ENABLE) ++ return 1; ++ ++ return 0; ++} ++ ++/** ++ * Emit a synchronous flip. ++ * ++ * This function must be called with the drawable spinlock held. ++ */ ++static void ++i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw, ++ int plane) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ u16 x1, y1, x2, y2; ++ int pf_planes = 1 << plane; ++ ++ DRM_SPINLOCK_ASSERT(&dev->drw_lock); ++ ++ /* If the window is visible on the other plane, we have to flip on that ++ * plane as well. 
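++ * pf_planes starts out as a single-plane mask (1 << plane); the ++ * overlap test below widens it to 0x3 so both planes flip together. 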
++ */ ++ if (plane == 1) { ++ x1 = sarea_priv->planeA_x; ++ y1 = sarea_priv->planeA_y; ++ x2 = x1 + sarea_priv->planeA_w; ++ y2 = y1 + sarea_priv->planeA_h; ++ } else { ++ x1 = sarea_priv->planeB_x; ++ y1 = sarea_priv->planeB_y; ++ x2 = x1 + sarea_priv->planeB_w; ++ y2 = y1 + sarea_priv->planeB_h; ++ } ++ ++ if (x2 > 0 && y2 > 0) { ++ int i, num_rects = drw->num_rects; ++ struct drm_clip_rect *rect = drw->rects; ++ ++ for (i = 0; i < num_rects; i++) ++ if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 || ++ rect[i].x2 <= x1 || rect[i].y2 <= y1)) { ++ pf_planes = 0x3; ++ ++ break; ++ } ++ } ++ ++ i915_dispatch_flip(dev, pf_planes, 1); ++} ++ ++/** ++ * Emit blits for scheduled buffer swaps. ++ * ++ * This function will be called with the HW lock held. ++ */ ++static void i915_vblank_tasklet(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ struct list_head *list, *tmp, hits, *hit; ++ int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages; ++ unsigned counter[2]; ++ struct drm_drawable_info *drw; ++ drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ u32 cpp = dev_priv->cpp, offsets[3]; ++ u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD | ++ XY_SRC_COPY_BLT_WRITE_ALPHA | ++ XY_SRC_COPY_BLT_WRITE_RGB) ++ : XY_SRC_COPY_BLT_CMD; ++ u32 src_pitch = sarea_priv->pitch * cpp; ++ u32 dst_pitch = sarea_priv->pitch * cpp; ++ /* COPY rop (0xcc), map cpp to magic color depth constants */ ++ u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24); ++ RING_LOCALS; ++ ++ if (IS_I965G(dev) && sarea_priv->front_tiled) { ++ cmd |= XY_SRC_COPY_BLT_DST_TILED; ++ dst_pitch >>= 2; ++ } ++ if (IS_I965G(dev) && sarea_priv->back_tiled) { ++ cmd |= XY_SRC_COPY_BLT_SRC_TILED; ++ src_pitch >>= 2; ++ } ++ ++ counter[0] = drm_vblank_count(dev, 0); ++ counter[1] = drm_vblank_count(dev, 1); ++ ++ DRM_DEBUG("\n"); ++ ++ INIT_LIST_HEAD(&hits); ++ ++ nhits = nrects = 0; ++ ++ /* No irqsave/restore necessary. This tasklet may be run in an ++ * interrupt context or normal context, but we don't have to worry ++ * about getting interrupted by something acquiring the lock, because ++ * we are the interrupt context thing that acquires the lock. 
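++ * The scheduling side (i915_vblank_swap) takes swaps_lock with the ++ * IRQ-saving variants, so process context cannot deadlock against us. 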
++ */ ++ DRM_SPINLOCK(&dev_priv->swaps_lock); ++ ++ /* Find buffer swaps scheduled for this vertical blank */ ++ list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { ++ drm_i915_vbl_swap_t *vbl_swap = ++ list_entry(list, drm_i915_vbl_swap_t, head); ++ int pipe = i915_get_pipe(dev, vbl_swap->plane); ++ ++ if ((counter[pipe] - vbl_swap->sequence) > (1<<23)) ++ continue; ++ ++ list_del(list); ++ dev_priv->swaps_pending--; ++ drm_vblank_put(dev, pipe); ++ ++ DRM_SPINUNLOCK(&dev_priv->swaps_lock); ++ DRM_SPINLOCK(&dev->drw_lock); ++ ++ drw = drm_get_drawable_info(dev, vbl_swap->drw_id); ++ ++ if (!drw) { ++ DRM_SPINUNLOCK(&dev->drw_lock); ++ drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); ++ DRM_SPINLOCK(&dev_priv->swaps_lock); ++ continue; ++ } ++ ++ list_for_each(hit, &hits) { ++ drm_i915_vbl_swap_t *swap_cmp = ++ list_entry(hit, drm_i915_vbl_swap_t, head); ++ struct drm_drawable_info *drw_cmp = ++ drm_get_drawable_info(dev, swap_cmp->drw_id); ++ ++ if (drw_cmp && ++ drw_cmp->rects[0].y1 > drw->rects[0].y1) { ++ list_add_tail(list, hit); ++ break; ++ } ++ } ++ ++ DRM_SPINUNLOCK(&dev->drw_lock); ++ ++ /* List of hits was empty, or we reached the end of it */ ++ if (hit == &hits) ++ list_add_tail(list, hits.prev); ++ ++ nhits++; ++ ++ DRM_SPINLOCK(&dev_priv->swaps_lock); ++ } ++ ++ DRM_SPINUNLOCK(&dev_priv->swaps_lock); ++ ++ if (nhits == 0) { ++ return; ++ } ++ ++ i915_kernel_lost_context(dev); ++ ++ upper[0] = upper[1] = 0; ++ slice[0] = max(sarea_priv->planeA_h / nhits, 1); ++ slice[1] = max(sarea_priv->planeB_h / nhits, 1); ++ lower[0] = sarea_priv->planeA_y + slice[0]; ++ lower[1] = sarea_priv->planeB_y + slice[1]; ++ ++ offsets[0] = sarea_priv->front_offset; ++ offsets[1] = sarea_priv->back_offset; ++ offsets[2] = sarea_priv->third_offset; ++ num_pages = sarea_priv->third_handle ? 3 : 2; ++ ++ DRM_SPINLOCK(&dev->drw_lock); ++ ++ /* Emit blits for buffer swaps, partitioning both outputs into as many ++ * slices as there are buffer swaps scheduled in order to avoid tearing ++ * (based on the assumption that a single buffer swap would always ++ * complete before scanout starts). 
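++ * upper[] and lower[] track the per-plane bounds of the current ++ * slice; the last pass below is clamped to the full sarea height. 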
++ */ ++ for (i = 0; i++ < nhits; ++ upper[0] = lower[0], lower[0] += slice[0], ++ upper[1] = lower[1], lower[1] += slice[1]) { ++ int init_drawrect = 1; ++ ++ if (i == nhits) ++ lower[0] = lower[1] = sarea_priv->height; ++ ++ list_for_each(hit, &hits) { ++ drm_i915_vbl_swap_t *swap_hit = ++ list_entry(hit, drm_i915_vbl_swap_t, head); ++ struct drm_clip_rect *rect; ++ int num_rects, plane, front, back; ++ unsigned short top, bottom; ++ ++ drw = drm_get_drawable_info(dev, swap_hit->drw_id); ++ ++ if (!drw) ++ continue; ++ ++ plane = swap_hit->plane; ++ ++ if (swap_hit->flip) { ++ i915_dispatch_vsync_flip(dev, drw, plane); ++ continue; ++ } ++ ++ if (init_drawrect) { ++ int width = sarea_priv->width; ++ int height = sarea_priv->height; ++ if (IS_I965G(dev)) { ++ BEGIN_LP_RING(4); ++ ++ OUT_RING(GFX_OP_DRAWRECT_INFO_I965); ++ OUT_RING(0); ++ OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16)); ++ OUT_RING(0); ++ ++ ADVANCE_LP_RING(); ++ } else { ++ BEGIN_LP_RING(6); ++ ++ OUT_RING(GFX_OP_DRAWRECT_INFO); ++ OUT_RING(0); ++ OUT_RING(0); ++ OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16)); ++ OUT_RING(0); ++ OUT_RING(0); ++ ++ ADVANCE_LP_RING(); ++ } ++ ++ sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; ++ ++ init_drawrect = 0; ++ } ++ ++ rect = drw->rects; ++ top = upper[plane]; ++ bottom = lower[plane]; ++ ++ front = (dev_priv->sarea_priv->pf_current_page >> ++ (2 * plane)) & 0x3; ++ back = (front + 1) % num_pages; ++ ++ for (num_rects = drw->num_rects; num_rects--; rect++) { ++ int y1 = max(rect->y1, top); ++ int y2 = min(rect->y2, bottom); ++ ++ if (y1 >= y2) ++ continue; ++ ++ BEGIN_LP_RING(8); ++ ++ OUT_RING(cmd); ++ OUT_RING(ropcpp | dst_pitch); ++ OUT_RING((y1 << 16) | rect->x1); ++ OUT_RING((y2 << 16) | rect->x2); ++ OUT_RING(offsets[front]); ++ OUT_RING((y1 << 16) | rect->x1); ++ OUT_RING(src_pitch); ++ OUT_RING(offsets[back]); ++ ++ ADVANCE_LP_RING(); ++ } ++ } ++ } ++ ++ DRM_SPINUNLOCK(&dev->drw_lock); ++ ++ list_for_each_safe(hit, tmp, &hits) { ++ drm_i915_vbl_swap_t *swap_hit = ++ list_entry(hit, drm_i915_vbl_swap_t, head); ++ ++ list_del(hit); ++ ++ drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER); ++ } ++} ++ ++u32 i915_get_vblank_counter(struct drm_device *dev, int plane) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ unsigned long high_frame; ++ unsigned long low_frame; ++ u32 high1, high2, low, count; ++ int pipe; ++ ++ pipe = i915_get_pipe(dev, plane); ++ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; ++ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; ++ ++ if (!i915_pipe_enabled(dev, pipe)) { ++ DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); ++ return 0; ++ } ++ ++ /* ++ * High & low register fields aren't synchronized, so make sure ++ * we get a low value that's stable across two reads of the high ++ * register. 
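++ * The loop below re-reads the high field until two reads agree and ++ * then merges in the low field, giving a consistent 24-bit count. 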
++ */ ++ do { ++ high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> ++ PIPE_FRAME_HIGH_SHIFT); ++ low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> ++ PIPE_FRAME_LOW_SHIFT); ++ high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> ++ PIPE_FRAME_HIGH_SHIFT); ++ } while (high1 != high2); ++ ++ count = (high1 << 8) | low; ++ ++ return count; ++} ++ ++irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) ++{ ++ struct drm_device *dev = (struct drm_device *) arg; ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ u32 iir; ++ u32 pipea_stats = 0, pipeb_stats = 0; ++ int vblank = 0; ++#ifdef __linux__ ++ if (dev->pdev->msi_enabled) ++ I915_WRITE(IMR, ~0); ++#endif ++ iir = I915_READ(IIR); ++#if 0 ++ DRM_DEBUG("flag=%08x\n", iir); ++#endif ++ atomic_inc(&dev_priv->irq_received); ++ if (iir == 0) { ++#ifdef __linux__ ++ if (dev->pdev->msi_enabled) { ++ I915_WRITE(IMR, dev_priv->irq_mask_reg); ++ (void) I915_READ(IMR); ++ } ++#endif ++ return IRQ_NONE; ++ } ++ ++ /* ++ * Clear the PIPE(A|B)STAT regs before the IIR otherwise ++ * we may get extra interrupts. ++ */ ++ if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) { ++ pipea_stats = I915_READ(PIPEASTAT); ++ if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| ++ PIPE_VBLANK_INTERRUPT_STATUS)) ++ { ++ vblank++; ++ drm_handle_vblank(dev, i915_get_plane(dev, 0)); ++ } ++ I915_WRITE(PIPEASTAT, pipea_stats); ++ } ++ if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) { ++ pipeb_stats = I915_READ(PIPEBSTAT); ++ /* Ack the event */ ++ I915_WRITE(PIPEBSTAT, pipeb_stats); ++ ++ /* The vblank interrupt gets enabled even if we didn't ask for ++ it, so make sure it's shut down again */ ++ if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)) ++ pipeb_stats &= ~(I915_VBLANK_INTERRUPT_ENABLE); ++ ++ if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| ++ PIPE_VBLANK_INTERRUPT_STATUS)) ++ { ++ vblank++; ++ drm_handle_vblank(dev, i915_get_plane(dev, 1)); ++ } ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ if (pipeb_stats & I915_LEGACY_BLC_EVENT_ENABLE) ++ opregion_asle_intr(dev); ++#endif ++#endif ++ I915_WRITE(PIPEBSTAT, pipeb_stats); ++ } ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ if (iir & I915_ASLE_INTERRUPT) ++ opregion_asle_intr(dev); ++#endif ++#endif ++ ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); ++ ++ I915_WRITE(IIR, iir); ++#ifdef __linux__ ++ if (dev->pdev->msi_enabled) ++ I915_WRITE(IMR, dev_priv->irq_mask_reg); ++#endif ++ (void) I915_READ(IIR); /* Flush posted writes */ ++ ++ if (iir & I915_USER_INTERRUPT) { ++#ifdef I915_HAVE_GEM ++ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); ++#endif ++ DRM_WAKEUP(&dev_priv->irq_queue); ++#ifdef I915_HAVE_FENCE ++ i915_fence_handler(dev); ++#endif ++ } ++ ++ if (vblank) { ++ if (dev_priv->swaps_pending > 0) ++ drm_locked_tasklet(dev, i915_vblank_tasklet); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++int i915_emit_irq(struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ i915_kernel_lost_context(dev); ++ ++ DRM_DEBUG("\n"); ++ ++ i915_emit_breadcrumb(dev); ++ ++ BEGIN_LP_RING(2); ++ OUT_RING(0); ++ OUT_RING(MI_USER_INTERRUPT); ++ ADVANCE_LP_RING(); ++ ++ return dev_priv->counter; ++} ++ ++void i915_user_irq_on(drm_i915_private_t *dev_priv) ++{ ++ DRM_SPINLOCK(&dev_priv->user_irq_lock); ++ if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)) ++ i915_enable_irq(dev_priv, I915_USER_INTERRUPT); ++ 
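/* Only the first reference actually unmasks the interrupt; ++ * nested users just bump user_irq_refcount under the lock. */ ++ 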
DRM_SPINUNLOCK(&dev_priv->user_irq_lock); ++} ++ ++void i915_user_irq_off(drm_i915_private_t *dev_priv) ++{ ++ DRM_SPINLOCK(&dev_priv->user_irq_lock); ++#ifdef __linux__ ++ BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0); ++#endif ++ if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) ++ i915_disable_irq(dev_priv, I915_USER_INTERRUPT); ++ DRM_SPINUNLOCK(&dev_priv->user_irq_lock); ++} ++ ++ ++int i915_wait_irq(struct drm_device * dev, int irq_nr) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ int ret = 0; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, ++ READ_BREADCRUMB(dev_priv)); ++ ++ if (READ_BREADCRUMB(dev_priv) >= irq_nr) { ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->last_dispatch = ++ READ_BREADCRUMB(dev_priv); ++ return 0; ++ } ++ ++ i915_user_irq_on(dev_priv); ++ DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, ++ READ_BREADCRUMB(dev_priv) >= irq_nr); ++ i915_user_irq_off(dev_priv); ++ ++ if (ret == -EBUSY) { ++ DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", ++ READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); ++ } ++ ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->last_dispatch = ++ READ_BREADCRUMB(dev_priv); ++ return ret; ++} ++ ++/* Needs the lock as it touches the ring. ++ */ ++int i915_irq_emit(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_irq_emit_t *emit = data; ++ int result; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ result = i915_emit_irq(dev); ++ ++ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++/* Doesn't need the hardware lock. 
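++ * i915_wait_irq() only polls the breadcrumb in the hardware status ++ * page and sleeps on irq_queue; it never emits ring commands itself. 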
++ */ ++int i915_irq_wait(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_irq_wait_t *irqwait = data; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ return i915_wait_irq(dev, irqwait->irq_seq); ++} ++ ++int i915_enable_vblank(struct drm_device *dev, int plane) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ int pipe = i915_get_pipe(dev, plane); ++ u32 pipestat_reg = 0; ++ u32 mask_reg = 0; ++ u32 pipestat; ++ ++ switch (pipe) { ++ case 0: ++ pipestat_reg = PIPEASTAT; ++ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; ++ break; ++ case 1: ++ pipestat_reg = PIPEBSTAT; ++ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; ++ break; ++ default: ++ DRM_ERROR("tried to enable vblank on non-existent pipe %d\n", ++ pipe); ++ break; ++ } ++ ++ if (pipestat_reg) ++ { ++ pipestat = I915_READ (pipestat_reg); ++ /* ++ * Older chips didn't have the start vblank interrupt, ++ * so use the plain vblank interrupt there instead. ++ */ ++ if (IS_I965G (dev)) ++ pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE; ++ else ++ pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE; ++ /* ++ * Clear any pending status ++ */ ++ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | ++ PIPE_VBLANK_INTERRUPT_STATUS); ++ I915_WRITE(pipestat_reg, pipestat); ++ } ++ DRM_SPINLOCK(&dev_priv->user_irq_lock); ++ i915_enable_irq(dev_priv, mask_reg); ++ DRM_SPINUNLOCK(&dev_priv->user_irq_lock); ++ ++ return 0; ++} ++ ++void i915_disable_vblank(struct drm_device *dev, int plane) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ int pipe = i915_get_pipe(dev, plane); ++ u32 pipestat_reg = 0; ++ u32 mask_reg = 0; ++ u32 pipestat; ++ ++ switch (pipe) { ++ case 0: ++ pipestat_reg = PIPEASTAT; ++ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; ++ break; ++ case 1: ++ pipestat_reg = PIPEBSTAT; ++ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; ++ break; ++ default: ++ DRM_ERROR("tried to disable vblank on non-existent pipe %d\n", ++ pipe); ++ break; ++ } ++ ++ DRM_SPINLOCK(&dev_priv->user_irq_lock); ++ i915_disable_irq(dev_priv, mask_reg); ++ DRM_SPINUNLOCK(&dev_priv->user_irq_lock); ++ ++ if (pipestat_reg) ++ { ++ pipestat = I915_READ (pipestat_reg); ++ pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | ++ PIPE_VBLANK_INTERRUPT_ENABLE); ++ /* ++ * Clear any pending status ++ */ ++ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | ++ PIPE_VBLANK_INTERRUPT_STATUS); ++ I915_WRITE(pipestat_reg, pipestat); ++ (void) I915_READ(pipestat_reg); ++ } ++} ++ ++static void i915_enable_interrupt (struct drm_device *dev) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ ++ dev_priv->irq_mask_reg = ~0; ++ I915_WRITE(IMR, dev_priv->irq_mask_reg); ++ I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK); ++ (void) I915_READ (IER); ++ ++#ifdef __linux__ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++ opregion_enable_asle(dev); ++#endif ++#endif ++ ++ dev_priv->irq_enabled = 1; ++} ++ ++/* Set the vblank monitor pipe ++ */ ++int i915_vblank_pipe_set(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++int i915_vblank_pipe_get(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_vblank_pipe_t *pipe = data; ++ ++ if 
(!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; ++ ++ return 0; ++} ++ ++/** ++ * Schedule buffer swap at given vertical blank. ++ */ ++int i915_vblank_swap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_vblank_swap_t *swap = data; ++ drm_i915_vbl_swap_t *vbl_swap; ++ unsigned int pipe, seqtype, curseq, plane; ++ unsigned long irqflags; ++ struct list_head *list; ++ int ret; ++ ++ if (!dev_priv) { ++ DRM_ERROR("%s called with no initialization\n", __func__); ++ return -EINVAL; ++ } ++ ++ if (!dev_priv->sarea_priv || dev_priv->sarea_priv->rotation) { ++ DRM_DEBUG("Rotation not supported\n"); ++ return -EINVAL; ++ } ++ ++ if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | ++ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS | ++ _DRM_VBLANK_FLIP)) { ++ DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype); ++ return -EINVAL; ++ } ++ ++ plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; ++ pipe = i915_get_pipe(dev, plane); ++ ++ seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); ++ ++ if (!(dev_priv->vblank_pipe & (1 << pipe))) { ++ DRM_ERROR("Invalid pipe %d\n", pipe); ++ return -EINVAL; ++ } ++ ++ DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags); ++ ++ /* It makes no sense to schedule a swap for a drawable that doesn't have ++ * valid information at this point. E.g. this could mean that the X ++ * server is too old to push drawable information to the DRM, in which ++ * case all such swaps would become ineffective. ++ */ ++ if (!drm_get_drawable_info(dev, swap->drawable)) { ++ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); ++ DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable); ++ return -EINVAL; ++ } ++ ++ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); ++ ++ /* ++ * We take the ref here and put it when the swap actually completes ++ * in the tasklet. 
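++ * All failure paths between here and the list insertion drop that ++ * reference again with drm_vblank_put(). 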
++ */ ++ ret = drm_vblank_get(dev, pipe); ++ if (ret) ++ return ret; ++ curseq = drm_vblank_count(dev, pipe); ++ ++ if (seqtype == _DRM_VBLANK_RELATIVE) ++ swap->sequence += curseq; ++ ++ if ((curseq - swap->sequence) <= (1<<23)) { ++ if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) { ++ swap->sequence = curseq + 1; ++ } else { ++ DRM_DEBUG("Missed target sequence\n"); ++ drm_vblank_put(dev, pipe); ++ return -EINVAL; ++ } ++ } ++ ++ if (swap->seqtype & _DRM_VBLANK_FLIP) { ++ swap->sequence--; ++ ++ if ((curseq - swap->sequence) <= (1<<23)) { ++ struct drm_drawable_info *drw; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags); ++ ++ drw = drm_get_drawable_info(dev, swap->drawable); ++ ++ if (!drw) { ++ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, ++ irqflags); ++ DRM_DEBUG("Invalid drawable ID %d\n", ++ swap->drawable); ++ drm_vblank_put(dev, pipe); ++ return -EINVAL; ++ } ++ ++ i915_dispatch_vsync_flip(dev, drw, plane); ++ ++ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); ++ ++ drm_vblank_put(dev, pipe); ++ return 0; ++ } ++ } ++ ++ DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags); ++ ++ list_for_each(list, &dev_priv->vbl_swaps.head) { ++ vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); ++ ++ if (vbl_swap->drw_id == swap->drawable && ++ vbl_swap->plane == plane && ++ vbl_swap->sequence == swap->sequence) { ++ vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP); ++ DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); ++ DRM_DEBUG("Already scheduled\n"); ++ return 0; ++ } ++ } ++ ++ DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); ++ ++ if (dev_priv->swaps_pending >= 100) { ++ DRM_DEBUG("Too many swaps queued\n"); ++ drm_vblank_put(dev, pipe); ++ return -EBUSY; ++ } ++ ++ vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER); ++ ++ if (!vbl_swap) { ++ DRM_ERROR("Failed to allocate memory to queue swap\n"); ++ drm_vblank_put(dev, pipe); ++ return -ENOMEM; ++ } ++ ++ DRM_DEBUG("\n"); ++ ++ vbl_swap->drw_id = swap->drawable; ++ vbl_swap->plane = plane; ++ vbl_swap->sequence = swap->sequence; ++ vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP); ++ ++ if (vbl_swap->flip) ++ swap->sequence++; ++ ++ DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags); ++ ++ list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head); ++ dev_priv->swaps_pending++; ++ ++ DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); ++ ++ return 0; ++} ++ ++/* drm_dma.h hooks ++*/ ++void i915_driver_irq_preinstall(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ ++ I915_WRITE16(HWSTAM, 0xeffe); ++ I915_WRITE16(IMR, 0x0); ++ I915_WRITE16(IER, 0x0); ++} ++ ++int i915_driver_irq_postinstall(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ int ret, num_pipes = 2; ++ ++ INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); ++ dev_priv->swaps_pending = 0; ++ ++ dev_priv->user_irq_refcount = 0; ++ dev_priv->irq_mask_reg = ~0; ++ ++ ret = drm_vblank_init(dev, num_pipes); ++ if (ret) ++ return ret; ++ ++ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; ++ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ ++ ++ i915_enable_interrupt(dev); ++ DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); ++ ++ /* ++ * Initialize the hardware status page IRQ location. 
++ */ ++ ++ I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); ++ return 0; ++} ++ ++void i915_driver_irq_uninstall(struct drm_device * dev) ++{ ++ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ++ u32 temp; ++ ++ if (!dev_priv) ++ return; ++ ++ dev_priv->vblank_pipe = 0; ++ ++ dev_priv->irq_enabled = 0; ++ I915_WRITE(HWSTAM, 0xffffffff); ++ I915_WRITE(IMR, 0xffffffff); ++ I915_WRITE(IER, 0x0); ++ ++ temp = I915_READ(PIPEASTAT); ++ I915_WRITE(PIPEASTAT, temp); ++ temp = I915_READ(PIPEBSTAT); ++ I915_WRITE(PIPEBSTAT, temp); ++ temp = I915_READ(IIR); ++ I915_WRITE(IIR, temp); ++} +diff -Nurd git/drivers/gpu/drm-tungsten/i915_mem.c git-nokia/drivers/gpu/drm-tungsten/i915_mem.c +--- git/drivers/gpu/drm-tungsten/i915_mem.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_mem.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,386 @@ ++/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*- ++ */ ++/* ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++/* This memory manager is integrated into the global/local lru ++ * mechanisms used by the clients. Specifically, it operates by ++ * setting the 'in_use' fields of the global LRU to indicate whether ++ * this region is privately allocated to a client. ++ * ++ * This does require the client to actually respect that field. ++ * ++ * Currently no effort is made to allocate 'private' memory in any ++ * clever way - the LRU information isn't used to determine which ++ * block to allocate, and the ring is drained prior to allocations -- ++ * in other words allocation is expensive. ++ */ ++static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ struct drm_tex_region *list; ++ unsigned shift, nr; ++ unsigned start; ++ unsigned end; ++ unsigned i; ++ int age; ++ ++ shift = dev_priv->tex_lru_log_granularity; ++ nr = I915_NR_TEX_REGIONS; ++ ++ start = p->start >> shift; ++ end = (p->start + p->size - 1) >> shift; ++ ++ age = ++sarea_priv->texAge; ++ list = sarea_priv->texList; ++ ++ /* Mark the regions with the new flag and update their age. 
Move ++ * them to head of list to preserve LRU semantics. ++ */ ++ for (i = start; i <= end; i++) { ++ list[i].in_use = in_use; ++ list[i].age = age; ++ ++ /* remove_from_list(i) ++ */ ++ list[(unsigned)list[i].next].prev = list[i].prev; ++ list[(unsigned)list[i].prev].next = list[i].next; ++ ++ /* insert_at_head(list, i) ++ */ ++ list[i].prev = nr; ++ list[i].next = list[nr].next; ++ list[(unsigned)list[nr].next].prev = i; ++ list[nr].next = i; ++ } ++} ++ ++/* Very simple allocator for agp memory, working on a static range ++ * already mapped into each client's address space. ++ */ ++ ++static struct mem_block *split_block(struct mem_block *p, int start, int size, ++ struct drm_file *file_priv) ++{ ++ /* Maybe cut off the start of an existing block */ ++ if (start > p->start) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS); ++ if (!newblock) ++ goto out; ++ newblock->start = start; ++ newblock->size = p->size - (start - p->start); ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size -= newblock->size; ++ p = newblock; ++ } ++ ++ /* Maybe cut off the end of an existing block */ ++ if (size < p->size) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS); ++ if (!newblock) ++ goto out; ++ newblock->start = start + size; ++ newblock->size = p->size - size; ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size = size; ++ } ++ ++ out: ++ /* Our block is in the middle */ ++ p->file_priv = file_priv; ++ return p; ++} ++ ++static struct mem_block *alloc_block(struct mem_block *heap, int size, ++ int align2, struct drm_file *file_priv) ++{ ++ struct mem_block *p; ++ int mask = (1 << align2) - 1; ++ ++ for (p = heap->next; p != heap; p = p->next) { ++ int start = (p->start + mask) & ~mask; ++ if (p->file_priv == NULL && start + size <= p->start + p->size) ++ return split_block(p, start, size, file_priv); ++ } ++ ++ return NULL; ++} ++ ++static struct mem_block *find_block(struct mem_block *heap, int start) ++{ ++ struct mem_block *p; ++ ++ for (p = heap->next; p != heap; p = p->next) ++ if (p->start == start) ++ return p; ++ ++ return NULL; ++} ++ ++static void free_block(struct mem_block *p) ++{ ++ p->file_priv = NULL; ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. ++ */ ++ if (p->next->file_priv == NULL) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); ++ } ++ ++ if (p->prev->file_priv == NULL) { ++ struct mem_block *q = p->prev; ++ q->size += p->size; ++ q->next = p->next; ++ q->next->prev = q; ++ drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS); ++ } ++} ++ ++/* Initialize. How to check for an uninitialized heap? 
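++ * (Callers test *heap against NULL; init_heap() below also marks the ++ * heap sentinel with file_priv == -1 so it is never treated as free.) 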
++ */ ++static int init_heap(struct mem_block **heap, int start, int size) ++{ ++ struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS); ++ ++ if (!blocks) ++ return -ENOMEM; ++ ++ *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS); ++ if (!*heap) { ++ drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS); ++ return -ENOMEM; ++ } ++ ++ blocks->start = start; ++ blocks->size = size; ++ blocks->file_priv = NULL; ++ blocks->next = blocks->prev = *heap; ++ ++ memset(*heap, 0, sizeof(**heap)); ++ (*heap)->file_priv = (struct drm_file *) - 1; ++ (*heap)->next = (*heap)->prev = blocks; ++ return 0; ++} ++ ++/* Free all blocks associated with the releasing file. ++ */ ++void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv, ++ struct mem_block *heap) ++{ ++ struct mem_block *p; ++ ++ if (!heap || !heap->next) ++ return; ++ ++ for (p = heap->next; p != heap; p = p->next) { ++ if (p->file_priv == file_priv) { ++ p->file_priv = NULL; ++ mark_block(dev, p, 0); ++ } ++ } ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. ++ */ ++ for (p = heap->next; p != heap; p = p->next) { ++ while (p->file_priv == NULL && p->next->file_priv == NULL) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); ++ } ++ } ++} ++ ++/* Shutdown. ++ */ ++void i915_mem_takedown(struct mem_block **heap) ++{ ++ struct mem_block *p; ++ ++ if (!*heap) ++ return; ++ ++ for (p = (*heap)->next; p != *heap;) { ++ struct mem_block *q = p; ++ p = p->next; ++ drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS); ++ } ++ ++ drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS); ++ *heap = NULL; ++} ++ ++static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region) ++{ ++ switch (region) { ++ case I915_MEM_REGION_AGP: ++ return &dev_priv->agp_heap; ++ default: ++ return NULL; ++ } ++} ++ ++/* IOCTL HANDLERS */ ++ ++int i915_mem_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_mem_alloc_t *alloc = data; ++ struct mem_block *block, **heap; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ heap = get_heap(dev_priv, alloc->region); ++ if (!heap || !*heap) ++ return -EFAULT; ++ ++ /* Make things easier on ourselves: all allocations at least ++ * 4k aligned. 
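++ * (alignment is a log2 value, so clamping it to 12 below enforces ++ * 1 << 12 == 4096 byte alignment in alloc_block().) 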
++ */ ++ if (alloc->alignment < 12) ++ alloc->alignment = 12; ++ ++ block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv); ++ ++ if (!block) ++ return -ENOMEM; ++ ++ mark_block(dev, block, 1); ++ ++ if (DRM_COPY_TO_USER(alloc->region_offset, &block->start, ++ sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++int i915_mem_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_mem_free_t *memfree = data; ++ struct mem_block *block, **heap; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ heap = get_heap(dev_priv, memfree->region); ++ if (!heap || !*heap) ++ return -EFAULT; ++ ++ block = find_block(*heap, memfree->region_offset); ++ if (!block) ++ return -EFAULT; ++ ++ if (block->file_priv != file_priv) ++ return -EPERM; ++ ++ mark_block(dev, block, 0); ++ free_block(block); ++ return 0; ++} ++ ++int i915_mem_init_heap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_mem_init_heap_t *initheap = data; ++ struct mem_block **heap; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ heap = get_heap(dev_priv, initheap->region); ++ if (!heap) ++ return -EFAULT; ++ ++ if (*heap) { ++ DRM_ERROR("heap already initialized?"); ++ return -EFAULT; ++ } ++ ++ return init_heap(heap, initheap->start, initheap->size); ++} ++ ++int i915_mem_destroy_heap( struct drm_device *dev, void *data, ++ struct drm_file *file_priv ) ++{ ++ drm_i915_private_t *dev_priv = dev->dev_private; ++ drm_i915_mem_destroy_heap_t *destroyheap = data; ++ struct mem_block **heap; ++ ++ if ( !dev_priv ) { ++ DRM_ERROR( "called with no initialization\n" ); ++ return -EINVAL; ++ } ++ ++ heap = get_heap( dev_priv, destroyheap->region ); ++ if (!heap) { ++ DRM_ERROR("get_heap failed"); ++ return -EFAULT; ++ } ++ ++ if (!*heap) { ++ DRM_ERROR("heap not initialized?"); ++ return -EFAULT; ++ } ++ ++ i915_mem_takedown( heap ); ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/i915_opregion.c git-nokia/drivers/gpu/drm-tungsten/i915_opregion.c +--- git/drivers/gpu/drm-tungsten/i915_opregion.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_opregion.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,389 @@ ++/* ++ * ++ * Copyright 2008 Intel Corporation ++ * Copyright 2008 Red Hat ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NON-INFRINGEMENT. 
IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ * SOFTWARE. ++ * ++ */ ++ ++#include <linux/acpi.h> ++ ++#include "drmP.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) ++#define PCI_ASLE 0xe4 ++#define PCI_ASLS 0xfc ++ ++#define OPREGION_SZ (8*1024) ++#define OPREGION_HEADER_OFFSET 0 ++#define OPREGION_ACPI_OFFSET 0x100 ++#define OPREGION_SWSCI_OFFSET 0x200 ++#define OPREGION_ASLE_OFFSET 0x300 ++#define OPREGION_VBT_OFFSET 0x1000 ++ ++#define OPREGION_SIGNATURE "IntelGraphicsMem" ++#define MBOX_ACPI (1<<0) ++#define MBOX_SWSCI (1<<1) ++#define MBOX_ASLE (1<<2) ++ ++/* _DOD id definitions */ ++#define OUTPUT_CONNECTOR_MSK 0xf000 ++#define OUTPUT_CONNECTOR_OFFSET 12 ++ ++#define OUTPUT_PORT_MSK 0x00f0 ++#define OUTPUT_PORT_OFFSET 4 ++ #define OUTPUT_PORT_ANALOG 0 ++ #define OUTPUT_PORT_LVDS 1 ++ #define OUTPUT_PORT_SDVOB 2 ++ #define OUTPUT_PORT_SDVOC 3 ++ #define OUTPUT_PORT_TV 4 ++ ++#define OUTPUT_DISPLAY_MSK 0x0f00 ++#define OUTPUT_DISPLAY_OFFSET 8 ++ #define OUTPUT_DISPLAY_OTHER 0 ++ #define OUTPUT_DISPLAY_VGA 1 ++ #define OUTPUT_DISPLAY_TV 2 ++ #define OUTPUT_DISPLAY_DIGI 3 ++ #define OUTPUT_DISPLAY_FLAT_PANEL 4 ++ ++/* predefined id for integrated LVDS and VGA connector */ ++#define OUTPUT_INT_LVDS 0x00000110 ++#define OUTPUT_INT_VGA 0x80000100 ++ ++struct opregion_header { ++ u8 signature[16]; ++ u32 size; ++ u32 opregion_ver; ++ u8 bios_ver[32]; ++ u8 vbios_ver[16]; ++ u8 driver_ver[16]; ++ u32 mboxes; ++ u8 reserved[164]; ++} __attribute__((packed)); ++ ++/* OpRegion mailbox #1: public ACPI methods */ ++struct opregion_acpi { ++ u32 drdy; /* driver readiness */ ++ u32 csts; /* notification status */ ++ u32 cevt; /* current event */ ++ u8 rsvd1[20]; ++ u32 didl[8]; /* supported display devices ID list */ ++ u32 cpdl[8]; /* currently presented display list */ ++ u32 cadl[8]; /* currently active display list */ ++ u32 nadl[8]; /* next active devices list */ ++ u32 aslp; /* ASL sleep time-out */ ++ u32 tidx; /* toggle table index */ ++ u32 chpd; /* current hotplug enable indicator */ ++ u32 clid; /* current lid state */ ++ u32 cdck; /* current docking state */ ++ u32 sxsw; /* Sx state resume */ ++ u32 evts; /* ASL supported events */ ++ u32 cnot; /* current OS notification */ ++ u32 nrdy; /* driver status */ ++ u8 rsvd2[60]; ++} __attribute__((packed)); ++ ++/* OpRegion mailbox #2: SWSCI */ ++struct opregion_swsci { ++ u32 scic; /* SWSCI command|status|data */ ++ u32 parm; /* command parameters */ ++ u32 dslp; /* driver sleep time-out */ ++ u8 rsvd[244]; ++} __attribute__((packed)); ++ ++/* OpRegion mailbox #3: ASLE */ ++struct opregion_asle { ++ u32 ardy; /* driver readiness */ ++ u32 aslc; /* ASLE interrupt command */ ++ u32 tche; /* technology enabled indicator */ ++ u32 alsi; /* current ALS illuminance reading */ ++ u32 bclp; /* backlight brightness to set */ ++ u32 pfit; /* panel fitting state */ ++ u32 cblv; /* current brightness level */ ++ u16 bclm[20]; /* backlight level duty cycle mapping table */ ++ u32 cpfm; /* current panel fitting mode */ ++ u32 epfm; /* enabled panel fitting modes */ ++ u8 plut[74]; /* panel LUT and identifier */ ++ u32 pfmb; /* PWM freq and min brightness */ ++ u8 rsvd[102]; ++} __attribute__((packed)); ++ ++/* ASLE irq request bits */ ++#define ASLE_SET_ALS_ILLUM (1 << 0) ++#define ASLE_SET_BACKLIGHT (1 
<< 1) ++#define ASLE_SET_PFIT (1 << 2) ++#define ASLE_SET_PWM_FREQ (1 << 3) ++#define ASLE_REQ_MSK 0xf ++ ++/* response bits of ASLE irq request */ ++#define ASLE_ALS_ILLUM_FAIL (2<<10) ++#define ASLE_BACKLIGHT_FAIL (2<<12) ++#define ASLE_PFIT_FAIL (2<<14) ++#define ASLE_PWM_FREQ_FAIL (2<<16) ++ ++/* ASLE backlight brightness to set */ ++#define ASLE_BCLP_VALID (1<<31) ++#define ASLE_BCLP_MSK (~(1<<31)) ++ ++/* ASLE panel fitting request */ ++#define ASLE_PFIT_VALID (1<<31) ++#define ASLE_PFIT_CENTER (1<<0) ++#define ASLE_PFIT_STRETCH_TEXT (1<<1) ++#define ASLE_PFIT_STRETCH_GFX (1<<2) ++ ++/* PWM frequency and minimum brightness */ ++#define ASLE_PFMB_BRIGHTNESS_MASK (0xff) ++#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8) ++#define ASLE_PFMB_PWM_MASK (0x7ffffe00) ++#define ASLE_PFMB_PWM_VALID (1<<31) ++ ++#define ASLE_CBLV_VALID (1<<31) ++ ++static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct opregion_asle *asle = dev_priv->opregion.asle; ++ u32 blc_pwm_ctl; ++ ++ if (!(bclp & ASLE_BCLP_VALID)) ++ return ASLE_BACKLIGHT_FAIL; ++ ++ bclp &= ASLE_BCLP_MSK; ++ if (bclp < 0 || bclp > 255) ++ return ASLE_BACKLIGHT_FAIL; ++ ++ blc_pwm_ctl = I915_READ(BLC_PWM_CTL); ++ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; ++ I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101) -1)); ++ asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; ++ ++ return 0; ++} ++ ++static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) ++{ ++ return 0; ++} ++ ++static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ if (pfmb & ASLE_PFMB_PWM_VALID) { ++ u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); ++ u32 pwm = pfmb & ASLE_PFMB_PWM_MASK; ++ blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK; ++ pwm = pwm >> 9; ++ // FIXME - what do we do with the PWM? 
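++ // (as it stands, the masked-off pwm value is computed here but ++ // never written back to BLC_PWM_CTL) 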
++ } ++ return 0; ++} ++ ++static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) ++{ ++ if (!(pfit & ASLE_PFIT_VALID)) ++ return ASLE_PFIT_FAIL; ++ return 0; ++} ++ ++void opregion_asle_intr(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct opregion_asle *asle = dev_priv->opregion.asle; ++ u32 asle_stat = 0; ++ u32 asle_req; ++ ++ if (!asle) ++ return; ++ ++ asle_req = asle->aslc & ASLE_REQ_MSK; ++ ++ if (!asle_req) { ++ DRM_DEBUG("non asle set request??\n"); ++ return; ++ } ++ ++ if (asle_req & ASLE_SET_ALS_ILLUM) ++ asle_stat |= asle_set_als_illum(dev, asle->alsi); ++ ++ if (asle_req & ASLE_SET_BACKLIGHT) ++ asle_stat |= asle_set_backlight(dev, asle->bclp); ++ ++ if (asle_req & ASLE_SET_PFIT) ++ asle_stat |= asle_set_pfit(dev, asle->pfit); ++ ++ if (asle_req & ASLE_SET_PWM_FREQ) ++ asle_stat |= asle_set_pwm_freq(dev, asle->pfmb); ++ ++ asle->aslc = asle_stat; ++} ++ ++#define ASLE_ALS_EN (1<<0) ++#define ASLE_BLC_EN (1<<1) ++#define ASLE_PFIT_EN (1<<2) ++#define ASLE_PFMB_EN (1<<3) ++ ++void opregion_enable_asle(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct opregion_asle *asle = dev_priv->opregion.asle; ++ ++ if (asle) { ++ if (IS_MOBILE(dev)) { ++ u32 pipeb_stats = I915_READ(PIPEBSTAT); ++ /* Some hardware uses the legacy backlight controller ++ to signal interrupts, so we need to set up pipe B ++ to generate an IRQ on writes */ ++ pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE; ++ I915_WRITE(PIPEBSTAT, pipeb_stats); ++ ++ dev_priv->irq_mask_reg &= ++ ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; ++ } ++ ++ dev_priv->irq_mask_reg &= ~I915_ASLE_INTERRUPT; ++ ++ asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | ++ ASLE_PFMB_EN; ++ asle->ardy = 1; ++ } ++} ++ ++#define ACPI_EV_DISPLAY_SWITCH (1<<0) ++#define ACPI_EV_LID (1<<1) ++#define ACPI_EV_DOCK (1<<2) ++ ++static struct intel_opregion *system_opregion; ++ ++int intel_opregion_video_event(struct notifier_block *nb, unsigned long val, ++ void *data) ++{ ++ /* The only video events relevant to opregion are 0x80. These indicate ++ either a docking event, lid switch or display switch request. In ++ Linux, these are handled by the dock, button and video drivers. 
++ We might want to fix the video driver to be opregion-aware in ++ future, but right now we just indicate to the firmware that the ++ request has been handled */ ++ ++ struct opregion_acpi *acpi; ++ ++ if (!system_opregion) ++ return NOTIFY_DONE; ++ ++ acpi = system_opregion->acpi; ++ acpi->csts = 0; ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block intel_opregion_notifier = { ++ .notifier_call = intel_opregion_video_event, ++}; ++ ++int intel_opregion_init(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_opregion *opregion = &dev_priv->opregion; ++ void *base; ++ u32 asls, mboxes; ++ int err = 0; ++ ++ pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); ++ DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls); ++ if (asls == 0) { ++ DRM_DEBUG("ACPI OpRegion not supported!\n"); ++ return -ENOTSUPP; ++ } ++ ++ base = ioremap(asls, OPREGION_SZ); ++ if (!base) ++ return -ENOMEM; ++ ++ opregion->header = base; ++ if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { ++ DRM_DEBUG("opregion signature mismatch\n"); ++ err = -EINVAL; ++ goto err_out; ++ } ++ ++ mboxes = opregion->header->mboxes; ++ if (mboxes & MBOX_ACPI) { ++ DRM_DEBUG("Public ACPI methods supported\n"); ++ opregion->acpi = base + OPREGION_ACPI_OFFSET; ++ } else { ++ DRM_DEBUG("Public ACPI methods not supported\n"); ++ err = -ENOTSUPP; ++ goto err_out; ++ } ++ opregion->enabled = 1; ++ ++ if (mboxes & MBOX_SWSCI) { ++ DRM_DEBUG("SWSCI supported\n"); ++ opregion->swsci = base + OPREGION_SWSCI_OFFSET; ++ } ++ if (mboxes & MBOX_ASLE) { ++ DRM_DEBUG("ASLE supported\n"); ++ opregion->asle = base + OPREGION_ASLE_OFFSET; ++ } ++ ++ /* Notify BIOS we are ready to handle ACPI video ext notifs. ++ * Right now, all the events are handled by the ACPI video module. ++ * We don't actually need to do anything with them. */ ++ opregion->acpi->csts = 0; ++ opregion->acpi->drdy = 1; ++ ++ system_opregion = opregion; ++ register_acpi_notifier(&intel_opregion_notifier); ++ ++ return 0; ++ ++err_out: ++ iounmap(opregion->header); ++ opregion->header = NULL; ++ return err; ++} ++ ++void intel_opregion_free(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ struct intel_opregion *opregion = &dev_priv->opregion; ++ ++ if (!opregion->enabled) ++ return; ++ ++ opregion->acpi->drdy = 0; ++ ++ system_opregion = NULL; ++ unregister_acpi_notifier(&intel_opregion_notifier); ++ ++ /* just clear all opregion memory pointers now */ ++ iounmap(opregion->header); ++ opregion->header = NULL; ++ opregion->acpi = NULL; ++ opregion->swsci = NULL; ++ opregion->asle = NULL; ++ ++ opregion->enabled = 0; ++} ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/i915_suspend.c git-nokia/drivers/gpu/drm-tungsten/i915_suspend.c +--- git/drivers/gpu/drm-tungsten/i915_suspend.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/i915_suspend.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,520 @@ ++/* i915_suspend.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- ++ */ ++/* ++ * ++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "i915_drm.h" ++#include "i915_drv.h" ++ ++static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ if (pipe == PIPE_A) ++ return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE); ++ else ++ return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE); ++} ++ ++static void i915_save_palette(struct drm_device *dev, enum pipe pipe) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); ++ u32 *array; ++ int i; ++ ++ if (!i915_pipe_enabled(dev, pipe)) ++ return; ++ ++ if (pipe == PIPE_A) ++ array = dev_priv->save_palette_a; ++ else ++ array = dev_priv->save_palette_b; ++ ++ for(i = 0; i < 256; i++) ++ array[i] = I915_READ(reg + (i << 2)); ++} ++ ++static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ unsigned long reg = (pipe == PIPE_A ? 
PALETTE_A : PALETTE_B); ++ u32 *array; ++ int i; ++ ++ if (!i915_pipe_enabled(dev, pipe)) ++ return; ++ ++ if (pipe == PIPE_A) ++ array = dev_priv->save_palette_a; ++ else ++ array = dev_priv->save_palette_b; ++ ++ for(i = 0; i < 256; i++) ++ I915_WRITE(reg + (i << 2), array[i]); ++} ++ ++static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ I915_WRITE8(index_port, reg); ++ return I915_READ8(data_port); ++} ++ ++static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ I915_READ8(st01); ++ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); ++ return I915_READ8(VGA_AR_DATA_READ); ++} ++ ++static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ I915_READ8(st01); ++ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); ++ I915_WRITE8(VGA_AR_DATA_WRITE, val); ++} ++ ++static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ ++ I915_WRITE8(index_port, reg); ++ I915_WRITE8(data_port, val); ++} ++ ++static void i915_save_vga(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ int i; ++ u16 cr_index, cr_data, st01; ++ ++ /* VGA color palette registers */ ++ dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); ++ /* DACCRX automatically increments during read */ ++ I915_WRITE8(VGA_DACRX, 0); ++ /* Read 3 bytes of color data from each index */ ++ for (i = 0; i < 256 * 3; i++) ++ dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA); ++ ++ /* MSR bits */ ++ dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); ++ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { ++ cr_index = VGA_CR_INDEX_CGA; ++ cr_data = VGA_CR_DATA_CGA; ++ st01 = VGA_ST01_CGA; ++ } else { ++ cr_index = VGA_CR_INDEX_MDA; ++ cr_data = VGA_CR_DATA_MDA; ++ st01 = VGA_ST01_MDA; ++ } ++ ++ /* CRT controller regs */ ++ i915_write_indexed(dev, cr_index, cr_data, 0x11, ++ i915_read_indexed(dev, cr_index, cr_data, 0x11) & ++ (~0x80)); ++ for (i = 0; i <= 0x24; i++) ++ dev_priv->saveCR[i] = ++ i915_read_indexed(dev, cr_index, cr_data, i); ++ /* Make sure we don't turn off CR group 0 writes */ ++ dev_priv->saveCR[0x11] &= ~0x80; ++ ++ /* Attribute controller registers */ ++ I915_READ8(st01); ++ dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX); ++ for (i = 0; i <= 0x14; i++) ++ dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0); ++ I915_READ8(st01); ++ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX); ++ I915_READ8(st01); ++ ++ /* Graphics controller registers */ ++ for (i = 0; i < 9; i++) ++ dev_priv->saveGR[i] = ++ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); ++ ++ dev_priv->saveGR[0x10] = ++ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); ++ dev_priv->saveGR[0x11] = ++ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); ++ dev_priv->saveGR[0x18] = ++ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); ++ ++ /* Sequencer registers */ ++ for (i = 0; i < 8; i++) ++ dev_priv->saveSR[i] = ++ i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); ++} ++ ++static void i915_restore_vga(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ int i; ++ u16 cr_index, cr_data, st01; ++ ++ /* MSR bits */ ++ I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR); ++ if (dev_priv->saveMSR 
& VGA_MSR_CGA_MODE) { ++ cr_index = VGA_CR_INDEX_CGA; ++ cr_data = VGA_CR_DATA_CGA; ++ st01 = VGA_ST01_CGA; ++ } else { ++ cr_index = VGA_CR_INDEX_MDA; ++ cr_data = VGA_CR_DATA_MDA; ++ st01 = VGA_ST01_MDA; ++ } ++ ++ /* Sequencer registers, don't write SR07 */ ++ for (i = 0; i < 7; i++) ++ i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, ++ dev_priv->saveSR[i]); ++ ++ /* CRT controller regs */ ++ /* Enable CR group 0 writes */ ++ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); ++ for (i = 0; i <= 0x24; i++) ++ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]); ++ ++ /* Graphics controller regs */ ++ for (i = 0; i < 9; i++) ++ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, ++ dev_priv->saveGR[i]); ++ ++ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, ++ dev_priv->saveGR[0x10]); ++ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, ++ dev_priv->saveGR[0x11]); ++ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, ++ dev_priv->saveGR[0x18]); ++ ++ /* Attribute controller registers */ ++ I915_READ8(st01); /* switch back to index mode */ ++ for (i = 0; i <= 0x14; i++) ++ i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0); ++ I915_READ8(st01); /* switch back to index mode */ ++ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20); ++ I915_READ8(st01); ++ ++ /* VGA color palette registers */ ++ I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); ++ /* DACWX automatically increments during write */ ++ I915_WRITE8(VGA_DACWX, 0); ++ /* Write back 3 bytes of color data for each index */ ++ for (i = 0; i < 256 * 3; i++) ++ I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]); ++ ++} ++ ++int i915_save_state(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ int i; ++ ++#if defined(__FreeBSD__) ++ dev_priv->saveLBB = (u8) pci_read_config(dev->device, LBB, 1); ++#else ++ pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); ++#endif ++ ++ /* Display arbitration control */ ++ dev_priv->saveDSPARB = I915_READ(DSPARB); ++ ++ /* Pipe & plane A info */ ++ dev_priv->savePIPEACONF = I915_READ(PIPEACONF); ++ dev_priv->savePIPEASRC = I915_READ(PIPEASRC); ++ dev_priv->saveFPA0 = I915_READ(FPA0); ++ dev_priv->saveFPA1 = I915_READ(FPA1); ++ dev_priv->saveDPLL_A = I915_READ(DPLL_A); ++ if (IS_I965G(dev)) ++ dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); ++ dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); ++ dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); ++ dev_priv->saveHSYNC_A = I915_READ(HSYNC_A); ++ dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); ++ dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); ++ dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); ++ dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); ++ ++ dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); ++ dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); ++ dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); ++ dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); ++ dev_priv->saveDSPAADDR = I915_READ(DSPAADDR); ++ if (IS_I965G(dev)) { ++ dev_priv->saveDSPASURF = I915_READ(DSPASURF); ++ dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); ++ } ++ i915_save_palette(dev, PIPE_A); ++ dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT); ++ ++ /* Pipe & plane B info */ ++ dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); ++ dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); ++ dev_priv->saveFPB0 = I915_READ(FPB0); ++ dev_priv->saveFPB1 = I915_READ(FPB1); ++ dev_priv->saveDPLL_B = I915_READ(DPLL_B); ++ if (IS_I965G(dev)) ++ dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); ++ dev_priv->saveHTOTAL_B = 
I915_READ(HTOTAL_B); ++ dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); ++ dev_priv->saveHSYNC_B = I915_READ(HSYNC_B); ++ dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); ++ dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); ++ dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); ++ dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); ++ ++ dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); ++ dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); ++ dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); ++ dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); ++ dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); ++ if (IS_I965GM(dev) || IS_GM45(dev)) { ++ dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); ++ dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); ++ } ++ i915_save_palette(dev, PIPE_B); ++ dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); ++ ++ /* CRT state */ ++ dev_priv->saveADPA = I915_READ(ADPA); ++ ++ /* LVDS state */ ++ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); ++ dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); ++ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); ++ if (IS_I965G(dev)) ++ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); ++ if (IS_MOBILE(dev) && !IS_I830(dev)) ++ dev_priv->saveLVDS = I915_READ(LVDS); ++ if (!IS_I830(dev) && !IS_845G(dev)) ++ dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); ++ dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); ++ dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); ++ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); ++ ++ /* FIXME: save TV & SDVO state */ ++ ++ /* FBC state */ ++ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); ++ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); ++ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); ++ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); ++ ++ /* Interrupt state */ ++ dev_priv->saveIIR = I915_READ(IIR); ++ dev_priv->saveIER = I915_READ(IER); ++ dev_priv->saveIMR = I915_READ(IMR); ++ ++ /* VGA state */ ++ dev_priv->saveVGA0 = I915_READ(VGA0); ++ dev_priv->saveVGA1 = I915_READ(VGA1); ++ dev_priv->saveVGA_PD = I915_READ(VGA_PD); ++ dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); ++ ++ /* Clock gating state */ ++ dev_priv->saveD_STATE = I915_READ(D_STATE); ++ dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS); ++ ++ /* Cache mode state */ ++ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); ++ ++ /* Memory Arbitration state */ ++ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); ++ ++ /* Scratch space */ ++ for (i = 0; i < 16; i++) { ++ dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); ++ dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); ++ } ++ for (i = 0; i < 3; i++) ++ dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); ++ ++ i915_save_vga(dev); ++ ++ return 0; ++} ++ ++int i915_restore_state(struct drm_device *dev) ++{ ++ struct drm_i915_private *dev_priv = dev->dev_private; ++ int i; ++ ++#if defined(__FreeBSD__) ++ pci_write_config(dev->device, LBB, dev_priv->saveLBB, 1); ++#else ++ pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); ++#endif ++ ++ I915_WRITE(DSPARB, dev_priv->saveDSPARB); ++ ++ /* Pipe & plane A info */ ++ /* Prime the clock */ ++ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { ++ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & ++ ~DPLL_VCO_ENABLE); ++ DRM_UDELAY(150); ++ } ++ I915_WRITE(FPA0, dev_priv->saveFPA0); ++ I915_WRITE(FPA1, dev_priv->saveFPA1); ++ /* Actually enable it */ ++ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); ++ DRM_UDELAY(150); ++ if (IS_I965G(dev)) ++ I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); ++ DRM_UDELAY(150); ++ ++ /* Restore mode */ ++ 
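/* Give the DPLL time to lock (hence the 150 usec delays above)
++ * before the timing and plane registers are reloaded below. */
++ 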
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); ++ I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); ++ I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); ++ I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); ++ I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); ++ I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); ++ I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); ++ ++ /* Restore plane info */ ++ I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); ++ I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); ++ I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); ++ I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); ++ I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); ++ if (IS_I965G(dev)) { ++ I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); ++ I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); ++ } ++ ++ I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); ++ ++ i915_restore_palette(dev, PIPE_A); ++ /* Enable the plane */ ++ I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); ++ I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); ++ ++ /* Pipe & plane B info */ ++ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { ++ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & ++ ~DPLL_VCO_ENABLE); ++ DRM_UDELAY(150); ++ } ++ I915_WRITE(FPB0, dev_priv->saveFPB0); ++ I915_WRITE(FPB1, dev_priv->saveFPB1); ++ /* Actually enable it */ ++ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); ++ DRM_UDELAY(150); ++ if (IS_I965G(dev)) ++ I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); ++ DRM_UDELAY(150); ++ ++ /* Restore mode */ ++ I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); ++ I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); ++ I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); ++ I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); ++ I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); ++ I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); ++ I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); ++ ++ /* Restore plane info */ ++ I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); ++ I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); ++ I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); ++ I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); ++ I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); ++ if (IS_I965G(dev)) { ++ I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); ++ I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); ++ } ++ ++ I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); ++ ++ i915_restore_palette(dev, PIPE_B); ++ /* Enable the plane */ ++ I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); ++ I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); ++ ++ /* CRT state */ ++ I915_WRITE(ADPA, dev_priv->saveADPA); ++ ++ /* LVDS state */ ++ if (IS_I965G(dev)) ++ I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); ++ if (IS_MOBILE(dev) && !IS_I830(dev)) ++ I915_WRITE(LVDS, dev_priv->saveLVDS); ++ if (!IS_I830(dev) && !IS_845G(dev)) ++ I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); ++ ++ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); ++ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); ++ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); ++ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); ++ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); ++ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); ++ ++ /* FIXME: restore TV & SDVO state */ ++ ++ /* FBC info */ ++ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); ++ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); ++ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); ++ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); ++ ++ /* VGA state */ ++ I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); ++ I915_WRITE(VGA0, dev_priv->saveVGA0); ++ I915_WRITE(VGA1, dev_priv->saveVGA1); ++ I915_WRITE(VGA_PD, 
dev_priv->saveVGA_PD); ++ DRM_UDELAY(150); ++ ++ /* Clock gating state */ ++ I915_WRITE (D_STATE, dev_priv->saveD_STATE); ++ I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS); ++ ++ /* Cache mode state */ ++ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); ++ ++ /* Memory arbitration state */ ++ I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); ++ ++ for (i = 0; i < 16; i++) { ++ I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); ++ I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); ++ } ++ for (i = 0; i < 3; i++) ++ I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); ++ ++ i915_restore_vga(dev); ++ ++ return 0; ++} ++ +diff -Nurd git/drivers/gpu/drm-tungsten/imagine_drv.c git-nokia/drivers/gpu/drm-tungsten/imagine_drv.c +--- git/drivers/gpu/drm-tungsten/imagine_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/imagine_drv.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,85 @@ ++/* ++ * Copyright 2005 Adam Jackson. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * on the rights to use, copy, modify, merge, publish, distribute, sub ++ * license, and/or sell copies of the Software, and to permit persons to whom ++ * the Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++/* derived from tdfx_drv.c */ ++ ++#include "drmP.h" ++#include "imagine_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct drm_driver driver; ++ ++static struct pci_device_id pciidlist[] = { ++ imagine_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++static struct drm_driver driver = { ++ .driver_features = DRIVER_USE_MTRR, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int __init imagine_init(void) ++{ ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit imagine_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(imagine_init); ++module_exit(imagine_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurd git/drivers/gpu/drm-tungsten/Kconfig git-nokia/drivers/gpu/drm-tungsten/Kconfig +--- git/drivers/gpu/drm-tungsten/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/Kconfig 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,130 @@ ++# ++# DRM device configuration from Tungsten Graphics ++# ++# This driver provides support for the ++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. ++# ++# This driver is the Tungsten Graphics alternative to the original ++# in-kernel DRM driver. ++# ++ ++menuconfig DRM_TUNGSTEN ++ tristate "Direct Rendering Manager (Tungsten - XFree86 4.1.0 and higher DRI support)" ++ help ++ Kernel-level support for the Direct Rendering Infrastructure (DRI) ++ introduced in XFree86 4.0. If you say Y here, you need to select ++ the module that's right for your graphics card from the list below. ++ These modules provide support for synchronization, security, and ++ DMA transfers. Please see <http://dri.sourceforge.net/> for more ++ details. You should also select and configure AGP ++ (/dev/agpgart) support. ++ ++config DRM_TUNGSTEN_PVR2D ++ tristate "PVR2D kernel helper" ++ depends on DRM_TUNGSTEN && PVR ++ help ++ Choose this option if you want to give DRI access to cards ++ handled by the Imagination PowerVR framework. If M is selected, ++ the module will be called pvr2d. ++ ++if DRM_TUNGSTEN && (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG ++ ++config DRM_TUNGSTEN_TDFX ++ tristate "3dfx Banshee/Voodoo3+" ++ help ++ Choose this option if you have a 3dfx Banshee or Voodoo3 (or later) ++ graphics card. If M is selected, the module will be called tdfx. ++ ++config DRM_TUNGSTEN_R128 ++ tristate "ATI Rage 128" ++ help ++ Choose this option if you have an ATI Rage 128 graphics card. If M ++ is selected, the module will be called r128. AGP support for ++ this card is strongly suggested (unless you have a PCI version). ++ ++config DRM_TUNGSTEN_RADEON ++ tristate "ATI Radeon" ++ help ++ Choose this option if you have an ATI Radeon graphics card. There ++ are both PCI and AGP versions. You don't need to choose this to ++ run the Radeon in plain VGA mode. 
++ ++ If M is selected, the module will be called radeon. ++ ++config DRM_TUNGSTEN_I810 ++ tristate "Intel I810" ++ depends on AGP && AGP_INTEL ++ help ++ Choose this option if you have an Intel I810 graphics card. If M is ++ selected, the module will be called i810. AGP support is required ++ for this driver to work. ++ ++config DRM_TUNGSTEN_I915 ++ tristate "i915 driver" ++ depends on AGP && AGP_INTEL ++ help ++ Choose this option if you have a system that has Intel 830M, 845G, ++ 852GM, 855GM, 865G or 915G integrated graphics. If M is selected, the ++ module will be called i915. AGP support is required for this driver ++ to work. This driver is used by the Intel driver in X.org 6.8 and ++ XFree86 4.4 and above. If unsure, build this and i830 as modules and ++ the X server will load the correct one. ++ ++config DRM_TUNGSTEN_MGA ++ tristate "Matrox g200/g400" ++ help ++ Choose this option if you have a Matrox G200, G400 or G450 graphics ++ card. If M is selected, the module will be called mga. AGP ++ support is required for this driver to work. ++ ++config DRM_TUNGSTEN_SIS ++ tristate "SiS video cards" ++ depends on AGP ++ help ++ Choose this option if you have a SiS 630 or compatible video ++ chipset. If M is selected the module will be called sis. AGP ++ support is required for this driver to work. ++ ++config DRM_TUNGSTEN_VIA ++ tristate "Via unichrome video cards" ++ help ++ Choose this option if you have a Via unichrome or compatible video ++ chipset. If M is selected the module will be called via. ++ ++config DRM_TUNGSTEN_SAVAGE ++ tristate "Savage video cards" ++ help ++ Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister ++ chipset. If M is selected the module will be called savage. ++ ++config DRM_TUNGSTEN_FFB ++ tristate "Creator/Creator3D direct rendering" ++ help ++ Choose this option to include the Creator/Creator3D direct rendering ++ driver. If M is selected the module will be called ffb. ++ ++config DRM_TUNGSTEN_MACH64 ++ tristate "MACH64 Rage Pro video card" ++ help ++ Choose this option if you have a Mach64 Rage Pro chipset. ++ If M is selected the module will be called mach64. ++ ++config DRM_TUNGSTEN_NV ++ tristate "Nvidia video card (NV driver)" ++ help ++ Choose this option if you have an Nvidia chipset and want to use the ++ original nv driver. If M is selected the module will be called nv. ++ ++config DRM_TUNGSTEN_NOUVEAU ++ tristate "Nvidia video card (Nouveau driver)" ++ help ++ Choose this option if you have an Nvidia chipset and want to use the ++ nouveau driver. If M is selected the module will be called nouveau. ++ ++config DRM_TUNGSTEN_XGI ++ tristate "XGI video card" ++ help ++ Choose this option if you have an XGI chipset. If M is selected the ++ module will be called xgi. ++ ++endif # DRM_TUNGSTEN && (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG ++ +diff -Nurd git/drivers/gpu/drm-tungsten/mach64_dma.c git-nokia/drivers/gpu/drm-tungsten/mach64_dma.c +--- git/drivers/gpu/drm-tungsten/mach64_dma.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/mach64_dma.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1778 @@ ++/* mach64_dma.c -- DMA support for mach64 (Rage Pro) driver -*- linux-c -*- */ ++/** ++ * \file mach64_dma.c ++ * DMA support for mach64 (Rage Pro) driver ++ * ++ * \author Gareth Hughes ++ * \author Frank C. Earl ++ * \author Leif Delgass ++ * \author José Fonseca ++ */ ++ ++/* ++ * Copyright 2000 Gareth Hughes ++ * Copyright 2002 Frank C. Earl ++ * Copyright 2002-2003 Leif Delgass ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mach64_drm.h" ++#include "mach64_drv.h" ++ ++/*******************************************************************/ ++/** \name Engine, FIFO control */ ++/*@{*/ ++ ++/** ++ * Waits for free entries in the FIFO. ++ * ++ * \note Most writes to Mach64 registers are automatically routed through ++ * the command FIFO, which is 16 entries deep. Prior to writing to any draw engine ++ * register one has to ensure that enough FIFO entries are available by calling ++ * this function. Failure to do so may cause the engine to lock. ++ * ++ * \param dev_priv pointer to device private data structure. ++ * \param entries number of free entries in the FIFO to wait for. ++ * ++ * \returns zero on success, or -EBUSY if the timeout (specified by ++ * drm_mach64_private::usec_timeout) occurs. ++ */ ++int mach64_do_wait_for_fifo(drm_mach64_private_t *dev_priv, int entries) ++{ ++ int slots = 0, i; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ slots = (MACH64_READ(MACH64_FIFO_STAT) & MACH64_FIFO_SLOT_MASK); ++ if (slots <= (0x8000 >> entries)) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++ DRM_INFO("failed! slots=%d entries=%d\n", slots, entries); ++ return -EBUSY; ++} ++ ++/** ++ * Wait for the draw engine to be idle. ++ */ ++int mach64_do_wait_for_idle(drm_mach64_private_t *dev_priv) ++{ ++ int i, ret; ++ ++ ret = mach64_do_wait_for_fifo(dev_priv, 16); ++ if (ret < 0) ++ return ret; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++ DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT)); ++ mach64_dump_ring_info(dev_priv); ++ return -EBUSY; ++} ++ ++/** ++ * Wait for free entries in the ring buffer. ++ * ++ * The Mach64 bus master can be configured to act as a virtual FIFO, using a ++ * circular buffer (commonly referred to as a "ring buffer" in other drivers) with ++ * pointers to engine commands. This allows the CPU to do other things while ++ * the graphics engine is busy, i.e., DMA mode. ++ * ++ * This function should be called before writing new entries to the ring ++ * buffer. ++ * ++ * \param dev_priv pointer to device private data structure. ++ * \param n number of free bytes in the ring buffer to wait for. 
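++ *
++ * \par Example
++ * A hypothetical caller reserving room for four ring dwords, as the
++ * BEGIN_RING() macro below does internally:
++ * \code
++ * if (mach64_wait_ring(dev_priv, 4 * sizeof(u32)) < 0)
++ *         return -EBUSY;
++ * \endcode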
++ * ++ * \returns zero on success, or -EBUSY if the timeout (specified by ++ * drm_mach64_private_t::usec_timeout) occurs. ++ * ++ * \sa mach64_dump_ring_info() ++ */ ++int mach64_wait_ring(drm_mach64_private_t *dev_priv, int n) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ int i; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ mach64_update_ring_snapshot(dev_priv); ++ if (ring->space >= n) { ++ if (i > 0) ++ DRM_DEBUG("%d usecs\n", i); ++ return 0; ++ } ++ DRM_UDELAY(1); ++ } ++ ++ /* FIXME: This is being ignored... */ ++ DRM_ERROR("failed!\n"); ++ mach64_dump_ring_info(dev_priv); ++ return -EBUSY; ++} ++ ++/** ++ * Wait until all DMA requests have been processed... ++ * ++ * \sa mach64_wait_ring() ++ */ ++static int mach64_ring_idle(drm_mach64_private_t *dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ u32 head; ++ int i; ++ ++ head = ring->head; ++ i = 0; ++ while (i < dev_priv->usec_timeout) { ++ mach64_update_ring_snapshot(dev_priv); ++ if (ring->head == ring->tail && ++ !(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) { ++ if (i > 0) ++ DRM_DEBUG("%d usecs\n", i); ++ return 0; ++ } ++ if (ring->head == head) { ++ ++i; ++ } else { ++ head = ring->head; ++ i = 0; ++ } ++ DRM_UDELAY(1); ++ } ++ ++ DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT)); ++ mach64_dump_ring_info(dev_priv); ++ return -EBUSY; ++} ++ ++/** ++ * Reset the ring buffer descriptors. ++ * ++ * \sa mach64_do_engine_reset() ++ */ ++static void mach64_ring_reset(drm_mach64_private_t *dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ ++ mach64_do_release_used_buffers(dev_priv); ++ ring->head_addr = ring->start_addr; ++ ring->head = ring->tail = 0; ++ ring->space = ring->size; ++ ++ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ++ ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); ++ ++ dev_priv->ring_running = 0; ++} ++ ++/** ++ * Ensure that all the queued commands will be processed. ++ */ ++int mach64_do_dma_flush(drm_mach64_private_t *dev_priv) ++{ ++ /* FIXME: It's not necessary to wait for idle when flushing; ++ * we just need to ensure the ring will be completely processed ++ * in finite time without another ioctl ++ */ ++ return mach64_ring_idle(dev_priv); ++} ++ ++/** ++ * Stop all DMA activity. ++ */ ++int mach64_do_dma_idle(drm_mach64_private_t *dev_priv) ++{ ++ int ret; ++ ++ /* wait for completion */ ++ if ((ret = mach64_ring_idle(dev_priv)) < 0) { ++ DRM_ERROR("failed BM_GUI_TABLE=0x%08x tail: %u\n", ++ MACH64_READ(MACH64_BM_GUI_TABLE), ++ dev_priv->ring.tail); ++ return ret; ++ } ++ ++ mach64_ring_stop(dev_priv); ++ ++ /* clean up after pass */ ++ mach64_do_release_used_buffers(dev_priv); ++ return 0; ++} ++ ++/** ++ * Reset the engine. This will stop the DMA if it is running. ++ */ ++int mach64_do_engine_reset(drm_mach64_private_t *dev_priv) ++{ ++ u32 tmp; ++ ++ DRM_DEBUG("\n"); ++ ++ /* Kill off any outstanding DMA transfers. ++ */ ++ tmp = MACH64_READ(MACH64_BUS_CNTL); ++ MACH64_WRITE(MACH64_BUS_CNTL, tmp | MACH64_BUS_MASTER_DIS); ++ ++ /* Reset the GUI engine (high to low transition). 
++ */ ++ tmp = MACH64_READ(MACH64_GEN_TEST_CNTL); ++ MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp & ~MACH64_GUI_ENGINE_ENABLE); ++ /* Enable the GUI engine ++ */ ++ tmp = MACH64_READ(MACH64_GEN_TEST_CNTL); ++ MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp | MACH64_GUI_ENGINE_ENABLE); ++ ++ /* ensure engine is not locked up by clearing any FIFO or HOST errors ++ */ ++ tmp = MACH64_READ(MACH64_BUS_CNTL); ++ MACH64_WRITE(MACH64_BUS_CNTL, tmp | 0x00a00000); ++ ++ /* Once GUI engine is restored, disable bus mastering */ ++ MACH64_WRITE(MACH64_SRC_CNTL, 0); ++ ++ /* Reset descriptor ring */ ++ mach64_ring_reset(dev_priv); ++ ++ return 0; ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name Debugging output */ ++/*@{*/ ++ ++/** ++ * Dump engine registers values. ++ */ ++void mach64_dump_engine_info(drm_mach64_private_t *dev_priv) ++{ ++ DRM_INFO("\n"); ++ if (!dev_priv->is_pci) { ++ DRM_INFO(" AGP_BASE = 0x%08x\n", ++ MACH64_READ(MACH64_AGP_BASE)); ++ DRM_INFO(" AGP_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_AGP_CNTL)); ++ } ++ DRM_INFO(" ALPHA_TST_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_ALPHA_TST_CNTL)); ++ DRM_INFO("\n"); ++ DRM_INFO(" BM_COMMAND = 0x%08x\n", ++ MACH64_READ(MACH64_BM_COMMAND)); ++ DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n", ++ MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET)); ++ DRM_INFO(" BM_GUI_TABLE = 0x%08x\n", ++ MACH64_READ(MACH64_BM_GUI_TABLE)); ++ DRM_INFO(" BM_STATUS = 0x%08x\n", ++ MACH64_READ(MACH64_BM_STATUS)); ++ DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n", ++ MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR)); ++ DRM_INFO(" BM_SYSTEM_TABLE = 0x%08x\n", ++ MACH64_READ(MACH64_BM_SYSTEM_TABLE)); ++ DRM_INFO(" BUS_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_BUS_CNTL)); ++ DRM_INFO("\n"); ++ /* DRM_INFO( " CLOCK_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLOCK_CNTL ) ); */ ++ DRM_INFO(" CLR_CMP_CLR = 0x%08x\n", ++ MACH64_READ(MACH64_CLR_CMP_CLR)); ++ DRM_INFO(" CLR_CMP_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_CLR_CMP_CNTL)); ++ /* DRM_INFO( " CLR_CMP_MSK = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_MSK ) ); */ ++ DRM_INFO(" CONFIG_CHIP_ID = 0x%08x\n", ++ MACH64_READ(MACH64_CONFIG_CHIP_ID)); ++ DRM_INFO(" CONFIG_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_CONFIG_CNTL)); ++ DRM_INFO(" CONFIG_STAT0 = 0x%08x\n", ++ MACH64_READ(MACH64_CONFIG_STAT0)); ++ DRM_INFO(" CONFIG_STAT1 = 0x%08x\n", ++ MACH64_READ(MACH64_CONFIG_STAT1)); ++ DRM_INFO(" CONFIG_STAT2 = 0x%08x\n", ++ MACH64_READ(MACH64_CONFIG_STAT2)); ++ DRM_INFO(" CRC_SIG = 0x%08x\n", MACH64_READ(MACH64_CRC_SIG)); ++ DRM_INFO(" CUSTOM_MACRO_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_CUSTOM_MACRO_CNTL)); ++ DRM_INFO("\n"); ++ /* DRM_INFO( " DAC_CNTL = 0x%08x\n", MACH64_READ( MACH64_DAC_CNTL ) ); */ ++ /* DRM_INFO( " DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_DAC_REGS ) ); */ ++ DRM_INFO(" DP_BKGD_CLR = 0x%08x\n", ++ MACH64_READ(MACH64_DP_BKGD_CLR)); ++ DRM_INFO(" DP_FRGD_CLR = 0x%08x\n", ++ MACH64_READ(MACH64_DP_FRGD_CLR)); ++ DRM_INFO(" DP_MIX = 0x%08x\n", MACH64_READ(MACH64_DP_MIX)); ++ DRM_INFO(" DP_PIX_WIDTH = 0x%08x\n", ++ MACH64_READ(MACH64_DP_PIX_WIDTH)); ++ DRM_INFO(" DP_SRC = 0x%08x\n", MACH64_READ(MACH64_DP_SRC)); ++ DRM_INFO(" DP_WRITE_MASK = 0x%08x\n", ++ MACH64_READ(MACH64_DP_WRITE_MASK)); ++ DRM_INFO(" DSP_CONFIG = 0x%08x\n", ++ MACH64_READ(MACH64_DSP_CONFIG)); ++ DRM_INFO(" DSP_ON_OFF = 0x%08x\n", ++ MACH64_READ(MACH64_DSP_ON_OFF)); ++ DRM_INFO(" DST_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_DST_CNTL)); ++ DRM_INFO(" DST_OFF_PITCH = 0x%08x\n", ++ MACH64_READ(MACH64_DST_OFF_PITCH)); ++ DRM_INFO("\n"); ++ /* DRM_INFO( 
" EXT_DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_EXT_DAC_REGS ) ); */ ++ DRM_INFO(" EXT_MEM_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_EXT_MEM_CNTL)); ++ DRM_INFO("\n"); ++ DRM_INFO(" FIFO_STAT = 0x%08x\n", ++ MACH64_READ(MACH64_FIFO_STAT)); ++ DRM_INFO("\n"); ++ DRM_INFO(" GEN_TEST_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_GEN_TEST_CNTL)); ++ /* DRM_INFO( " GP_IO = 0x%08x\n", MACH64_READ( MACH64_GP_IO ) ); */ ++ DRM_INFO(" GUI_CMDFIFO_DATA = 0x%08x\n", ++ MACH64_READ(MACH64_GUI_CMDFIFO_DATA)); ++ DRM_INFO(" GUI_CMDFIFO_DEBUG = 0x%08x\n", ++ MACH64_READ(MACH64_GUI_CMDFIFO_DEBUG)); ++ DRM_INFO(" GUI_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_GUI_CNTL)); ++ DRM_INFO(" GUI_STAT = 0x%08x\n", ++ MACH64_READ(MACH64_GUI_STAT)); ++ DRM_INFO(" GUI_TRAJ_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_GUI_TRAJ_CNTL)); ++ DRM_INFO("\n"); ++ DRM_INFO(" HOST_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_HOST_CNTL)); ++ DRM_INFO(" HW_DEBUG = 0x%08x\n", ++ MACH64_READ(MACH64_HW_DEBUG)); ++ DRM_INFO("\n"); ++ DRM_INFO(" MEM_ADDR_CONFIG = 0x%08x\n", ++ MACH64_READ(MACH64_MEM_ADDR_CONFIG)); ++ DRM_INFO(" MEM_BUF_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_MEM_BUF_CNTL)); ++ DRM_INFO("\n"); ++ DRM_INFO(" PAT_REG0 = 0x%08x\n", ++ MACH64_READ(MACH64_PAT_REG0)); ++ DRM_INFO(" PAT_REG1 = 0x%08x\n", ++ MACH64_READ(MACH64_PAT_REG1)); ++ DRM_INFO("\n"); ++ DRM_INFO(" SC_LEFT = 0x%08x\n", MACH64_READ(MACH64_SC_LEFT)); ++ DRM_INFO(" SC_RIGHT = 0x%08x\n", ++ MACH64_READ(MACH64_SC_RIGHT)); ++ DRM_INFO(" SC_TOP = 0x%08x\n", MACH64_READ(MACH64_SC_TOP)); ++ DRM_INFO(" SC_BOTTOM = 0x%08x\n", ++ MACH64_READ(MACH64_SC_BOTTOM)); ++ DRM_INFO("\n"); ++ DRM_INFO(" SCALE_3D_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_SCALE_3D_CNTL)); ++ DRM_INFO(" SCRATCH_REG0 = 0x%08x\n", ++ MACH64_READ(MACH64_SCRATCH_REG0)); ++ DRM_INFO(" SCRATCH_REG1 = 0x%08x\n", ++ MACH64_READ(MACH64_SCRATCH_REG1)); ++ DRM_INFO(" SETUP_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_SETUP_CNTL)); ++ DRM_INFO(" SRC_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_SRC_CNTL)); ++ DRM_INFO("\n"); ++ DRM_INFO(" TEX_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_TEX_CNTL)); ++ DRM_INFO(" TEX_SIZE_PITCH = 0x%08x\n", ++ MACH64_READ(MACH64_TEX_SIZE_PITCH)); ++ DRM_INFO(" TIMER_CONFIG = 0x%08x\n", ++ MACH64_READ(MACH64_TIMER_CONFIG)); ++ DRM_INFO("\n"); ++ DRM_INFO(" Z_CNTL = 0x%08x\n", MACH64_READ(MACH64_Z_CNTL)); ++ DRM_INFO(" Z_OFF_PITCH = 0x%08x\n", ++ MACH64_READ(MACH64_Z_OFF_PITCH)); ++ DRM_INFO("\n"); ++} ++ ++#define MACH64_DUMP_CONTEXT 3 ++ ++/** ++ * Used by mach64_dump_ring_info() to dump the contents of the current buffer ++ * pointed by the ring head. 
++ */ ++static void mach64_dump_buf_info(drm_mach64_private_t *dev_priv, ++ struct drm_buf *buf) ++{ ++ u32 addr = GETBUFADDR(buf); ++ u32 used = buf->used >> 2; ++ u32 sys_addr = MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR); ++ u32 *p = GETBUFPTR(buf); ++ int skipped = 0; ++ ++ DRM_INFO("buffer contents:\n"); ++ ++ while (used) { ++ u32 reg, count; ++ ++ reg = le32_to_cpu(*p++); ++ if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 || ++ (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 && ++ addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) || ++ addr >= ++ GETBUFADDR(buf) + buf->used - MACH64_DUMP_CONTEXT * 4) { ++ DRM_INFO("%08x: 0x%08x\n", addr, reg); ++ } ++ addr += 4; ++ used--; ++ ++ count = (reg >> 16) + 1; ++ reg = reg & 0xffff; ++ reg = MMSELECT(reg); ++ while (count && used) { ++ if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 || ++ (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 && ++ addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) || ++ addr >= ++ GETBUFADDR(buf) + buf->used - ++ MACH64_DUMP_CONTEXT * 4) { ++ DRM_INFO("%08x: 0x%04x = 0x%08x\n", addr, ++ reg, le32_to_cpu(*p)); ++ skipped = 0; ++ } else { ++ if (!skipped) { ++ DRM_INFO(" ...\n"); ++ skipped = 1; ++ } ++ } ++ p++; ++ addr += 4; ++ used--; ++ ++ reg += 4; ++ count--; ++ } ++ } ++ ++ DRM_INFO("\n"); ++} ++ ++/** ++ * Dump the ring state and contents, including the contents of the buffer being ++ * processed by the graphics engine. ++ */ ++void mach64_dump_ring_info(drm_mach64_private_t *dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ int i, skipped; ++ ++ DRM_INFO("\n"); ++ ++ DRM_INFO("ring contents:\n"); ++ DRM_INFO(" head_addr: 0x%08x head: %u tail: %u\n\n", ++ ring->head_addr, ring->head, ring->tail); ++ ++ skipped = 0; ++ for (i = 0; i < ring->size / sizeof(u32); i += 4) { ++ if (i <= MACH64_DUMP_CONTEXT * 4 || ++ i >= ring->size / sizeof(u32) - MACH64_DUMP_CONTEXT * 4 || ++ (i >= ring->tail - MACH64_DUMP_CONTEXT * 4 && ++ i <= ring->tail + MACH64_DUMP_CONTEXT * 4) || ++ (i >= ring->head - MACH64_DUMP_CONTEXT * 4 && ++ i <= ring->head + MACH64_DUMP_CONTEXT * 4)) { ++ DRM_INFO(" 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x%s%s\n", ++ (u32)(ring->start_addr + i * sizeof(u32)), ++ le32_to_cpu(((u32 *) ring->start)[i + 0]), ++ le32_to_cpu(((u32 *) ring->start)[i + 1]), ++ le32_to_cpu(((u32 *) ring->start)[i + 2]), ++ le32_to_cpu(((u32 *) ring->start)[i + 3]), ++ i == ring->head ? " (head)" : "", ++ i == ring->tail ? 
" (tail)" : ""); ++ skipped = 0; ++ } else { ++ if (!skipped) { ++ DRM_INFO(" ...\n"); ++ skipped = 1; ++ } ++ } ++ } ++ ++ DRM_INFO("\n"); ++ ++ if (ring->head >= 0 && ring->head < ring->size / sizeof(u32)) { ++ struct list_head *ptr; ++ u32 addr = le32_to_cpu(((u32 *) ring->start)[ring->head + 1]); ++ ++ list_for_each(ptr, &dev_priv->pending) { ++ drm_mach64_freelist_t *entry = ++ list_entry(ptr, drm_mach64_freelist_t, list); ++ struct drm_buf *buf = entry->buf; ++ ++ u32 buf_addr = GETBUFADDR(buf); ++ ++ if (buf_addr <= addr && addr < buf_addr + buf->used) ++ mach64_dump_buf_info(dev_priv, buf); ++ } ++ } ++ ++ DRM_INFO("\n"); ++ DRM_INFO(" BM_GUI_TABLE = 0x%08x\n", ++ MACH64_READ(MACH64_BM_GUI_TABLE)); ++ DRM_INFO("\n"); ++ DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n", ++ MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET)); ++ DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n", ++ MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR)); ++ DRM_INFO(" BM_COMMAND = 0x%08x\n", ++ MACH64_READ(MACH64_BM_COMMAND)); ++ DRM_INFO("\n"); ++ DRM_INFO(" BM_STATUS = 0x%08x\n", ++ MACH64_READ(MACH64_BM_STATUS)); ++ DRM_INFO(" BUS_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_BUS_CNTL)); ++ DRM_INFO(" FIFO_STAT = 0x%08x\n", ++ MACH64_READ(MACH64_FIFO_STAT)); ++ DRM_INFO(" GUI_STAT = 0x%08x\n", ++ MACH64_READ(MACH64_GUI_STAT)); ++ DRM_INFO(" SRC_CNTL = 0x%08x\n", ++ MACH64_READ(MACH64_SRC_CNTL)); ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name DMA descriptor ring macros */ ++/*@{*/ ++ ++/** ++ * Add the end mark to the ring's new tail position. ++ * ++ * The bus master engine will keep processing the DMA buffers listed in the ring ++ * until it finds this mark, making it stop. ++ * ++ * \sa mach64_clear_dma_eol ++ */ ++static __inline__ void mach64_set_dma_eol(volatile u32 *addr) ++{ ++#if defined(__i386__) ++ int nr = 31; ++ ++ /* Taken from include/asm-i386/bitops.h linux header */ ++ __asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr) ++ :"Ir"(nr)); ++#elif defined(__powerpc__) ++ u32 old; ++ u32 mask = cpu_to_le32(MACH64_DMA_EOL); ++ ++ /* Taken from the include/asm-ppc/bitops.h linux header */ ++ __asm__ __volatile__("\n\ ++1: lwarx %0,0,%3 \n\ ++ or %0,%0,%2 \n\ ++ stwcx. %0,0,%3 \n\ ++ bne- 1b":"=&r"(old), "=m"(*addr) ++ :"r"(mask), "r"(addr), "m"(*addr) ++ :"cc"); ++#elif defined(__alpha__) ++ u32 temp; ++ u32 mask = MACH64_DMA_EOL; ++ ++ /* Taken from the include/asm-alpha/bitops.h linux header */ ++ __asm__ __volatile__("1: ldl_l %0,%3\n" ++ " bis %0,%2,%0\n" ++ " stl_c %0,%1\n" ++ " beq %0,2f\n" ++ ".subsection 2\n" ++ "2: br 1b\n" ++ ".previous":"=&r"(temp), "=m"(*addr) ++ :"Ir"(mask), "m"(*addr)); ++#else ++ u32 mask = cpu_to_le32(MACH64_DMA_EOL); ++ ++ *addr |= mask; ++#endif ++} ++ ++/** ++ * Remove the end mark from the ring's old tail position. ++ * ++ * It should be called after calling mach64_set_dma_eol to mark the ring's new ++ * tail position. ++ * ++ * We update the end marks while the bus master engine is in operation. Since ++ * the bus master engine may potentially be reading from the same position ++ * that we write, we must change atomically to avoid having intermediary bad ++ * data. 
++ */ ++static __inline__ void mach64_clear_dma_eol(volatile u32 *addr) ++{ ++#if defined(__i386__) ++ int nr = 31; ++ ++ /* Taken from include/asm-i386/bitops.h linux header */ ++ __asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr) ++ :"Ir"(nr)); ++#elif defined(__powerpc__) ++ u32 old; ++ u32 mask = cpu_to_le32(MACH64_DMA_EOL); ++ ++ /* Taken from the include/asm-ppc/bitops.h linux header */ ++ __asm__ __volatile__("\n\ ++1: lwarx %0,0,%3 \n\ ++ andc %0,%0,%2 \n\ ++ stwcx. %0,0,%3 \n\ ++ bne- 1b":"=&r"(old), "=m"(*addr) ++ :"r"(mask), "r"(addr), "m"(*addr) ++ :"cc"); ++#elif defined(__alpha__) ++ u32 temp; ++ u32 mask = ~MACH64_DMA_EOL; ++ ++ /* Taken from the include/asm-alpha/bitops.h linux header */ ++ __asm__ __volatile__("1: ldl_l %0,%3\n" ++ " and %0,%2,%0\n" ++ " stl_c %0,%1\n" ++ " beq %0,2f\n" ++ ".subsection 2\n" ++ "2: br 1b\n" ++ ".previous":"=&r"(temp), "=m"(*addr) ++ :"Ir"(mask), "m"(*addr)); ++#else ++ u32 mask = cpu_to_le32(~MACH64_DMA_EOL); ++ ++ *addr &= mask; ++#endif ++} ++ ++#define RING_LOCALS \ ++ int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring ++ ++#define RING_WRITE_OFS _ring_write ++ ++#define BEGIN_RING(n) \ ++ do { \ ++ if (MACH64_VERBOSE) { \ ++ DRM_INFO( "BEGIN_RING( %d ) \n", \ ++ (n) ); \ ++ } \ ++ if (dev_priv->ring.space <= (n) * sizeof(u32)) { \ ++ int ret; \ ++ if ((ret = mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \ ++ DRM_ERROR( "wait_ring failed, resetting engine\n"); \ ++ mach64_dump_engine_info( dev_priv ); \ ++ mach64_do_engine_reset( dev_priv ); \ ++ return ret; \ ++ } \ ++ } \ ++ dev_priv->ring.space -= (n) * sizeof(u32); \ ++ _ring = (u32 *) dev_priv->ring.start; \ ++ _ring_tail = _ring_write = dev_priv->ring.tail; \ ++ _ring_mask = dev_priv->ring.tail_mask; \ ++ } while (0) ++ ++#define OUT_RING( x ) \ ++do { \ ++ if (MACH64_VERBOSE) { \ ++ DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \ ++ (unsigned int)(x), _ring_write ); \ ++ } \ ++ _ring[_ring_write++] = cpu_to_le32( x ); \ ++ _ring_write &= _ring_mask; \ ++} while (0) ++ ++#define ADVANCE_RING() \ ++do { \ ++ if (MACH64_VERBOSE) { \ ++ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ ++ _ring_write, _ring_tail ); \ ++ } \ ++ DRM_MEMORYBARRIER(); \ ++ mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] ); \ ++ DRM_MEMORYBARRIER(); \ ++ dev_priv->ring.tail = _ring_write; \ ++ mach64_ring_tick( dev_priv, &(dev_priv)->ring ); \ ++} while (0) ++ ++/** ++ * Queue a DMA buffer of register writes into the ring buffer. ++ */ ++int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv, ++ drm_mach64_freelist_t *entry) ++{ ++ int bytes, pages, remainder; ++ u32 address, page; ++ int i; ++ struct drm_buf *buf = entry->buf; ++ RING_LOCALS; ++ ++ bytes = buf->used; ++ address = GETBUFADDR( buf ); ++ pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE; ++ ++ BEGIN_RING( pages * 4 ); ++ ++ for ( i = 0 ; i < pages-1 ; i++ ) { ++ page = address + i * MACH64_DMA_CHUNKSIZE; ++ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); ++ OUT_RING( page ); ++ OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET ); ++ OUT_RING( 0 ); ++ } ++ ++ /* generate the final descriptor for any remaining commands in this buffer */ ++ page = address + i * MACH64_DMA_CHUNKSIZE; ++ remainder = bytes - i * MACH64_DMA_CHUNKSIZE; ++ ++ /* Save dword offset of last descriptor for this buffer. 
++ * This is needed to check for completion of the buffer in freelist_get ++ */ ++ entry->ring_ofs = RING_WRITE_OFS; ++ ++ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); ++ OUT_RING( page ); ++ OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL ); ++ OUT_RING( 0 ); ++ ++ ADVANCE_RING(); ++ ++ return 0; ++} ++ ++/** ++ * Queue DMA buffer controlling host data transfers (e.g., blit). ++ * ++ * Almost identical to mach64_add_buf_to_ring. ++ */ ++int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv, ++ drm_mach64_freelist_t *entry) ++{ ++ int bytes, pages, remainder; ++ u32 address, page; ++ int i; ++ struct drm_buf *buf = entry->buf; ++ RING_LOCALS; ++ ++ bytes = buf->used - MACH64_HOSTDATA_BLIT_OFFSET; ++ pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE; ++ address = GETBUFADDR( buf ); ++ ++ BEGIN_RING( 4 + pages * 4 ); ++ ++ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); ++ OUT_RING( address ); ++ OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET ); ++ OUT_RING( 0 ); ++ address += MACH64_HOSTDATA_BLIT_OFFSET; ++ ++ for ( i = 0 ; i < pages-1 ; i++ ) { ++ page = address + i * MACH64_DMA_CHUNKSIZE; ++ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA ); ++ OUT_RING( page ); ++ OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET ); ++ OUT_RING( 0 ); ++ } ++ ++ /* generate the final descriptor for any remaining commands in this buffer */ ++ page = address + i * MACH64_DMA_CHUNKSIZE; ++ remainder = bytes - i * MACH64_DMA_CHUNKSIZE; ++ ++ /* Save dword offset of last descriptor for this buffer. ++ * This is needed to check for completion of the buffer in freelist_get ++ */ ++ entry->ring_ofs = RING_WRITE_OFS; ++ ++ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA ); ++ OUT_RING( page ); ++ OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL ); ++ OUT_RING( 0 ); ++ ++ ADVANCE_RING(); ++ ++ return 0; ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name DMA test and initialization */ ++/*@{*/ ++ ++/** ++ * Perform a simple DMA operation using the pattern registers to test whether ++ * DMA works. ++ * ++ * \return zero if successful. ++ * ++ * \note This function was the testbed for many experiments regarding Mach64 ++ * DMA operation. It is left here since it is so tricky to get DMA operating ++ * properly in some architectures and hardware. ++ */ ++static int mach64_bm_dma_test(struct drm_device * dev) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_dma_handle_t *cpu_addr_dmah; ++ u32 data_addr; ++ u32 *table, *data; ++ u32 expected[2]; ++ u32 src_cntl, pat_reg0, pat_reg1; ++ int i, count, failed; ++ ++ DRM_DEBUG("\n"); ++ ++ table = (u32 *) dev_priv->ring.start; ++ ++ /* FIXME: get a dma buffer from the freelist here */ ++ DRM_DEBUG("Allocating data memory ...\n"); ++#ifdef __FreeBSD__ ++ DRM_UNLOCK(); ++#endif ++ cpu_addr_dmah = ++ drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful); ++#ifdef __FreeBSD__ ++ DRM_LOCK(); ++#endif ++ if (!cpu_addr_dmah) { ++ DRM_INFO("data-memory allocation failed!\n"); ++ return -ENOMEM; ++ } else { ++ data = (u32 *) cpu_addr_dmah->vaddr; ++ data_addr = (u32) cpu_addr_dmah->busaddr; ++ } ++ ++ /* Save the X server's value for SRC_CNTL and restore it ++ * in case our test fails. 
This prevents the X server ++ * from disabling its cache for this register ++ */ ++ src_cntl = MACH64_READ(MACH64_SRC_CNTL); ++ pat_reg0 = MACH64_READ(MACH64_PAT_REG0); ++ pat_reg1 = MACH64_READ(MACH64_PAT_REG1); ++ ++ mach64_do_wait_for_fifo(dev_priv, 3); ++ ++ MACH64_WRITE(MACH64_SRC_CNTL, 0); ++ MACH64_WRITE(MACH64_PAT_REG0, 0x11111111); ++ MACH64_WRITE(MACH64_PAT_REG1, 0x11111111); ++ ++ mach64_do_wait_for_idle(dev_priv); ++ ++ for (i = 0; i < 2; i++) { ++ u32 reg; ++ reg = MACH64_READ((MACH64_PAT_REG0 + i * 4)); ++ DRM_DEBUG("(Before DMA Transfer) reg %d = 0x%08x\n", i, reg); ++ if (reg != 0x11111111) { ++ DRM_INFO("Error initializing test registers\n"); ++ DRM_INFO("resetting engine ...\n"); ++ mach64_do_engine_reset(dev_priv); ++ DRM_INFO("freeing data buffer memory.\n"); ++ drm_pci_free(dev, cpu_addr_dmah); ++ return -EIO; ++ } ++ } ++ ++ /* fill up a buffer with sets of 2 consecutive writes starting with PAT_REG0 */ ++ count = 0; ++ ++ data[count++] = cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16)); ++ data[count++] = expected[0] = 0x22222222; ++ data[count++] = expected[1] = 0xaaaaaaaa; ++ ++ while (count < 1020) { ++ data[count++] = ++ cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16)); ++ data[count++] = 0x22222222; ++ data[count++] = 0xaaaaaaaa; ++ } ++ data[count++] = cpu_to_le32(DMAREG(MACH64_SRC_CNTL) | (0 << 16)); ++ data[count++] = 0; ++ ++ DRM_DEBUG("Preparing table ...\n"); ++ table[MACH64_DMA_FRAME_BUF_OFFSET] = cpu_to_le32(MACH64_BM_ADDR + ++ MACH64_APERTURE_OFFSET); ++ table[MACH64_DMA_SYS_MEM_ADDR] = cpu_to_le32(data_addr); ++ table[MACH64_DMA_COMMAND] = cpu_to_le32(count * sizeof(u32) ++ | MACH64_DMA_HOLD_OFFSET ++ | MACH64_DMA_EOL); ++ table[MACH64_DMA_RESERVED] = 0; ++ ++ DRM_DEBUG("table[0] = 0x%08x\n", table[0]); ++ DRM_DEBUG("table[1] = 0x%08x\n", table[1]); ++ DRM_DEBUG("table[2] = 0x%08x\n", table[2]); ++ DRM_DEBUG("table[3] = 0x%08x\n", table[3]); ++ ++ for (i = 0; i < 6; i++) { ++ DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]); ++ } ++ DRM_DEBUG(" ...\n"); ++ for (i = count - 5; i < count; i++) { ++ DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]); ++ } ++ ++ DRM_MEMORYBARRIER(); ++ ++ DRM_DEBUG("waiting for idle...\n"); ++ if ((i = mach64_do_wait_for_idle(dev_priv))) { ++ DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i); ++ DRM_INFO("resetting engine ...\n"); ++ mach64_do_engine_reset(dev_priv); ++ mach64_do_wait_for_fifo(dev_priv, 3); ++ MACH64_WRITE(MACH64_SRC_CNTL, src_cntl); ++ MACH64_WRITE(MACH64_PAT_REG0, pat_reg0); ++ MACH64_WRITE(MACH64_PAT_REG1, pat_reg1); ++ DRM_INFO("freeing data buffer memory.\n"); ++ drm_pci_free(dev, cpu_addr_dmah); ++ return i; ++ } ++ DRM_DEBUG("waiting for idle...done\n"); ++ ++ DRM_DEBUG("BUS_CNTL = 0x%08x\n", MACH64_READ(MACH64_BUS_CNTL)); ++ DRM_DEBUG("SRC_CNTL = 0x%08x\n", MACH64_READ(MACH64_SRC_CNTL)); ++ DRM_DEBUG("\n"); ++ DRM_DEBUG("data bus addr = 0x%08x\n", data_addr); ++ DRM_DEBUG("table bus addr = 0x%08x\n", dev_priv->ring.start_addr); ++ ++ DRM_DEBUG("starting DMA transfer...\n"); ++ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ++ dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); ++ ++ MACH64_WRITE(MACH64_SRC_CNTL, ++ MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC | ++ MACH64_SRC_BM_OP_SYSTEM_TO_REG); ++ ++ /* Kick off the transfer */ ++ DRM_DEBUG("starting DMA transfer... 
done.\n"); ++ MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0); ++ ++ DRM_DEBUG("waiting for idle...\n"); ++ ++ if ((i = mach64_do_wait_for_idle(dev_priv))) { ++ /* engine locked up, dump register state and reset */ ++ DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i); ++ mach64_dump_engine_info(dev_priv); ++ DRM_INFO("resetting engine ...\n"); ++ mach64_do_engine_reset(dev_priv); ++ mach64_do_wait_for_fifo(dev_priv, 3); ++ MACH64_WRITE(MACH64_SRC_CNTL, src_cntl); ++ MACH64_WRITE(MACH64_PAT_REG0, pat_reg0); ++ MACH64_WRITE(MACH64_PAT_REG1, pat_reg1); ++ DRM_INFO("freeing data buffer memory.\n"); ++ drm_pci_free(dev, cpu_addr_dmah); ++ return i; ++ } ++ ++ DRM_DEBUG("waiting for idle...done\n"); ++ ++ /* restore SRC_CNTL */ ++ mach64_do_wait_for_fifo(dev_priv, 1); ++ MACH64_WRITE(MACH64_SRC_CNTL, src_cntl); ++ ++ failed = 0; ++ ++ /* Check register values to see if the GUI master operation succeeded */ ++ for (i = 0; i < 2; i++) { ++ u32 reg; ++ reg = MACH64_READ((MACH64_PAT_REG0 + i * 4)); ++ DRM_DEBUG("(After DMA Transfer) reg %d = 0x%08x\n", i, reg); ++ if (reg != expected[i]) { ++ failed = -1; ++ } ++ } ++ ++ /* restore pattern registers */ ++ mach64_do_wait_for_fifo(dev_priv, 2); ++ MACH64_WRITE(MACH64_PAT_REG0, pat_reg0); ++ MACH64_WRITE(MACH64_PAT_REG1, pat_reg1); ++ ++ DRM_DEBUG("freeing data buffer memory.\n"); ++ drm_pci_free(dev, cpu_addr_dmah); ++ DRM_DEBUG("returning ...\n"); ++ ++ return failed; ++} ++ ++/** ++ * Called during the DMA initialization ioctl to initialize all the necessary ++ * software and hardware state for DMA operation. ++ */ ++static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init) ++{ ++ drm_mach64_private_t *dev_priv; ++ u32 tmp; ++ int i, ret; ++ ++ DRM_DEBUG("\n"); ++ ++ dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER); ++ if (dev_priv == NULL) ++ return -ENOMEM; ++ ++ memset(dev_priv, 0, sizeof(drm_mach64_private_t)); ++ ++ dev_priv->is_pci = init->is_pci; ++ ++ dev_priv->fb_bpp = init->fb_bpp; ++ dev_priv->front_offset = init->front_offset; ++ dev_priv->front_pitch = init->front_pitch; ++ dev_priv->back_offset = init->back_offset; ++ dev_priv->back_pitch = init->back_pitch; ++ ++ dev_priv->depth_bpp = init->depth_bpp; ++ dev_priv->depth_offset = init->depth_offset; ++ dev_priv->depth_pitch = init->depth_pitch; ++ ++ dev_priv->front_offset_pitch = (((dev_priv->front_pitch / 8) << 22) | ++ (dev_priv->front_offset >> 3)); ++ dev_priv->back_offset_pitch = (((dev_priv->back_pitch / 8) << 22) | ++ (dev_priv->back_offset >> 3)); ++ dev_priv->depth_offset_pitch = (((dev_priv->depth_pitch / 8) << 22) | ++ (dev_priv->depth_offset >> 3)); ++ ++ dev_priv->usec_timeout = 1000000; ++ ++ /* Set up the freelist, placeholder list and pending list */ ++ INIT_LIST_HEAD(&dev_priv->free_list); ++ INIT_LIST_HEAD(&dev_priv->placeholders); ++ INIT_LIST_HEAD(&dev_priv->pending); ++ ++ dev_priv->sarea = drm_getsarea(dev); ++ if (!dev_priv->sarea) { ++ DRM_ERROR("can not find sarea!\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -EINVAL; ++ } ++ dev_priv->fb = drm_core_findmap(dev, init->fb_offset); ++ if (!dev_priv->fb) { ++ DRM_ERROR("can not find frame buffer map!\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -EINVAL; ++ } ++ dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); ++ if (!dev_priv->mmio) { ++ DRM_ERROR("can not find mmio map!\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -EINVAL; ++ } ++ ++ 
dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset); ++ if (!dev_priv->ring_map) { ++ DRM_ERROR("can not find ring map!\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->sarea_priv = (drm_mach64_sarea_t *) ++ ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); ++ ++ if (!dev_priv->is_pci) { ++ drm_core_ioremap(dev_priv->ring_map, dev); ++ if (!dev_priv->ring_map->handle) { ++ DRM_ERROR("can not ioremap virtual address for" ++ " descriptor ring\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -ENOMEM; ++ } ++ dev->agp_buffer_token = init->buffers_offset; ++ dev->agp_buffer_map = ++ drm_core_findmap(dev, init->buffers_offset); ++ if (!dev->agp_buffer_map) { ++ DRM_ERROR("can not find dma buffer map!\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -EINVAL; ++ } ++ /* there might be a nicer way to do this - ++ dev isn't passed all the way through the mach64 - DA */ ++ dev_priv->dev_buffers = dev->agp_buffer_map; ++ ++ drm_core_ioremap(dev->agp_buffer_map, dev); ++ if (!dev->agp_buffer_map->handle) { ++ DRM_ERROR("can not ioremap virtual address for" ++ " dma buffer\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -ENOMEM; ++ } ++ dev_priv->agp_textures = ++ drm_core_findmap(dev, init->agp_textures_offset); ++ if (!dev_priv->agp_textures) { ++ DRM_ERROR("can not find agp texture region!\n"); ++ dev->dev_private = (void *)dev_priv; ++ mach64_do_cleanup_dma(dev); ++ return -EINVAL; ++ } ++ } ++ ++ dev->dev_private = (void *)dev_priv; ++ ++ dev_priv->driver_mode = init->dma_mode; ++ ++ /* changing the FIFO size from the default causes problems with DMA */ ++ tmp = MACH64_READ(MACH64_GUI_CNTL); ++ if ((tmp & MACH64_CMDFIFO_SIZE_MASK) != MACH64_CMDFIFO_SIZE_128) { ++ DRM_INFO("Setting FIFO size to 128 entries\n"); ++ /* FIFO must be empty to change the FIFO depth */ ++ if ((ret = mach64_do_wait_for_idle(dev_priv))) { ++ DRM_ERROR ++ ("wait for idle failed before changing FIFO depth!\n"); ++ mach64_do_cleanup_dma(dev); ++ return ret; ++ } ++ MACH64_WRITE(MACH64_GUI_CNTL, ((tmp & ~MACH64_CMDFIFO_SIZE_MASK) ++ | MACH64_CMDFIFO_SIZE_128)); ++ /* need to read GUI_STAT for proper sync according to docs */ ++ if ((ret = mach64_do_wait_for_idle(dev_priv))) { ++ DRM_ERROR ++ ("wait for idle failed when changing FIFO depth!\n"); ++ mach64_do_cleanup_dma(dev); ++ return ret; ++ } ++ } ++ ++ dev_priv->ring.size = 0x4000; /* 16KB */ ++ dev_priv->ring.start = dev_priv->ring_map->handle; ++ dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset; ++ ++ memset(dev_priv->ring.start, 0, dev_priv->ring.size); ++ DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n", ++ dev_priv->ring.start, dev_priv->ring.start_addr); ++ ++ ret = 0; ++ if (dev_priv->driver_mode != MACH64_MODE_MMIO) { ++ ++ /* enable block 1 registers and bus mastering */ ++ MACH64_WRITE(MACH64_BUS_CNTL, ((MACH64_READ(MACH64_BUS_CNTL) ++ | MACH64_BUS_EXT_REG_EN) ++ & ~MACH64_BUS_MASTER_DIS)); ++ ++ /* try a DMA GUI-mastering pass and fall back to MMIO if it fails */ ++ DRM_DEBUG("Starting DMA test...\n"); ++ if ((ret = mach64_bm_dma_test(dev))) { ++ dev_priv->driver_mode = MACH64_MODE_MMIO; ++ } ++ } ++ ++ switch (dev_priv->driver_mode) { ++ case MACH64_MODE_MMIO: ++ MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL) ++ | MACH64_BUS_EXT_REG_EN ++ | MACH64_BUS_MASTER_DIS)); ++ if (init->dma_mode == MACH64_MODE_MMIO) ++ DRM_INFO("Forcing 
pseudo-DMA mode\n"); ++ else ++ DRM_INFO ++ ("DMA test failed (ret=%d), using pseudo-DMA mode\n", ++ ret); ++ break; ++ case MACH64_MODE_DMA_SYNC: ++ DRM_INFO("DMA test succeeded, using synchronous DMA mode\n"); ++ break; ++ case MACH64_MODE_DMA_ASYNC: ++ default: ++ DRM_INFO("DMA test succeeded, using asynchronous DMA mode\n"); ++ } ++ ++ dev_priv->ring_running = 0; ++ ++ /* setup offsets for physical address of table start and end */ ++ dev_priv->ring.head_addr = dev_priv->ring.start_addr; ++ dev_priv->ring.head = dev_priv->ring.tail = 0; ++ dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; ++ dev_priv->ring.space = dev_priv->ring.size; ++ ++ /* setup physical address and size of descriptor table */ ++ mach64_do_wait_for_fifo(dev_priv, 1); ++ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ++ (dev_priv->ring. ++ head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB)); ++ ++ /* init frame counter */ ++ dev_priv->sarea_priv->frames_queued = 0; ++ for (i = 0; i < MACH64_MAX_QUEUED_FRAMES; i++) { ++ dev_priv->frame_ofs[i] = ~0; /* All ones indicates placeholder */ ++ } ++ ++ /* Allocate the DMA buffer freelist */ ++ if ((ret = mach64_init_freelist(dev))) { ++ DRM_ERROR("Freelist allocation failed\n"); ++ mach64_do_cleanup_dma(dev); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++/*******************************************************************/ ++/** MMIO Pseudo-DMA (intended primarily for debugging, not performance) ++ */ ++ ++int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t *dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ volatile u32 *ring_read; ++ struct list_head *ptr; ++ drm_mach64_freelist_t *entry; ++ struct drm_buf *buf = NULL; ++ u32 *buf_ptr; ++ u32 used, reg, target; ++ int fifo, count, found, ret, no_idle_wait; ++ ++ fifo = count = reg = no_idle_wait = 0; ++ target = MACH64_BM_ADDR; ++ ++ if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) { ++ DRM_INFO("idle failed before pseudo-dma dispatch, resetting engine\n"); ++ mach64_dump_engine_info(dev_priv); ++ mach64_do_engine_reset(dev_priv); ++ return ret; ++ } ++ ++ ring_read = (u32 *) ring->start; ++ ++ while (ring->tail != ring->head) { ++ u32 buf_addr, new_target, offset; ++ u32 bytes, remaining, head, eol; ++ ++ head = ring->head; ++ ++ new_target = ++ le32_to_cpu(ring_read[head++]) - MACH64_APERTURE_OFFSET; ++ buf_addr = le32_to_cpu(ring_read[head++]); ++ eol = le32_to_cpu(ring_read[head]) & MACH64_DMA_EOL; ++ bytes = le32_to_cpu(ring_read[head++]) ++ & ~(MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL); ++ head++; ++ head &= ring->tail_mask; ++ ++ /* can't wait for idle between a blit setup descriptor ++ * and a HOSTDATA descriptor or the engine will lock ++ */ ++ if (new_target == MACH64_BM_HOSTDATA ++ && target == MACH64_BM_ADDR) ++ no_idle_wait = 1; ++ ++ target = new_target; ++ ++ found = 0; ++ offset = 0; ++ list_for_each(ptr, &dev_priv->pending) { ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ buf = entry->buf; ++ offset = buf_addr - GETBUFADDR(buf); ++ if (offset >= 0 && offset < MACH64_BUFFER_SIZE) { ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found || buf == NULL) { ++ DRM_ERROR ++ ("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n", ++ head, ring->tail, buf_addr, (eol ? "eol" : "")); ++ mach64_dump_ring_info(dev_priv); ++ mach64_do_engine_reset(dev_priv); ++ return -EINVAL; ++ } ++ ++ /* Hand feed the buffer to the card via MMIO, waiting for the fifo ++ * every 16 writes ++ */ ++ DRM_DEBUG("target: (0x%08x) %s\n", target, ++ (target == ++ MACH64_BM_HOSTDATA ? 
"BM_HOSTDATA" : "BM_ADDR")); ++ DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes, ++ buf->used); ++ ++ remaining = (buf->used - offset) >> 2; /* dwords remaining in buffer */ ++ used = bytes >> 2; /* dwords in buffer for this descriptor */ ++ buf_ptr = (u32 *) ((char *)GETBUFPTR(buf) + offset); ++ ++ while (used) { ++ ++ if (count == 0) { ++ if (target == MACH64_BM_HOSTDATA) { ++ reg = DMAREG(MACH64_HOST_DATA0); ++ count = ++ (remaining > 16) ? 16 : remaining; ++ fifo = 0; ++ } else { ++ reg = le32_to_cpu(*buf_ptr++); ++ used--; ++ count = (reg >> 16) + 1; ++ } ++ ++ reg = reg & 0xffff; ++ reg = MMSELECT(reg); ++ } ++ while (count && used) { ++ if (!fifo) { ++ if (no_idle_wait) { ++ if ((ret = ++ mach64_do_wait_for_fifo ++ (dev_priv, 16)) < 0) { ++ no_idle_wait = 0; ++ return ret; ++ } ++ } else { ++ if ((ret = ++ mach64_do_wait_for_idle ++ (dev_priv)) < 0) { ++ return ret; ++ } ++ } ++ fifo = 16; ++ } ++ --fifo; ++ MACH64_WRITE(reg, le32_to_cpu(*buf_ptr++)); ++ used--; ++ remaining--; ++ ++ reg += 4; ++ count--; ++ } ++ } ++ ring->head = head; ++ ring->head_addr = ring->start_addr + (ring->head * sizeof(u32)); ++ ring->space += (4 * sizeof(u32)); ++ } ++ ++ if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) { ++ return ret; ++ } ++ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ++ ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); ++ ++ DRM_DEBUG("completed\n"); ++ return 0; ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name DMA cleanup */ ++/*@{*/ ++ ++int mach64_do_cleanup_dma(struct drm_device * dev) ++{ ++ DRM_DEBUG("\n"); ++ ++ /* Make sure interrupts are disabled here because the uninstall ioctl ++ * may not have been called from userspace and after dev_private ++ * is freed, it's too late. 
++ */ ++ if (dev->irq) ++ drm_irq_uninstall(dev); ++ ++ if (dev->dev_private) { ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ ++ if (!dev_priv->is_pci) { ++ if (dev_priv->ring_map) ++ drm_core_ioremapfree(dev_priv->ring_map, dev); ++ ++ if (dev->agp_buffer_map) { ++ drm_core_ioremapfree(dev->agp_buffer_map, dev); ++ dev->agp_buffer_map = NULL; ++ } ++ } ++ ++ mach64_destroy_freelist(dev); ++ ++ drm_free(dev_priv, sizeof(drm_mach64_private_t), ++ DRM_MEM_DRIVER); ++ dev->dev_private = NULL; ++ } ++ ++ return 0; ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name IOCTL handlers */ ++/*@{*/ ++ ++int mach64_dma_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_init_t *init = data; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ switch (init->func) { ++ case DRM_MACH64_INIT_DMA: ++ return mach64_do_dma_init(dev, init); ++ case DRM_MACH64_CLEANUP_DMA: ++ return mach64_do_cleanup_dma(dev); ++ } ++ ++ return -EINVAL; ++} ++ ++int mach64_dma_idle(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return mach64_do_dma_idle(dev_priv); ++} ++ ++int mach64_dma_flush(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return mach64_do_dma_flush(dev_priv); ++} ++ ++int mach64_engine_reset(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return mach64_do_engine_reset(dev_priv); ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name Freelist management */ ++/*@{*/ ++ ++int mach64_init_freelist(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_mach64_freelist_t *entry; ++ struct list_head *ptr; ++ int i; ++ ++ DRM_DEBUG("adding %d buffers to freelist\n", dma->buf_count); ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ if ((entry = ++ (drm_mach64_freelist_t *) ++ drm_alloc(sizeof(drm_mach64_freelist_t), ++ DRM_MEM_BUFLISTS)) == NULL) ++ return -ENOMEM; ++ memset(entry, 0, sizeof(drm_mach64_freelist_t)); ++ entry->buf = dma->buflist[i]; ++ ptr = &entry->list; ++ list_add_tail(ptr, &dev_priv->free_list); ++ } ++ ++ return 0; ++} ++ ++void mach64_destroy_freelist(struct drm_device * dev) ++{ ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ drm_mach64_freelist_t *entry; ++ struct list_head *ptr; ++ struct list_head *tmp; ++ ++ DRM_DEBUG("\n"); ++ ++ list_for_each_safe(ptr, tmp, &dev_priv->pending) { ++ list_del(ptr); ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS); ++ } ++ list_for_each_safe(ptr, tmp, &dev_priv->placeholders) { ++ list_del(ptr); ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS); ++ } ++ ++ list_for_each_safe(ptr, tmp, &dev_priv->free_list) { ++ list_del(ptr); ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS); ++ } ++} ++ ++/* IMPORTANT: This function should only be called when the engine is idle or locked up, ++ * 
as it assumes all buffers in the pending list have been completed by the hardware. ++ */ ++int mach64_do_release_used_buffers(drm_mach64_private_t *dev_priv) ++{ ++ struct list_head *ptr; ++ struct list_head *tmp; ++ drm_mach64_freelist_t *entry; ++ int i; ++ ++ if (list_empty(&dev_priv->pending)) ++ return 0; ++ ++ /* Iterate the pending list and move all buffers into the freelist... */ ++ i = 0; ++ list_for_each_safe(ptr, tmp, &dev_priv->pending) { ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ if (entry->discard) { ++ entry->buf->pending = 0; ++ list_del(ptr); ++ list_add_tail(ptr, &dev_priv->free_list); ++ i++; ++ } ++ } ++ ++ DRM_DEBUG("released %d buffers from pending list\n", i); ++ ++ return 0; ++} ++ ++static int mach64_do_reclaim_completed(drm_mach64_private_t *dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ struct list_head *ptr; ++ struct list_head *tmp; ++ drm_mach64_freelist_t *entry; ++ u32 head, tail, ofs; ++ ++ mach64_ring_tick(dev_priv, ring); ++ head = ring->head; ++ tail = ring->tail; ++ ++ if (head == tail) { ++#if MACH64_EXTRA_CHECKING ++ if (MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE) { ++ DRM_ERROR("Empty ring with non-idle engine!\n"); ++ mach64_dump_ring_info(dev_priv); ++ return -1; ++ } ++#endif ++ /* last pass is complete, so release everything */ ++ mach64_do_release_used_buffers(dev_priv); ++ DRM_DEBUG("idle engine, freed all buffers.\n"); ++ if (list_empty(&dev_priv->free_list)) { ++ DRM_ERROR("Freelist empty with idle engine\n"); ++ return -1; ++ } ++ return 0; ++ } ++ /* Look for a completed buffer and bail out of the loop ++ * as soon as we find one -- don't waste time trying ++ * to free extra bufs here, leave that to do_release_used_buffers ++ */ ++ list_for_each_safe(ptr, tmp, &dev_priv->pending) { ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ ofs = entry->ring_ofs; ++ if (entry->discard && ++ ((head < tail && (ofs < head || ofs >= tail)) || ++ (head > tail && (ofs < head && ofs >= tail)))) { ++#if MACH64_EXTRA_CHECKING ++ int i; ++ ++ for (i = head; i != tail; i = (i + 4) & ring->tail_mask) ++ { ++ u32 o1 = le32_to_cpu(((u32 *) ring-> ++ start)[i + 1]); ++ u32 o2 = GETBUFADDR(entry->buf); ++ ++ if (o1 == o2) { ++ DRM_ERROR ++ ("Attempting to free used buffer: " ++ "i=%d buf=0x%08x\n", ++ i, o1); ++ mach64_dump_ring_info(dev_priv); ++ return -1; ++ } ++ } ++#endif ++ /* found a processed buffer */ ++ entry->buf->pending = 0; ++ list_del(ptr); ++ list_add_tail(ptr, &dev_priv->free_list); ++ DRM_DEBUG ++ ("freed processed buffer (head=%d tail=%d " ++ "buf ring ofs=%d).\n", ++ head, tail, ofs); ++ return 0; ++ } ++ } ++ ++ return 1; ++} ++ ++struct drm_buf *mach64_freelist_get(drm_mach64_private_t *dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ drm_mach64_freelist_t *entry; ++ struct list_head *ptr; ++ int t; ++ ++ if (list_empty(&dev_priv->free_list)) { ++ if (list_empty(&dev_priv->pending)) { ++ DRM_ERROR ++ ("Couldn't get buffer - pending and free lists empty\n"); ++ t = 0; ++ list_for_each(ptr, &dev_priv->placeholders) { ++ t++; ++ } ++ DRM_INFO("Placeholders: %d\n", t); ++ return NULL; ++ } ++ ++ for (t = 0; t < dev_priv->usec_timeout; t++) { ++ int ret; ++ ++ ret = mach64_do_reclaim_completed(dev_priv); ++ if (ret == 0) ++ goto _freelist_entry_found; ++ if (ret < 0) ++ return NULL; ++ ++ DRM_UDELAY(1); ++ } ++ mach64_dump_ring_info(dev_priv); ++ DRM_ERROR ++ ("timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n", ++ ring->head_addr, ring->head, 
ring->tail); ++ return NULL; ++ } ++ ++ _freelist_entry_found: ++ ptr = dev_priv->free_list.next; ++ list_del(ptr); ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ entry->buf->used = 0; ++ list_add_tail(ptr, &dev_priv->placeholders); ++ return entry->buf; ++} ++ ++int mach64_freelist_put(drm_mach64_private_t *dev_priv, struct drm_buf *copy_buf) ++{ ++ struct list_head *ptr; ++ drm_mach64_freelist_t *entry; ++ ++#if MACH64_EXTRA_CHECKING ++ list_for_each(ptr, &dev_priv->pending) { ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ if (copy_buf == entry->buf) { ++ DRM_ERROR("Trying to release a pending buf\n"); ++ return -EFAULT; ++ } ++ } ++#endif ++ ptr = dev_priv->placeholders.next; ++ entry = list_entry(ptr, drm_mach64_freelist_t, list); ++ copy_buf->pending = 0; ++ copy_buf->used = 0; ++ entry->buf = copy_buf; ++ entry->discard = 1; ++ list_del(ptr); ++ list_add_tail(ptr, &dev_priv->free_list); ++ ++ return 0; ++} ++ ++/*@}*/ ++ ++ ++/*******************************************************************/ ++/** \name DMA buffer request and submission IOCTL handler */ ++/*@{*/ ++ ++static int mach64_dma_get_buffers(struct drm_device *dev, ++ struct drm_file *file_priv, ++ struct drm_dma * d) ++{ ++ int i; ++ struct drm_buf *buf; ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ ++ for (i = d->granted_count; i < d->request_count; i++) { ++ buf = mach64_freelist_get(dev_priv); ++#if MACH64_EXTRA_CHECKING ++ if (!buf) ++ return -EFAULT; ++#else ++ if (!buf) ++ return -EAGAIN; ++#endif ++ ++ buf->file_priv = file_priv; ++ ++ if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, ++ sizeof(buf->idx))) ++ return -EFAULT; ++ if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, ++ sizeof(buf->total))) ++ return -EFAULT; ++ ++ d->granted_count++; ++ } ++ return 0; ++} ++ ++int mach64_dma_buffers(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_dma *d = data; ++ int ret = 0; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* Please don't send us buffers. ++ */ ++ if (d->send_count != 0) { ++ DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", ++ DRM_CURRENTPID, d->send_count); ++ return -EINVAL; ++ } ++ ++ /* We'll send you buffers. ++ */ ++ if (d->request_count < 0 || d->request_count > dma->buf_count) { ++ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", ++ DRM_CURRENTPID, d->request_count, dma->buf_count); ++ ret = -EINVAL; ++ } ++ ++ d->granted_count = 0; ++ ++ if (d->request_count) { ++ ret = mach64_dma_get_buffers(dev, file_priv, d); ++ } ++ ++ return ret; ++} ++ ++void mach64_driver_lastclose(struct drm_device * dev) ++{ ++ mach64_do_cleanup_dma(dev); ++} ++ ++/*@}*/ +diff -Nurd git/drivers/gpu/drm-tungsten/mach64_drm.h git-nokia/drivers/gpu/drm-tungsten/mach64_drm.h +--- git/drivers/gpu/drm-tungsten/mach64_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/mach64_drm.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,256 @@ ++/* mach64_drm.h -- Public header for the mach64 driver -*- linux-c -*- ++ * Created: Thu Nov 30 20:04:32 2000 by gareth@valinux.com ++ */ ++/* ++ * Copyright 2000 Gareth Hughes ++ * Copyright 2002 Frank C. Earl ++ * Copyright 2002-2003 Leif Delgass ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Gareth Hughes ++ * Frank C. Earl ++ * Leif Delgass ++ */ ++ ++#ifndef __MACH64_DRM_H__ ++#define __MACH64_DRM_H__ ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the Xserver file (mach64_sarea.h) ++ */ ++#ifndef __MACH64_SAREA_DEFINES__ ++#define __MACH64_SAREA_DEFINES__ ++ ++/* What needs to be changed for the current vertex buffer? ++ * GH: We're going to be pedantic about this. We want the card to do as ++ * little as possible, so let's avoid having it fetch a whole bunch of ++ * register values that don't change all that often, if at all. ++ */ ++#define MACH64_UPLOAD_DST_OFF_PITCH 0x0001 ++#define MACH64_UPLOAD_Z_OFF_PITCH 0x0002 ++#define MACH64_UPLOAD_Z_ALPHA_CNTL 0x0004 ++#define MACH64_UPLOAD_SCALE_3D_CNTL 0x0008 ++#define MACH64_UPLOAD_DP_FOG_CLR 0x0010 ++#define MACH64_UPLOAD_DP_WRITE_MASK 0x0020 ++#define MACH64_UPLOAD_DP_PIX_WIDTH 0x0040 ++#define MACH64_UPLOAD_SETUP_CNTL 0x0080 ++#define MACH64_UPLOAD_MISC 0x0100 ++#define MACH64_UPLOAD_TEXTURE 0x0200 ++#define MACH64_UPLOAD_TEX0IMAGE 0x0400 ++#define MACH64_UPLOAD_TEX1IMAGE 0x0800 ++#define MACH64_UPLOAD_CLIPRECTS 0x1000 /* handled client-side */ ++#define MACH64_UPLOAD_CONTEXT 0x00ff ++#define MACH64_UPLOAD_ALL 0x1fff ++ ++/* DMA buffer size ++ */ ++#define MACH64_BUFFER_SIZE 16384 ++ ++/* Max number of swaps allowed on the ring ++ * before the client must wait ++ */ ++#define MACH64_MAX_QUEUED_FRAMES 3U ++ ++/* Byte offsets for host blit buffer data ++ */ ++#define MACH64_HOSTDATA_BLIT_OFFSET 104 ++ ++/* Keep these small for testing. 
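++ * (MACH64_NR_SAREA_CLIPRECTS sizes the boxes[] array in the
++ * drm_mach64_sarea struct below, so it directly affects SAREA size.)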
++ */ ++#define MACH64_NR_SAREA_CLIPRECTS 8 ++ ++#define MACH64_CARD_HEAP 0 ++#define MACH64_AGP_HEAP 1 ++#define MACH64_NR_TEX_HEAPS 2 ++#define MACH64_NR_TEX_REGIONS 64 ++#define MACH64_LOG_TEX_GRANULARITY 16 ++ ++#define MACH64_TEX_MAXLEVELS 1 ++ ++#define MACH64_NR_CONTEXT_REGS 15 ++#define MACH64_NR_TEXTURE_REGS 4 ++ ++#endif /* __MACH64_SAREA_DEFINES__ */ ++ ++typedef struct { ++ unsigned int dst_off_pitch; ++ ++ unsigned int z_off_pitch; ++ unsigned int z_cntl; ++ unsigned int alpha_tst_cntl; ++ ++ unsigned int scale_3d_cntl; ++ ++ unsigned int sc_left_right; ++ unsigned int sc_top_bottom; ++ ++ unsigned int dp_fog_clr; ++ unsigned int dp_write_mask; ++ unsigned int dp_pix_width; ++ unsigned int dp_mix; ++ unsigned int dp_src; ++ ++ unsigned int clr_cmp_cntl; ++ unsigned int gui_traj_cntl; ++ ++ unsigned int setup_cntl; ++ ++ unsigned int tex_size_pitch; ++ unsigned int tex_cntl; ++ unsigned int secondary_tex_off; ++ unsigned int tex_offset; ++} drm_mach64_context_regs_t; ++ ++typedef struct drm_mach64_sarea { ++ /* The channel for communication of state information to the kernel ++ * on firing a vertex dma buffer. ++ */ ++ drm_mach64_context_regs_t context_state; ++ unsigned int dirty; ++ unsigned int vertsize; ++ ++ /* The current cliprects, or a subset thereof. ++ */ ++ struct drm_clip_rect boxes[MACH64_NR_SAREA_CLIPRECTS]; ++ unsigned int nbox; ++ ++ /* Counters for client-side throttling of rendering clients. ++ */ ++ unsigned int frames_queued; ++ ++ /* Texture memory LRU. ++ */ ++ struct drm_tex_region tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS + ++ 1]; ++ unsigned int tex_age[MACH64_NR_TEX_HEAPS]; ++ int ctx_owner; ++} drm_mach64_sarea_t; ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the Xserver file (mach64_common.h) ++ */ ++ ++/* Mach64 specific ioctls ++ * The device specific ioctl range is 0x40 to 0x79. 
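++ * The DRM_IOCTL_MACH64_* definitions below land in that range by adding
++ * the DRM_MACH64_* numbers to DRM_COMMAND_BASE (0x40).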
++ */ ++ ++#define DRM_MACH64_INIT 0x00 ++#define DRM_MACH64_IDLE 0x01 ++#define DRM_MACH64_RESET 0x02 ++#define DRM_MACH64_SWAP 0x03 ++#define DRM_MACH64_CLEAR 0x04 ++#define DRM_MACH64_VERTEX 0x05 ++#define DRM_MACH64_BLIT 0x06 ++#define DRM_MACH64_FLUSH 0x07 ++#define DRM_MACH64_GETPARAM 0x08 ++ ++#define DRM_IOCTL_MACH64_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_INIT, drm_mach64_init_t) ++#define DRM_IOCTL_MACH64_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_IDLE ) ++#define DRM_IOCTL_MACH64_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_RESET ) ++#define DRM_IOCTL_MACH64_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_SWAP ) ++#define DRM_IOCTL_MACH64_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_CLEAR, drm_mach64_clear_t) ++#define DRM_IOCTL_MACH64_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_VERTEX, drm_mach64_vertex_t) ++#define DRM_IOCTL_MACH64_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_BLIT, drm_mach64_blit_t) ++#define DRM_IOCTL_MACH64_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_FLUSH ) ++#define DRM_IOCTL_MACH64_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_MACH64_GETPARAM, drm_mach64_getparam_t) ++ ++/* Buffer flags for clears ++ */ ++#define MACH64_FRONT 0x1 ++#define MACH64_BACK 0x2 ++#define MACH64_DEPTH 0x4 ++ ++/* Primitive types for vertex buffers ++ */ ++#define MACH64_PRIM_POINTS 0x00000000 ++#define MACH64_PRIM_LINES 0x00000001 ++#define MACH64_PRIM_LINE_LOOP 0x00000002 ++#define MACH64_PRIM_LINE_STRIP 0x00000003 ++#define MACH64_PRIM_TRIANGLES 0x00000004 ++#define MACH64_PRIM_TRIANGLE_STRIP 0x00000005 ++#define MACH64_PRIM_TRIANGLE_FAN 0x00000006 ++#define MACH64_PRIM_QUADS 0x00000007 ++#define MACH64_PRIM_QUAD_STRIP 0x00000008 ++#define MACH64_PRIM_POLYGON 0x00000009 ++ ++typedef enum _drm_mach64_dma_mode_t { ++ MACH64_MODE_DMA_ASYNC, ++ MACH64_MODE_DMA_SYNC, ++ MACH64_MODE_MMIO ++} drm_mach64_dma_mode_t; ++ ++typedef struct drm_mach64_init { ++ enum { ++ DRM_MACH64_INIT_DMA = 0x01, ++ DRM_MACH64_CLEANUP_DMA = 0x02 ++ } func; ++ ++ unsigned long sarea_priv_offset; ++ int is_pci; ++ drm_mach64_dma_mode_t dma_mode; ++ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ ++ unsigned long fb_offset; ++ unsigned long mmio_offset; ++ unsigned long ring_offset; ++ unsigned long buffers_offset; ++ unsigned long agp_textures_offset; ++} drm_mach64_init_t; ++ ++typedef struct drm_mach64_clear { ++ unsigned int flags; ++ int x, y, w, h; ++ unsigned int clear_color; ++ unsigned int clear_depth; ++} drm_mach64_clear_t; ++ ++typedef struct drm_mach64_vertex { ++ int prim; ++ void *buf; /* Address of vertex buffer */ ++ unsigned long used; /* Number of bytes in buffer */ ++ int discard; /* Client finished with buffer? 
*/ ++} drm_mach64_vertex_t; ++ ++typedef struct drm_mach64_blit { ++ void *buf; ++ int pitch; ++ int offset; ++ int format; ++ unsigned short x, y; ++ unsigned short width, height; ++} drm_mach64_blit_t; ++ ++typedef struct drm_mach64_getparam { ++ enum { ++ MACH64_PARAM_FRAMES_QUEUED = 0x01, ++ MACH64_PARAM_IRQ_NR = 0x02 ++ } param; ++ void *value; ++} drm_mach64_getparam_t; ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/mach64_drv.c git-nokia/drivers/gpu/drm-tungsten/mach64_drv.c +--- git/drivers/gpu/drm-tungsten/mach64_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/mach64_drv.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,105 @@ ++/* mach64_drv.c -- mach64 (Rage Pro) driver -*- linux-c -*- ++ * Created: Fri Nov 24 18:34:32 2000 by gareth@valinux.com ++ * ++ * Copyright 2000 Gareth Hughes ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * GARETH HUGHES BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Gareth Hughes ++ * Leif Delgass ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mach64_drm.h" ++#include "mach64_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ mach64_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA ++ | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, ++ .lastclose = mach64_driver_lastclose, ++ .get_vblank_counter = mach64_get_vblank_counter, ++ .enable_vblank = mach64_enable_vblank, ++ .disable_vblank = mach64_disable_vblank, ++ .irq_preinstall = mach64_driver_irq_preinstall, ++ .irq_postinstall = mach64_driver_irq_postinstall, ++ .irq_uninstall = mach64_driver_irq_uninstall, ++ .irq_handler = mach64_driver_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = mach64_ioctls, ++ .dma_ioctl = mach64_dma_buffers, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++ ++static int __init mach64_init(void) ++{ ++ driver.num_ioctls = mach64_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit mach64_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(mach64_init); ++module_exit(mach64_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurd git/drivers/gpu/drm-tungsten/mach64_drv.h git-nokia/drivers/gpu/drm-tungsten/mach64_drv.h +--- git/drivers/gpu/drm-tungsten/mach64_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/mach64_drv.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,859 @@ ++/* mach64_drv.h -- Private header for mach64 driver -*- linux-c -*- ++ * Created: Fri Nov 24 22:07:58 2000 by gareth@valinux.com ++ */ ++/* ++ * Copyright 2000 Gareth Hughes ++ * Copyright 2002 Frank C. Earl ++ * Copyright 2002-2003 Leif Delgass ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Gareth Hughes
++ * Frank C. Earl
++ * Leif Delgass
++ * José Fonseca
++ */
++
++#ifndef __MACH64_DRV_H__
++#define __MACH64_DRV_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca"
++
++#define DRIVER_NAME "mach64"
++#define DRIVER_DESC "DRM module for the ATI Rage Pro"
++#define DRIVER_DATE "20060718"
++
++#define DRIVER_MAJOR 2
++#define DRIVER_MINOR 0
++#define DRIVER_PATCHLEVEL 0
++
++/* FIXME: remove these when not needed */
++/* Development driver options */
++#define MACH64_EXTRA_CHECKING 0 /* Extra sanity checks for DMA/freelist management */
++#define MACH64_VERBOSE 0 /* Verbose debugging output */
++
++typedef struct drm_mach64_freelist {
++ struct list_head list; /* List pointers for free_list, placeholders, or pending list */
++ struct drm_buf *buf; /* Pointer to the buffer */
++ int discard; /* This flag is set when we're done (re)using a buffer */
++ u32 ring_ofs; /* dword offset in ring of last descriptor for this buffer */
++} drm_mach64_freelist_t;
++
++typedef struct drm_mach64_descriptor_ring {
++ void *start; /* write pointer (cpu address) to start of descriptor ring */
++ u32 start_addr; /* bus address of beginning of descriptor ring */
++ int size; /* size of ring in bytes */
++
++ u32 head_addr; /* bus address of descriptor ring head */
++ u32 head; /* dword offset of descriptor ring head */
++ u32 tail; /* dword offset of descriptor ring tail */
++ u32 tail_mask; /* mask used to wrap ring */
++ int space; /* number of free bytes in ring */
++} drm_mach64_descriptor_ring_t;
++
++typedef struct drm_mach64_private {
++ drm_mach64_sarea_t *sarea_priv;
++
++ int is_pci;
++ drm_mach64_dma_mode_t driver_mode; /* Async DMA, sync DMA, or MMIO */
++
++ int usec_timeout; /* Timeout for the wait functions */
++
++ drm_mach64_descriptor_ring_t ring; /* DMA descriptor table (ring buffer) */
++ int ring_running; /* Whether bus mastering is enabled */
++
++ struct list_head free_list; /* Free-list head */
++ struct list_head placeholders; /* Placeholder list for buffers held by clients */
++ struct list_head pending; /* Buffers pending completion */
++
++ u32 frame_ofs[MACH64_MAX_QUEUED_FRAMES]; /* dword ring offsets of most recent frame swaps */
++
++ unsigned int fb_bpp;
++ unsigned int front_offset, front_pitch;
++ unsigned int back_offset, back_pitch;
++
++ unsigned int depth_bpp;
++ unsigned int depth_offset, depth_pitch;
++
++ atomic_t vbl_received; /**< Number of vblanks received.
*/ ++ ++ u32 front_offset_pitch; ++ u32 back_offset_pitch; ++ u32 depth_offset_pitch; ++ ++ drm_local_map_t *sarea; ++ drm_local_map_t *fb; ++ drm_local_map_t *mmio; ++ drm_local_map_t *ring_map; ++ drm_local_map_t *dev_buffers; /* this is a pointer to a structure in dev */ ++ drm_local_map_t *agp_textures; ++} drm_mach64_private_t; ++ ++extern struct drm_ioctl_desc mach64_ioctls[]; ++extern int mach64_max_ioctl; ++ ++ /* mach64_dma.c */ ++extern int mach64_dma_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_dma_idle(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_dma_flush(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_engine_reset(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_dma_buffers(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern void mach64_driver_lastclose(struct drm_device * dev); ++ ++extern int mach64_init_freelist(struct drm_device * dev); ++extern void mach64_destroy_freelist(struct drm_device * dev); ++extern struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv); ++extern int mach64_freelist_put(drm_mach64_private_t * dev_priv, ++ struct drm_buf * copy_buf); ++ ++extern int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv, ++ int entries); ++extern int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv); ++extern int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n); ++extern int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv); ++extern int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv); ++extern void mach64_dump_engine_info(drm_mach64_private_t * dev_priv); ++extern void mach64_dump_ring_info(drm_mach64_private_t * dev_priv); ++extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv); ++ ++extern int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv, ++ drm_mach64_freelist_t *_entry); ++extern int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv, ++ drm_mach64_freelist_t *_entry); ++ ++extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv); ++extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv); ++extern int mach64_do_cleanup_dma(struct drm_device * dev); ++ ++ /* mach64_state.c */ ++extern int mach64_dma_clear(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_dma_swap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_dma_vertex(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_dma_blit(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mach64_get_param(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++ ++extern u32 mach64_get_vblank_counter(struct drm_device *dev, int crtc); ++extern int mach64_enable_vblank(struct drm_device *dev, int crtc); ++extern void mach64_disable_vblank(struct drm_device *dev, int crtc); ++extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS); ++extern void mach64_driver_irq_preinstall(struct drm_device *dev); ++extern int mach64_driver_irq_postinstall(struct drm_device *dev); ++extern void mach64_driver_irq_uninstall(struct drm_device *dev); ++ ++/* ================================================================ ++ * Registers ++ */ ++ ++#define MACH64_AGP_BASE 0x0148 ++#define MACH64_AGP_CNTL 0x014c ++#define MACH64_ALPHA_TST_CNTL 0x0550 ++ ++#define MACH64_DSP_CONFIG 0x0420 
++#define MACH64_DSP_ON_OFF 0x0424 ++#define MACH64_EXT_MEM_CNTL 0x04ac ++#define MACH64_GEN_TEST_CNTL 0x04d0 ++#define MACH64_HW_DEBUG 0x047c ++#define MACH64_MEM_ADDR_CONFIG 0x0434 ++#define MACH64_MEM_BUF_CNTL 0x042c ++#define MACH64_MEM_CNTL 0x04b0 ++ ++#define MACH64_BM_ADDR 0x0648 ++#define MACH64_BM_COMMAND 0x0188 ++#define MACH64_BM_DATA 0x0648 ++#define MACH64_BM_FRAME_BUF_OFFSET 0x0180 ++#define MACH64_BM_GUI_TABLE 0x01b8 ++#define MACH64_BM_GUI_TABLE_CMD 0x064c ++# define MACH64_CIRCULAR_BUF_SIZE_16KB (0 << 0) ++# define MACH64_CIRCULAR_BUF_SIZE_32KB (1 << 0) ++# define MACH64_CIRCULAR_BUF_SIZE_64KB (2 << 0) ++# define MACH64_CIRCULAR_BUF_SIZE_128KB (3 << 0) ++# define MACH64_LAST_DESCRIPTOR (1 << 31) ++#define MACH64_BM_HOSTDATA 0x0644 ++#define MACH64_BM_STATUS 0x018c ++#define MACH64_BM_SYSTEM_MEM_ADDR 0x0184 ++#define MACH64_BM_SYSTEM_TABLE 0x01bc ++#define MACH64_BUS_CNTL 0x04a0 ++# define MACH64_BUS_MSTR_RESET (1 << 1) ++# define MACH64_BUS_APER_REG_DIS (1 << 4) ++# define MACH64_BUS_FLUSH_BUF (1 << 2) ++# define MACH64_BUS_MASTER_DIS (1 << 6) ++# define MACH64_BUS_EXT_REG_EN (1 << 27) ++ ++#define MACH64_CLR_CMP_CLR 0x0700 ++#define MACH64_CLR_CMP_CNTL 0x0708 ++#define MACH64_CLR_CMP_MASK 0x0704 ++#define MACH64_CONFIG_CHIP_ID 0x04e0 ++#define MACH64_CONFIG_CNTL 0x04dc ++#define MACH64_CONFIG_STAT0 0x04e4 ++#define MACH64_CONFIG_STAT1 0x0494 ++#define MACH64_CONFIG_STAT2 0x0498 ++#define MACH64_CONTEXT_LOAD_CNTL 0x072c ++#define MACH64_CONTEXT_MASK 0x0720 ++#define MACH64_COMPOSITE_SHADOW_ID 0x0798 ++#define MACH64_CRC_SIG 0x04e8 ++#define MACH64_CUSTOM_MACRO_CNTL 0x04d4 ++ ++#define MACH64_DP_BKGD_CLR 0x06c0 ++#define MACH64_DP_FOG_CLR 0x06c4 ++#define MACH64_DP_FGRD_BKGD_CLR 0x06e0 ++#define MACH64_DP_FRGD_CLR 0x06c4 ++#define MACH64_DP_FGRD_CLR_MIX 0x06dc ++ ++#define MACH64_DP_MIX 0x06d4 ++# define BKGD_MIX_NOT_D (0 << 0) ++# define BKGD_MIX_ZERO (1 << 0) ++# define BKGD_MIX_ONE (2 << 0) ++# define MACH64_BKGD_MIX_D (3 << 0) ++# define BKGD_MIX_NOT_S (4 << 0) ++# define BKGD_MIX_D_XOR_S (5 << 0) ++# define BKGD_MIX_NOT_D_XOR_S (6 << 0) ++# define MACH64_BKGD_MIX_S (7 << 0) ++# define BKGD_MIX_NOT_D_OR_NOT_S (8 << 0) ++# define BKGD_MIX_D_OR_NOT_S (9 << 0) ++# define BKGD_MIX_NOT_D_OR_S (10 << 0) ++# define BKGD_MIX_D_OR_S (11 << 0) ++# define BKGD_MIX_D_AND_S (12 << 0) ++# define BKGD_MIX_NOT_D_AND_S (13 << 0) ++# define BKGD_MIX_D_AND_NOT_S (14 << 0) ++# define BKGD_MIX_NOT_D_AND_NOT_S (15 << 0) ++# define BKGD_MIX_D_PLUS_S_DIV2 (23 << 0) ++# define FRGD_MIX_NOT_D (0 << 16) ++# define FRGD_MIX_ZERO (1 << 16) ++# define FRGD_MIX_ONE (2 << 16) ++# define FRGD_MIX_D (3 << 16) ++# define FRGD_MIX_NOT_S (4 << 16) ++# define FRGD_MIX_D_XOR_S (5 << 16) ++# define FRGD_MIX_NOT_D_XOR_S (6 << 16) ++# define MACH64_FRGD_MIX_S (7 << 16) ++# define FRGD_MIX_NOT_D_OR_NOT_S (8 << 16) ++# define FRGD_MIX_D_OR_NOT_S (9 << 16) ++# define FRGD_MIX_NOT_D_OR_S (10 << 16) ++# define FRGD_MIX_D_OR_S (11 << 16) ++# define FRGD_MIX_D_AND_S (12 << 16) ++# define FRGD_MIX_NOT_D_AND_S (13 << 16) ++# define FRGD_MIX_D_AND_NOT_S (14 << 16) ++# define FRGD_MIX_NOT_D_AND_NOT_S (15 << 16) ++# define FRGD_MIX_D_PLUS_S_DIV2 (23 << 16) ++ ++#define MACH64_DP_PIX_WIDTH 0x06d0 ++# define MACH64_HOST_TRIPLE_ENABLE (1 << 13) ++# define MACH64_BYTE_ORDER_MSB_TO_LSB (0 << 24) ++# define MACH64_BYTE_ORDER_LSB_TO_MSB (1 << 24) ++ ++#define MACH64_DP_SRC 0x06d8 ++# define MACH64_BKGD_SRC_BKGD_CLR (0 << 0) ++# define MACH64_BKGD_SRC_FRGD_CLR (1 << 0) ++# define MACH64_BKGD_SRC_HOST (2 << 0) ++# define 
MACH64_BKGD_SRC_BLIT (3 << 0) ++# define MACH64_BKGD_SRC_PATTERN (4 << 0) ++# define MACH64_BKGD_SRC_3D (5 << 0) ++# define MACH64_FRGD_SRC_BKGD_CLR (0 << 8) ++# define MACH64_FRGD_SRC_FRGD_CLR (1 << 8) ++# define MACH64_FRGD_SRC_HOST (2 << 8) ++# define MACH64_FRGD_SRC_BLIT (3 << 8) ++# define MACH64_FRGD_SRC_PATTERN (4 << 8) ++# define MACH64_FRGD_SRC_3D (5 << 8) ++# define MACH64_MONO_SRC_ONE (0 << 16) ++# define MACH64_MONO_SRC_PATTERN (1 << 16) ++# define MACH64_MONO_SRC_HOST (2 << 16) ++# define MACH64_MONO_SRC_BLIT (3 << 16) ++ ++#define MACH64_DP_WRITE_MASK 0x06c8 ++ ++#define MACH64_DST_CNTL 0x0530 ++# define MACH64_DST_X_RIGHT_TO_LEFT (0 << 0) ++# define MACH64_DST_X_LEFT_TO_RIGHT (1 << 0) ++# define MACH64_DST_Y_BOTTOM_TO_TOP (0 << 1) ++# define MACH64_DST_Y_TOP_TO_BOTTOM (1 << 1) ++# define MACH64_DST_X_MAJOR (0 << 2) ++# define MACH64_DST_Y_MAJOR (1 << 2) ++# define MACH64_DST_X_TILE (1 << 3) ++# define MACH64_DST_Y_TILE (1 << 4) ++# define MACH64_DST_LAST_PEL (1 << 5) ++# define MACH64_DST_POLYGON_ENABLE (1 << 6) ++# define MACH64_DST_24_ROTATION_ENABLE (1 << 7) ++ ++#define MACH64_DST_HEIGHT_WIDTH 0x0518 ++#define MACH64_DST_OFF_PITCH 0x0500 ++#define MACH64_DST_WIDTH_HEIGHT 0x06ec ++#define MACH64_DST_X_Y 0x06e8 ++#define MACH64_DST_Y_X 0x050c ++ ++#define MACH64_FIFO_STAT 0x0710 ++# define MACH64_FIFO_SLOT_MASK 0x0000ffff ++# define MACH64_FIFO_ERR (1 << 31) ++ ++#define MACH64_GEN_TEST_CNTL 0x04d0 ++# define MACH64_GUI_ENGINE_ENABLE (1 << 8) ++#define MACH64_GUI_CMDFIFO_DEBUG 0x0170 ++#define MACH64_GUI_CMDFIFO_DATA 0x0174 ++#define MACH64_GUI_CNTL 0x0178 ++# define MACH64_CMDFIFO_SIZE_MASK 0x00000003ul ++# define MACH64_CMDFIFO_SIZE_192 0x00000000ul ++# define MACH64_CMDFIFO_SIZE_128 0x00000001ul ++# define MACH64_CMDFIFO_SIZE_64 0x00000002ul ++#define MACH64_GUI_STAT 0x0738 ++# define MACH64_GUI_ACTIVE (1 << 0) ++#define MACH64_GUI_TRAJ_CNTL 0x0730 ++ ++#define MACH64_HOST_CNTL 0x0640 ++#define MACH64_HOST_DATA0 0x0600 ++ ++#define MACH64_ONE_OVER_AREA 0x029c ++#define MACH64_ONE_OVER_AREA_UC 0x0300 ++ ++#define MACH64_PAT_REG0 0x0680 ++#define MACH64_PAT_REG1 0x0684 ++ ++#define MACH64_SC_LEFT 0x06a0 ++#define MACH64_SC_RIGHT 0x06a4 ++#define MACH64_SC_LEFT_RIGHT 0x06a8 ++#define MACH64_SC_TOP 0x06ac ++#define MACH64_SC_BOTTOM 0x06b0 ++#define MACH64_SC_TOP_BOTTOM 0x06b4 ++ ++#define MACH64_SCALE_3D_CNTL 0x05fc ++#define MACH64_SCRATCH_REG0 0x0480 ++#define MACH64_SCRATCH_REG1 0x0484 ++#define MACH64_SECONDARY_TEX_OFF 0x0778 ++#define MACH64_SETUP_CNTL 0x0304 ++#define MACH64_SRC_CNTL 0x05b4 ++# define MACH64_SRC_BM_ENABLE (1 << 8) ++# define MACH64_SRC_BM_SYNC (1 << 9) ++# define MACH64_SRC_BM_OP_FRAME_TO_SYSTEM (0 << 10) ++# define MACH64_SRC_BM_OP_SYSTEM_TO_FRAME (1 << 10) ++# define MACH64_SRC_BM_OP_REG_TO_SYSTEM (2 << 10) ++# define MACH64_SRC_BM_OP_SYSTEM_TO_REG (3 << 10) ++#define MACH64_SRC_HEIGHT1 0x0594 ++#define MACH64_SRC_HEIGHT2 0x05ac ++#define MACH64_SRC_HEIGHT1_WIDTH1 0x0598 ++#define MACH64_SRC_HEIGHT2_WIDTH2 0x05b0 ++#define MACH64_SRC_OFF_PITCH 0x0580 ++#define MACH64_SRC_WIDTH1 0x0590 ++#define MACH64_SRC_Y_X 0x058c ++ ++#define MACH64_TEX_0_OFF 0x05c0 ++#define MACH64_TEX_CNTL 0x0774 ++#define MACH64_TEX_SIZE_PITCH 0x0770 ++#define MACH64_TIMER_CONFIG 0x0428 ++ ++#define MACH64_VERTEX_1_ARGB 0x0254 ++#define MACH64_VERTEX_1_S 0x0240 ++#define MACH64_VERTEX_1_SECONDARY_S 0x0328 ++#define MACH64_VERTEX_1_SECONDARY_T 0x032c ++#define MACH64_VERTEX_1_SECONDARY_W 0x0330 ++#define MACH64_VERTEX_1_SPEC_ARGB 0x024c ++#define MACH64_VERTEX_1_T 0x0244 
++#define MACH64_VERTEX_1_W 0x0248 ++#define MACH64_VERTEX_1_X_Y 0x0258 ++#define MACH64_VERTEX_1_Z 0x0250 ++#define MACH64_VERTEX_2_ARGB 0x0274 ++#define MACH64_VERTEX_2_S 0x0260 ++#define MACH64_VERTEX_2_SECONDARY_S 0x0334 ++#define MACH64_VERTEX_2_SECONDARY_T 0x0338 ++#define MACH64_VERTEX_2_SECONDARY_W 0x033c ++#define MACH64_VERTEX_2_SPEC_ARGB 0x026c ++#define MACH64_VERTEX_2_T 0x0264 ++#define MACH64_VERTEX_2_W 0x0268 ++#define MACH64_VERTEX_2_X_Y 0x0278 ++#define MACH64_VERTEX_2_Z 0x0270 ++#define MACH64_VERTEX_3_ARGB 0x0294 ++#define MACH64_VERTEX_3_S 0x0280 ++#define MACH64_VERTEX_3_SECONDARY_S 0x02a0 ++#define MACH64_VERTEX_3_SECONDARY_T 0x02a4 ++#define MACH64_VERTEX_3_SECONDARY_W 0x02a8 ++#define MACH64_VERTEX_3_SPEC_ARGB 0x028c ++#define MACH64_VERTEX_3_T 0x0284 ++#define MACH64_VERTEX_3_W 0x0288 ++#define MACH64_VERTEX_3_X_Y 0x0298 ++#define MACH64_VERTEX_3_Z 0x0290 ++ ++#define MACH64_Z_CNTL 0x054c ++#define MACH64_Z_OFF_PITCH 0x0548 ++ ++#define MACH64_CRTC_VLINE_CRNT_VLINE 0x0410 ++# define MACH64_CRTC_VLINE_MASK 0x000007ff ++# define MACH64_CRTC_CRNT_VLINE_MASK 0x07ff0000 ++#define MACH64_CRTC_OFF_PITCH 0x0414 ++#define MACH64_CRTC_INT_CNTL 0x0418 ++# define MACH64_CRTC_VBLANK (1 << 0) ++# define MACH64_CRTC_VBLANK_INT_EN (1 << 1) ++# define MACH64_CRTC_VBLANK_INT (1 << 2) ++# define MACH64_CRTC_VLINE_INT_EN (1 << 3) ++# define MACH64_CRTC_VLINE_INT (1 << 4) ++# define MACH64_CRTC_VLINE_SYNC (1 << 5) /* 0=even, 1=odd */ ++# define MACH64_CRTC_FRAME (1 << 6) /* 0=even, 1=odd */ ++# define MACH64_CRTC_SNAPSHOT_INT_EN (1 << 7) ++# define MACH64_CRTC_SNAPSHOT_INT (1 << 8) ++# define MACH64_CRTC_I2C_INT_EN (1 << 9) ++# define MACH64_CRTC_I2C_INT (1 << 10) ++# define MACH64_CRTC2_VBLANK (1 << 11) /* LT Pro */ ++# define MACH64_CRTC2_VBLANK_INT_EN (1 << 12) /* LT Pro */ ++# define MACH64_CRTC2_VBLANK_INT (1 << 13) /* LT Pro */ ++# define MACH64_CRTC2_VLINE_INT_EN (1 << 14) /* LT Pro */ ++# define MACH64_CRTC2_VLINE_INT (1 << 15) /* LT Pro */ ++# define MACH64_CRTC_CAPBUF0_INT_EN (1 << 16) ++# define MACH64_CRTC_CAPBUF0_INT (1 << 17) ++# define MACH64_CRTC_CAPBUF1_INT_EN (1 << 18) ++# define MACH64_CRTC_CAPBUF1_INT (1 << 19) ++# define MACH64_CRTC_OVERLAY_EOF_INT_EN (1 << 20) ++# define MACH64_CRTC_OVERLAY_EOF_INT (1 << 21) ++# define MACH64_CRTC_ONESHOT_CAP_INT_EN (1 << 22) ++# define MACH64_CRTC_ONESHOT_CAP_INT (1 << 23) ++# define MACH64_CRTC_BUSMASTER_EOL_INT_EN (1 << 24) ++# define MACH64_CRTC_BUSMASTER_EOL_INT (1 << 25) ++# define MACH64_CRTC_GP_INT_EN (1 << 26) ++# define MACH64_CRTC_GP_INT (1 << 27) ++# define MACH64_CRTC2_VLINE_SYNC (1 << 28) /* LT Pro */ /* 0=even, 1=odd */ ++# define MACH64_CRTC_SNAPSHOT2_INT_EN (1 << 29) /* LT Pro */ ++# define MACH64_CRTC_SNAPSHOT2_INT (1 << 30) /* LT Pro */ ++# define MACH64_CRTC_VBLANK2_INT (1 << 31) ++# define MACH64_CRTC_INT_ENS \ ++ ( \ ++ MACH64_CRTC_VBLANK_INT_EN | \ ++ MACH64_CRTC_VLINE_INT_EN | \ ++ MACH64_CRTC_SNAPSHOT_INT_EN | \ ++ MACH64_CRTC_I2C_INT_EN | \ ++ MACH64_CRTC2_VBLANK_INT_EN | \ ++ MACH64_CRTC2_VLINE_INT_EN | \ ++ MACH64_CRTC_CAPBUF0_INT_EN | \ ++ MACH64_CRTC_CAPBUF1_INT_EN | \ ++ MACH64_CRTC_OVERLAY_EOF_INT_EN | \ ++ MACH64_CRTC_ONESHOT_CAP_INT_EN | \ ++ MACH64_CRTC_BUSMASTER_EOL_INT_EN | \ ++ MACH64_CRTC_GP_INT_EN | \ ++ MACH64_CRTC_SNAPSHOT2_INT_EN | \ ++ 0 \ ++ ) ++# define MACH64_CRTC_INT_ACKS \ ++ ( \ ++ MACH64_CRTC_VBLANK_INT | \ ++ MACH64_CRTC_VLINE_INT | \ ++ MACH64_CRTC_SNAPSHOT_INT | \ ++ MACH64_CRTC_I2C_INT | \ ++ MACH64_CRTC2_VBLANK_INT | \ ++ MACH64_CRTC2_VLINE_INT | \ ++ MACH64_CRTC_CAPBUF0_INT 
| \ ++ MACH64_CRTC_CAPBUF1_INT | \ ++ MACH64_CRTC_OVERLAY_EOF_INT | \ ++ MACH64_CRTC_ONESHOT_CAP_INT | \ ++ MACH64_CRTC_BUSMASTER_EOL_INT | \ ++ MACH64_CRTC_GP_INT | \ ++ MACH64_CRTC_SNAPSHOT2_INT | \ ++ MACH64_CRTC_VBLANK2_INT | \ ++ 0 \ ++ ) ++ ++#define MACH64_DATATYPE_CI8 2 ++#define MACH64_DATATYPE_ARGB1555 3 ++#define MACH64_DATATYPE_RGB565 4 ++#define MACH64_DATATYPE_ARGB8888 6 ++#define MACH64_DATATYPE_RGB332 7 ++#define MACH64_DATATYPE_Y8 8 ++#define MACH64_DATATYPE_RGB8 9 ++#define MACH64_DATATYPE_VYUY422 11 ++#define MACH64_DATATYPE_YVYU422 12 ++#define MACH64_DATATYPE_AYUV444 14 ++#define MACH64_DATATYPE_ARGB4444 15 ++ ++#define MACH64_READ(reg) DRM_READ32(dev_priv->mmio, (reg) ) ++#define MACH64_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio, (reg), (val) ) ++ ++#define DWMREG0 0x0400 ++#define DWMREG0_END 0x07ff ++#define DWMREG1 0x0000 ++#define DWMREG1_END 0x03ff ++ ++#define ISREG0(r) (((r) >= DWMREG0) && ((r) <= DWMREG0_END)) ++#define DMAREG0(r) (((r) - DWMREG0) >> 2) ++#define DMAREG1(r) ((((r) - DWMREG1) >> 2 ) | 0x0100) ++#define DMAREG(r) (ISREG0(r) ? DMAREG0(r) : DMAREG1(r)) ++ ++#define MMREG0 0x0000 ++#define MMREG0_END 0x00ff ++ ++#define ISMMREG0(r) (((r) >= MMREG0) && ((r) <= MMREG0_END)) ++#define MMSELECT0(r) (((r) << 2) + DWMREG0) ++#define MMSELECT1(r) (((((r) & 0xff) << 2) + DWMREG1)) ++#define MMSELECT(r) (ISMMREG0(r) ? MMSELECT0(r) : MMSELECT1(r)) ++ ++/* ================================================================ ++ * DMA constants ++ */ ++ ++/* DMA descriptor field indices: ++ * The descriptor fields are loaded into the read-only ++ * BM_* system bus master registers during a bus-master operation ++ */ ++#define MACH64_DMA_FRAME_BUF_OFFSET 0 /* BM_FRAME_BUF_OFFSET */ ++#define MACH64_DMA_SYS_MEM_ADDR 1 /* BM_SYSTEM_MEM_ADDR */ ++#define MACH64_DMA_COMMAND 2 /* BM_COMMAND */ ++#define MACH64_DMA_RESERVED 3 /* BM_STATUS */ ++ ++/* BM_COMMAND descriptor field flags */ ++#define MACH64_DMA_HOLD_OFFSET (1<<30) /* Don't increment DMA_FRAME_BUF_OFFSET */ ++#define MACH64_DMA_EOL (1<<31) /* End of descriptor list flag */ ++ ++#define MACH64_DMA_CHUNKSIZE 0x1000 /* 4kB per DMA descriptor */ ++#define MACH64_APERTURE_OFFSET 0x7ff800 /* frame-buffer offset for gui-masters */ ++ ++/* ================================================================ ++ * Ring operations ++ * ++ * Since the Mach64 bus master engine requires polling, these functions end ++ * up being called frequently, hence being inline. 
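++ *
++ * A caller that needs ring space typically polls along these lines (a
++ * sketch of the pattern only -- mach64_wait_ring() in mach64_dma.c is the
++ * driver's real implementation, with a timeout; num_bytes is hypothetical):
++ *
++ *	while (dev_priv->ring.space < num_bytes) {
++ *		mach64_update_ring_snapshot(dev_priv);
++ *		DRM_UDELAY(1);
++ *	}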
++ */ ++ ++static __inline__ void mach64_ring_start(drm_mach64_private_t * dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ ++ DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n", ++ ring->head_addr, ring->head, ring->tail, ring->space); ++ ++ if (mach64_do_wait_for_idle(dev_priv) < 0) { ++ mach64_do_engine_reset(dev_priv); ++ } ++ ++ if (dev_priv->driver_mode != MACH64_MODE_MMIO) { ++ /* enable bus mastering and block 1 registers */ ++ MACH64_WRITE(MACH64_BUS_CNTL, ++ (MACH64_READ(MACH64_BUS_CNTL) & ++ ~MACH64_BUS_MASTER_DIS) ++ | MACH64_BUS_EXT_REG_EN); ++ mach64_do_wait_for_idle(dev_priv); ++ } ++ ++ /* reset descriptor table ring head */ ++ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ++ ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); ++ ++ dev_priv->ring_running = 1; ++} ++ ++static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv, ++ drm_mach64_descriptor_ring_t * ring) ++{ ++ DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n", ++ ring->head_addr, ring->head, ring->tail, ring->space); ++ ++ /* reset descriptor table ring head */ ++ MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD, ++ ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); ++ ++ if (dev_priv->driver_mode == MACH64_MODE_MMIO) { ++ mach64_do_dispatch_pseudo_dma(dev_priv); ++ } else { ++ /* enable GUI bus mastering, and sync the bus master to the GUI */ ++ MACH64_WRITE(MACH64_SRC_CNTL, ++ MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC | ++ MACH64_SRC_BM_OP_SYSTEM_TO_REG); ++ ++ /* kick off the transfer */ ++ MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0); ++ if (dev_priv->driver_mode == MACH64_MODE_DMA_SYNC) { ++ if ((mach64_do_wait_for_idle(dev_priv)) < 0) { ++ DRM_ERROR("idle failed, resetting engine\n"); ++ mach64_dump_engine_info(dev_priv); ++ mach64_do_engine_reset(dev_priv); ++ return; ++ } ++ mach64_do_release_used_buffers(dev_priv); ++ } ++ } ++} ++ ++/** ++ * Poll the ring head and make sure the bus master is alive. ++ * ++ * Mach64's bus master engine will stop if there are no more entries to process. ++ * This function polls the engine for the last processed entry and calls ++ * mach64_ring_resume if there is an unprocessed entry. ++ * ++ * Note also that, since we update the ring tail while the bus master engine is ++ * in operation, it is possible that the last tail update was too late to be ++ * processed, and the bus master engine stops at the previous tail position. ++ * Therefore it is important to call this function frequently. 
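++ *
++ * (mach64_update_ring_snapshot() below and the freelist reclaim path in
++ * mach64_dma.c both call this, so it runs on every space/buffer wait
++ * iteration.)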
++ */ ++static __inline__ void mach64_ring_tick(drm_mach64_private_t * dev_priv, ++ drm_mach64_descriptor_ring_t * ring) ++{ ++ DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n", ++ ring->head_addr, ring->head, ring->tail, ring->space); ++ ++ if (!dev_priv->ring_running) { ++ mach64_ring_start(dev_priv); ++ ++ if (ring->head != ring->tail) { ++ mach64_ring_resume(dev_priv, ring); ++ } ++ } else { ++ /* GUI_ACTIVE must be read before BM_GUI_TABLE to ++ * correctly determine the ring head ++ */ ++ int gui_active = ++ MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE; ++ ++ ring->head_addr = MACH64_READ(MACH64_BM_GUI_TABLE) & 0xfffffff0; ++ ++ if (gui_active) { ++ /* If not idle, BM_GUI_TABLE points one descriptor ++ * past the current head ++ */ ++ if (ring->head_addr == ring->start_addr) { ++ ring->head_addr += ring->size; ++ } ++ ring->head_addr -= 4 * sizeof(u32); ++ } ++ ++ if (ring->head_addr < ring->start_addr || ++ ring->head_addr >= ring->start_addr + ring->size) { ++ DRM_ERROR("bad ring head address: 0x%08x\n", ++ ring->head_addr); ++ mach64_dump_ring_info(dev_priv); ++ mach64_do_engine_reset(dev_priv); ++ return; ++ } ++ ++ ring->head = (ring->head_addr - ring->start_addr) / sizeof(u32); ++ ++ if (!gui_active && ring->head != ring->tail) { ++ mach64_ring_resume(dev_priv, ring); ++ } ++ } ++} ++ ++static __inline__ void mach64_ring_stop(drm_mach64_private_t * dev_priv) ++{ ++ DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n", ++ dev_priv->ring.head_addr, dev_priv->ring.head, ++ dev_priv->ring.tail, dev_priv->ring.space); ++ ++ /* restore previous SRC_CNTL to disable busmastering */ ++ mach64_do_wait_for_fifo(dev_priv, 1); ++ MACH64_WRITE(MACH64_SRC_CNTL, 0); ++ ++ /* disable busmastering but keep the block 1 registers enabled */ ++ mach64_do_wait_for_idle(dev_priv); ++ MACH64_WRITE(MACH64_BUS_CNTL, MACH64_READ(MACH64_BUS_CNTL) ++ | MACH64_BUS_MASTER_DIS | MACH64_BUS_EXT_REG_EN); ++ ++ dev_priv->ring_running = 0; ++} ++ ++static __inline__ void ++mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv) ++{ ++ drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; ++ ++ DRM_DEBUG("\n"); ++ ++ mach64_ring_tick(dev_priv, ring); ++ ++ ring->space = (ring->head - ring->tail) * sizeof(u32); ++ if (ring->space <= 0) { ++ ring->space += ring->size; ++ } ++} ++ ++/* ================================================================ ++ * DMA macros ++ * ++ * Mach64's ring buffer doesn't take register writes directly. These ++ * have to be written indirectly in DMA buffers. These macros simplify ++ * the task of setting up a buffer, writing commands to it, and ++ * queuing the buffer in the ring. ++ */ ++ ++#define DMALOCALS \ ++ drm_mach64_freelist_t *_entry = NULL; \ ++ struct drm_buf *_buf = NULL; \ ++ u32 *_buf_wptr; int _outcount ++ ++#define GETBUFPTR( __buf ) \ ++((dev_priv->is_pci) ? 
\
++ ((u32 *)(__buf)->address) : \
++ ((u32 *)((char *)dev_priv->dev_buffers->handle + (__buf)->offset)))
++
++#define GETBUFADDR( __buf ) ((u32)(__buf)->bus_address)
++
++#define GETRINGOFFSET() (_entry->ring_ofs)
++
++static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t *
++ dev_priv,
++ drm_mach64_freelist_t **
++ entry, struct drm_buf * buf)
++{
++ struct list_head *ptr;
++#if MACH64_EXTRA_CHECKING
++ if (list_empty(&dev_priv->pending)) {
++ DRM_ERROR("Empty pending list\n");
++ return -EINVAL;
++ }
++#endif
++ ptr = dev_priv->pending.prev;
++ *entry = list_entry(ptr, drm_mach64_freelist_t, list);
++ while ((*entry)->buf != buf) {
++ if (ptr == &dev_priv->pending) {
++ return -EFAULT;
++ }
++ ptr = ptr->prev;
++ *entry = list_entry(ptr, drm_mach64_freelist_t, list);
++ }
++ return 0;
++}
++
++#define DMASETPTR( _p ) \
++do { \
++ _buf = (_p); \
++ _outcount = 0; \
++ _buf_wptr = GETBUFPTR( _buf ); \
++} while(0)
++
++/* FIXME: use a private set of smaller buffers for state emits, clears, and swaps? */
++#define DMAGETPTR( file_priv, dev_priv, n ) \
++do { \
++ if ( MACH64_VERBOSE ) { \
++ DRM_INFO( "DMAGETPTR( %d )\n", (n) ); \
++ } \
++ _buf = mach64_freelist_get( dev_priv ); \
++ if (_buf == NULL) { \
++ DRM_ERROR("couldn't get buffer in DMAGETPTR\n"); \
++ return -EAGAIN; \
++ } \
++ if (_buf->pending) { \
++ DRM_ERROR("pending buf in DMAGETPTR\n"); \
++ return -EFAULT; \
++ } \
++ _buf->file_priv = file_priv; \
++ _outcount = 0; \
++ \
++ _buf_wptr = GETBUFPTR( _buf ); \
++} while (0)
++
++#define DMAOUTREG( reg, val ) \
++do { \
++ if ( MACH64_VERBOSE ) { \
++ DRM_INFO( " DMAOUTREG( 0x%x = 0x%08x )\n", \
++ reg, val ); \
++ } \
++ _buf_wptr[_outcount++] = cpu_to_le32(DMAREG(reg)); \
++ _buf_wptr[_outcount++] = cpu_to_le32((val)); \
++ _buf->used += 8; \
++} while (0)
++
++#define DMAADVANCE( dev_priv, _discard ) \
++ do { \
++ struct list_head *ptr; \
++ int ret; \
++ \
++ if ( MACH64_VERBOSE ) { \
++ DRM_INFO( "DMAADVANCE()\n" ); \
++ } \
++ \
++ if (_buf->used <= 0) { \
++ DRM_ERROR( "DMAADVANCE(): sending empty buf %d\n", \
++ _buf->idx ); \
++ return -EFAULT; \
++ } \
++ if (_buf->pending) { \
++ /* This is a reused buffer, so we need to find it in the pending list */ \
++ if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \
++ DRM_ERROR( "DMAADVANCE(): couldn't find pending buf %d\n", _buf->idx ); \
++ return ret; \
++ } \
++ if (_entry->discard) { \
++ DRM_ERROR( "DMAADVANCE(): sending discarded pending buf %d\n", _buf->idx ); \
++ return -EFAULT; \
++ } \
++ } else { \
++ if (list_empty(&dev_priv->placeholders)) { \
++ DRM_ERROR( "DMAADVANCE(): empty placeholder list\n"); \
++ return -EFAULT; \
++ } \
++ ptr = dev_priv->placeholders.next; \
++ list_del(ptr); \
++ _entry = list_entry(ptr, drm_mach64_freelist_t, list); \
++ _buf->pending = 1; \
++ _entry->buf = _buf; \
++ list_add_tail(ptr, &dev_priv->pending); \
++ } \
++ _entry->discard = (_discard); \
++ if ((ret = mach64_add_buf_to_ring( dev_priv, _entry ))) \
++ return ret; \
++ } while (0)
++
++#define DMADISCARDBUF() \
++ do { \
++ if (_entry == NULL) { \
++ int ret; \
++ if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \
++ DRM_ERROR( "couldn't find pending buf %d\n", \
++ _buf->idx ); \
++ return ret; \
++ } \
++ } \
++ _entry->discard = 1; \
++ } while(0)
++
++#define DMAADVANCEHOSTDATA( dev_priv ) \
++ do { \
++ struct list_head *ptr; \
++ int ret; \
++ \
++ if ( MACH64_VERBOSE ) { \
++ DRM_INFO( "DMAADVANCEHOSTDATA()\n" ); \
++ } \
++ \
++ if
(_buf->used <= 0) { \ ++ DRM_ERROR( "DMAADVANCEHOSTDATA(): sending empty buf %d\n", _buf->idx ); \ ++ return -EFAULT; \ ++ } \ ++ if (list_empty(&dev_priv->placeholders)) { \ ++ DRM_ERROR( "empty placeholder list in DMAADVANCEHOSTDATA()\n" ); \ ++ return -EFAULT; \ ++ } \ ++ \ ++ ptr = dev_priv->placeholders.next; \ ++ list_del(ptr); \ ++ _entry = list_entry(ptr, drm_mach64_freelist_t, list); \ ++ _entry->buf = _buf; \ ++ _entry->buf->pending = 1; \ ++ list_add_tail(ptr, &dev_priv->pending); \ ++ _entry->discard = 1; \ ++ if ((ret = mach64_add_hostdata_buf_to_ring( dev_priv, _entry ))) \ ++ return ret; \ ++ } while (0) ++ ++#endif /* __MACH64_DRV_H__ */ +diff -Nurd git/drivers/gpu/drm-tungsten/mach64_irq.c git-nokia/drivers/gpu/drm-tungsten/mach64_irq.c +--- git/drivers/gpu/drm-tungsten/mach64_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/mach64_irq.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,159 @@ ++/* mach64_irq.c -- IRQ handling for ATI Mach64 -*- linux-c -*- ++ * Created: Tue Feb 25, 2003 by Leif Delgass, based on radeon_irq.c/r128_irq.c ++ */ ++/*- ++ * Copyright (C) The Weather Channel, Inc. 2002. ++ * Copyright 2003 Leif Delgass ++ * All Rights Reserved. ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Keith Whitwell ++ * Eric Anholt ++ * Leif Delgass ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mach64_drm.h" ++#include "mach64_drv.h" ++ ++irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS) ++{ ++ struct drm_device *dev = arg; ++ drm_mach64_private_t *dev_priv = dev->dev_private; ++ int status; ++ ++ status = MACH64_READ(MACH64_CRTC_INT_CNTL); ++ ++ /* VBLANK interrupt */ ++ if (status & MACH64_CRTC_VBLANK_INT) { ++ /* Mask off all interrupt ack bits before setting the ack bit, since ++ * there may be other handlers outside the DRM. ++ * ++ * NOTE: On mach64, you need to keep the enable bits set when doing ++ * the ack, despite what the docs say about not acking and enabling ++ * in a single write. 
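++ *
++ * Writing a 1 to an ack bit clears that interrupt, so leaving the other
++ * ack bits set in the write below would silently acknowledge interrupts
++ * that a handler outside the DRM may not have seen yet.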
++ */
++ MACH64_WRITE(MACH64_CRTC_INT_CNTL,
++ (status & ~MACH64_CRTC_INT_ACKS)
++ | MACH64_CRTC_VBLANK_INT);
++
++ atomic_inc(&dev_priv->vbl_received);
++ drm_handle_vblank(dev, 0);
++ return IRQ_HANDLED;
++ }
++ return IRQ_NONE;
++}
++
++u32 mach64_get_vblank_counter(struct drm_device * dev, int crtc)
++{
++ const drm_mach64_private_t *const dev_priv = dev->dev_private;
++
++ if (crtc != 0)
++ return 0;
++
++ return atomic_read(&dev_priv->vbl_received);
++}
++
++int mach64_enable_vblank(struct drm_device * dev, int crtc)
++{
++ drm_mach64_private_t *dev_priv = dev->dev_private;
++ u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
++
++ if (crtc != 0) {
++ DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++ crtc);
++ return -EINVAL;
++ }
++
++ DRM_DEBUG("before enable vblank CRTC_INT_CNTL: 0x%08x\n", status);
++
++ /* Turn on VBLANK interrupt */
++ MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL)
++ | MACH64_CRTC_VBLANK_INT_EN);
++
++ return 0;
++}
++
++void mach64_disable_vblank(struct drm_device * dev, int crtc)
++{
++ if (crtc != 0) {
++ DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++ crtc);
++ return;
++ }
++
++ /*
++ * FIXME: implement proper interrupt disable by using the vblank
++ * counter register (if available).
++ */
++}
++
++static void mach64_disable_vblank_local(struct drm_device * dev, int crtc)
++{
++ drm_mach64_private_t *dev_priv = dev->dev_private;
++ u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
++
++ if (crtc != 0) {
++ DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++ crtc);
++ return;
++ }
++
++ DRM_DEBUG("before disable vblank CRTC_INT_CNTL: 0x%08x\n", status);
++
++ /* Disable and clear VBLANK interrupt */
++ MACH64_WRITE(MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_VBLANK_INT_EN)
++ | MACH64_CRTC_VBLANK_INT);
++}
++
++void mach64_driver_irq_preinstall(struct drm_device * dev)
++{
++ drm_mach64_private_t *dev_priv = dev->dev_private;
++
++ u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
++
++ DRM_DEBUG("before install CRTC_INT_CNTL: 0x%08x\n", status);
++
++ mach64_disable_vblank_local(dev, 0);
++}
++
++int mach64_driver_irq_postinstall(struct drm_device * dev)
++{
++ return drm_vblank_init(dev, 1);
++}
++
++void mach64_driver_irq_uninstall(struct drm_device * dev)
++{
++ drm_mach64_private_t *dev_priv = dev->dev_private;
++ if (!dev_priv)
++ return;
++
++ mach64_disable_vblank_local(dev, 0);
++
++ DRM_DEBUG("after uninstall CRTC_INT_CNTL: 0x%08x\n",
++ MACH64_READ(MACH64_CRTC_INT_CNTL));
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_state.c git-nokia/drivers/gpu/drm-tungsten/mach64_state.c
+--- git/drivers/gpu/drm-tungsten/mach64_state.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_state.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,910 @@
++/* mach64_state.c -- State support for mach64 (Rage Pro) driver -*- linux-c -*-
++ * Created: Sun Dec 03 19:20:26 2000 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 Gareth Hughes
++ * Copyright 2002-2003 Leif Delgass
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes
++ *    Leif Delgass
++ *    José Fonseca
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mach64_drm.h"
++#include "mach64_drv.h"
++
++/* Interface history:
++ *
++ * 1.0 - Initial mach64 DRM
++ *
++ */
++struct drm_ioctl_desc mach64_ioctls[] = {
++	DRM_IOCTL_DEF(DRM_MACH64_INIT, mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++	DRM_IOCTL_DEF(DRM_MACH64_CLEAR, mach64_dma_clear, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_MACH64_SWAP, mach64_dma_swap, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_MACH64_IDLE, mach64_dma_idle, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_MACH64_RESET, mach64_engine_reset, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_MACH64_VERTEX, mach64_dma_vertex, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_MACH64_BLIT, mach64_dma_blit, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_MACH64_FLUSH, mach64_dma_flush, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_MACH64_GETPARAM, mach64_get_param, DRM_AUTH),
++};
++
++int mach64_max_ioctl = DRM_ARRAY_SIZE(mach64_ioctls);
++
++/* ================================================================
++ * DMA hardware state programming functions
++ */
++
++static void mach64_print_dirty(const char *msg, unsigned int flags)
++{
++	DRM_DEBUG("%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s\n",
++		  msg,
++		  flags,
++		  (flags & MACH64_UPLOAD_DST_OFF_PITCH) ? "dst_off_pitch, " : "",
++		  (flags & MACH64_UPLOAD_Z_ALPHA_CNTL) ? "z_alpha_cntl, " : "",
++		  (flags & MACH64_UPLOAD_SCALE_3D_CNTL) ? "scale_3d_cntl, " : "",
++		  (flags & MACH64_UPLOAD_DP_FOG_CLR) ? "dp_fog_clr, " : "",
++		  (flags & MACH64_UPLOAD_DP_WRITE_MASK) ? "dp_write_mask, " : "",
++		  (flags & MACH64_UPLOAD_DP_PIX_WIDTH) ? "dp_pix_width, " : "",
++		  (flags & MACH64_UPLOAD_SETUP_CNTL) ? "setup_cntl, " : "",
++		  (flags & MACH64_UPLOAD_MISC) ? "misc, " : "",
++		  (flags & MACH64_UPLOAD_TEXTURE) ? "texture, " : "",
++		  (flags & MACH64_UPLOAD_TEX0IMAGE) ? "tex0 image, " : "",
++		  (flags & MACH64_UPLOAD_TEX1IMAGE) ? "tex1 image, " : "",
++		  (flags & MACH64_UPLOAD_CLIPRECTS) ? "cliprects, " : "");
++}
++
++/* Mach64 doesn't have hardware cliprects, just one hardware scissor,
++ * so the GL scissor is intersected with each cliprect here
++ */
++/* This function returns 0 on success, 1 for no intersection, and
++ * negative for an error
++ */
++static int mach64_emit_cliprect(struct drm_file *file_priv,
++				drm_mach64_private_t * dev_priv,
++				struct drm_clip_rect * box)
++{
++	u32 sc_left_right, sc_top_bottom;
++	struct drm_clip_rect scissor;
++	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++	drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
++	DMALOCALS;
++
++	DRM_DEBUG("box=%p\n", box);
++
++	/* Get GL scissor */
++	/* FIXME: store scissor in SAREA as a cliprect instead of in
++	 * hardware format, or do intersection client-side
++	 */
++	scissor.x1 = regs->sc_left_right & 0xffff;
++	scissor.x2 = (regs->sc_left_right & 0xffff0000) >> 16;
++	scissor.y1 = regs->sc_top_bottom & 0xffff;
++	scissor.y2 = (regs->sc_top_bottom & 0xffff0000) >> 16;
++
++	/* Intersect GL scissor with cliprect */
++	if (box->x1 > scissor.x1)
++		scissor.x1 = box->x1;
++	if (box->y1 > scissor.y1)
++		scissor.y1 = box->y1;
++	if (box->x2 < scissor.x2)
++		scissor.x2 = box->x2;
++	if (box->y2 < scissor.y2)
++		scissor.y2 = box->y2;
++	/* positive return means skip */
++	if (scissor.x1 >= scissor.x2)
++		return 1;
++	if (scissor.y1 >= scissor.y2)
++		return 1;
++
++	DMAGETPTR(file_priv, dev_priv, 2);	/* returns on failure to get buffer */
++
++	sc_left_right = ((scissor.x1 << 0) | (scissor.x2 << 16));
++	sc_top_bottom = ((scissor.y1 << 0) | (scissor.y2 << 16));
++
++	DMAOUTREG(MACH64_SC_LEFT_RIGHT, sc_left_right);
++	DMAOUTREG(MACH64_SC_TOP_BOTTOM, sc_top_bottom);
++
++	DMAADVANCE(dev_priv, 1);
++
++	return 0;
++}
++
++static __inline__ int mach64_emit_state(struct drm_file *file_priv,
++					drm_mach64_private_t * dev_priv)
++{
++	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++	drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
++	unsigned int dirty = sarea_priv->dirty;
++	u32 offset = ((regs->tex_size_pitch & 0xf0) >> 2);
++	DMALOCALS;
++
++	if (MACH64_VERBOSE) {
++		mach64_print_dirty(__FUNCTION__, dirty);
++	} else {
++		DRM_DEBUG("dirty=0x%08x\n", dirty);
++	}
++
++	DMAGETPTR(file_priv, dev_priv, 17);	/* returns on failure to get buffer */
++
++	if (dirty & MACH64_UPLOAD_MISC) {
++		DMAOUTREG(MACH64_DP_MIX, regs->dp_mix);
++		DMAOUTREG(MACH64_DP_SRC, regs->dp_src);
++		DMAOUTREG(MACH64_CLR_CMP_CNTL, regs->clr_cmp_cntl);
++		DMAOUTREG(MACH64_GUI_TRAJ_CNTL, regs->gui_traj_cntl);
++		sarea_priv->dirty &= ~MACH64_UPLOAD_MISC;
++	}
++
++	if (dirty & MACH64_UPLOAD_DST_OFF_PITCH) {
++		DMAOUTREG(MACH64_DST_OFF_PITCH, regs->dst_off_pitch);
++		sarea_priv->dirty &= ~MACH64_UPLOAD_DST_OFF_PITCH;
++	}
++	if (dirty & MACH64_UPLOAD_Z_OFF_PITCH) {
++		DMAOUTREG(MACH64_Z_OFF_PITCH, regs->z_off_pitch);
++		sarea_priv->dirty &= ~MACH64_UPLOAD_Z_OFF_PITCH;
++	}
++	if (dirty & MACH64_UPLOAD_Z_ALPHA_CNTL) {
++		DMAOUTREG(MACH64_Z_CNTL, regs->z_cntl);
++		DMAOUTREG(MACH64_ALPHA_TST_CNTL, regs->alpha_tst_cntl);
++		sarea_priv->dirty &= ~MACH64_UPLOAD_Z_ALPHA_CNTL;
++	}
++	if (dirty & MACH64_UPLOAD_SCALE_3D_CNTL) {
++		DMAOUTREG(MACH64_SCALE_3D_CNTL, regs->scale_3d_cntl);
++		sarea_priv->dirty &= ~MACH64_UPLOAD_SCALE_3D_CNTL;
++	}
++	if (dirty & MACH64_UPLOAD_DP_FOG_CLR) {
++		DMAOUTREG(MACH64_DP_FOG_CLR, regs->dp_fog_clr);
++		sarea_priv->dirty &= ~MACH64_UPLOAD_DP_FOG_CLR;
++	}
++	if (dirty & MACH64_UPLOAD_DP_WRITE_MASK) {
++		DMAOUTREG(MACH64_DP_WRITE_MASK, regs->dp_write_mask);
++		sarea_priv->dirty &= ~MACH64_UPLOAD_DP_WRITE_MASK;
++	}
++	if (dirty & MACH64_UPLOAD_DP_PIX_WIDTH) {
++		DMAOUTREG(MACH64_DP_PIX_WIDTH, regs->dp_pix_width);
++		sarea_priv->dirty &= ~MACH64_UPLOAD_DP_PIX_WIDTH;
++	}
++	if (dirty & MACH64_UPLOAD_SETUP_CNTL) {
++		DMAOUTREG(MACH64_SETUP_CNTL, regs->setup_cntl);
++		sarea_priv->dirty &= ~MACH64_UPLOAD_SETUP_CNTL;
++	}
++
++	if (dirty & MACH64_UPLOAD_TEXTURE) {
++		DMAOUTREG(MACH64_TEX_SIZE_PITCH, regs->tex_size_pitch);
++		DMAOUTREG(MACH64_TEX_CNTL, regs->tex_cntl);
++		DMAOUTREG(MACH64_SECONDARY_TEX_OFF, regs->secondary_tex_off);
++		DMAOUTREG(MACH64_TEX_0_OFF + offset, regs->tex_offset);
++		sarea_priv->dirty &= ~MACH64_UPLOAD_TEXTURE;
++	}
++
++	DMAADVANCE(dev_priv, 1);
++
++	sarea_priv->dirty &= MACH64_UPLOAD_CLIPRECTS;
++
++	return 0;
++
++}
++
++/* ================================================================
++ * DMA command dispatch functions
++ */
++
++static int mach64_dma_dispatch_clear(struct drm_device * dev,
++				     struct drm_file *file_priv,
++				     unsigned int flags,
++				     int cx, int cy, int cw, int ch,
++				     unsigned int clear_color,
++				     unsigned int clear_depth)
++{
++	drm_mach64_private_t *dev_priv = dev->dev_private;
++	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++	drm_mach64_context_regs_t *ctx = &sarea_priv->context_state;
++	int nbox = sarea_priv->nbox;
++	struct drm_clip_rect *pbox = sarea_priv->boxes;
++	u32 fb_bpp, depth_bpp;
++	int i;
++	DMALOCALS;
++
++	DRM_DEBUG("\n");
++
++	switch (dev_priv->fb_bpp) {
++	case 16:
++		fb_bpp = MACH64_DATATYPE_RGB565;
++		break;
++	case 32:
++		fb_bpp = MACH64_DATATYPE_ARGB8888;
++		break;
++	default:
++		return -EINVAL;
++	}
++	switch (dev_priv->depth_bpp) {
++	case 16:
++		depth_bpp = MACH64_DATATYPE_RGB565;
++		break;
++	case 24:
++	case 32:
++		depth_bpp = MACH64_DATATYPE_ARGB8888;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	if (!nbox)
++		return 0;
++
++	DMAGETPTR(file_priv, dev_priv, nbox * 31);	/* returns on failure to get buffer */
++
++	for (i = 0; i < nbox; i++) {
++		int x = pbox[i].x1;
++		int y = pbox[i].y1;
++		int w = pbox[i].x2 - x;
++		int h = pbox[i].y2 - y;
++
++		DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
++			  pbox[i].x1, pbox[i].y1,
++			  pbox[i].x2, pbox[i].y2, flags);
++
++		if (flags & (MACH64_FRONT | MACH64_BACK)) {
++			/* Setup for color buffer clears
++			 */
++
++			DMAOUTREG(MACH64_Z_CNTL, 0);
++			DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
++
++			DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right);
++			DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom);
++
++			DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
++			DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
++				  (MACH64_DST_X_LEFT_TO_RIGHT |
++				   MACH64_DST_Y_TOP_TO_BOTTOM));
++
++			DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) |
++							(fb_bpp << 4) |
++							(fb_bpp << 8) |
++							(fb_bpp << 16) |
++							(fb_bpp << 28)));
++
++			DMAOUTREG(MACH64_DP_FRGD_CLR, clear_color);
++			DMAOUTREG(MACH64_DP_WRITE_MASK, ctx->dp_write_mask);
++			DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D |
++						  MACH64_FRGD_MIX_S));
++			DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR |
++						  MACH64_FRGD_SRC_FRGD_CLR |
++						  MACH64_MONO_SRC_ONE));
++
++		}
++
++		if (flags & MACH64_FRONT) {
++
++			DMAOUTREG(MACH64_DST_OFF_PITCH,
++				  dev_priv->front_offset_pitch);
++			DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
++			DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
++
++		}
++
++		if (flags & MACH64_BACK) {
++
++			DMAOUTREG(MACH64_DST_OFF_PITCH,
++				  dev_priv->back_offset_pitch);
++			DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
++			DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
++
++		}
++
++		if (flags & MACH64_DEPTH) {
++			/* Setup for depth buffer clear
++			 */
++			DMAOUTREG(MACH64_Z_CNTL, 0);
++			DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
++
++			DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right);
++			DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom);
++
++			DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
++			DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
++				  (MACH64_DST_X_LEFT_TO_RIGHT |
++				   MACH64_DST_Y_TOP_TO_BOTTOM));
++
++			DMAOUTREG(MACH64_DP_PIX_WIDTH, ((depth_bpp << 0) |
++							(depth_bpp << 4) |
++							(depth_bpp << 8) |
++							(depth_bpp << 16) |
++							(depth_bpp << 28)));
++
++			DMAOUTREG(MACH64_DP_FRGD_CLR, clear_depth);
++			DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);
++			DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D |
++						  MACH64_FRGD_MIX_S));
++			DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR |
++						  MACH64_FRGD_SRC_FRGD_CLR |
++						  MACH64_MONO_SRC_ONE));
++
++			DMAOUTREG(MACH64_DST_OFF_PITCH,
++				  dev_priv->depth_offset_pitch);
++			DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
++			DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
++		}
++	}
++
++	DMAADVANCE(dev_priv, 1);
++
++	return 0;
++}
++
++static int mach64_dma_dispatch_swap(struct drm_device * dev,
++				    struct drm_file *file_priv)
++{
++	drm_mach64_private_t *dev_priv = dev->dev_private;
++	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++	int nbox = sarea_priv->nbox;
++	struct drm_clip_rect *pbox = sarea_priv->boxes;
++	u32 fb_bpp;
++	int i;
++	DMALOCALS;
++
++	DRM_DEBUG("\n");
++
++	switch (dev_priv->fb_bpp) {
++	case 16:
++		fb_bpp = MACH64_DATATYPE_RGB565;
++		break;
++	case 32:
++	default:
++		fb_bpp = MACH64_DATATYPE_ARGB8888;
++		break;
++	}
++
++	if (!nbox)
++		return 0;
++
++	DMAGETPTR(file_priv, dev_priv, 13 + nbox * 4);	/* returns on failure to get buffer */
++
++	DMAOUTREG(MACH64_Z_CNTL, 0);
++	DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
++
++	DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16));	/* no scissor */
++	DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16));
++
++	DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
++	DMAOUTREG(MACH64_GUI_TRAJ_CNTL, (MACH64_DST_X_LEFT_TO_RIGHT |
++					 MACH64_DST_Y_TOP_TO_BOTTOM));
++
++	DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) |
++					(fb_bpp << 4) |
++					(fb_bpp << 8) |
++					(fb_bpp << 16) | (fb_bpp << 28)));
++
++	DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);
++	DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S));
++	DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_BKGD_CLR |
++				  MACH64_FRGD_SRC_BLIT | MACH64_MONO_SRC_ONE));
++
++	DMAOUTREG(MACH64_SRC_OFF_PITCH, dev_priv->back_offset_pitch);
++	DMAOUTREG(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch);
++
++	for (i = 0; i < nbox; i++) {
++		int x = pbox[i].x1;
++		int y = pbox[i].y1;
++		int w = pbox[i].x2 - x;
++		int h = pbox[i].y2 - y;
++
++		DRM_DEBUG("dispatch swap %d,%d-%d,%d\n",
++			  pbox[i].x1, pbox[i].y1, pbox[i].x2, pbox[i].y2);
++
++		DMAOUTREG(MACH64_SRC_WIDTH1, w);
++		DMAOUTREG(MACH64_SRC_Y_X, (x << 16) | y);
++		DMAOUTREG(MACH64_DST_Y_X, (x << 16) | y);
++		DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
++
++	}
++
++	DMAADVANCE(dev_priv, 1);
++
++	if (dev_priv->driver_mode == MACH64_MODE_DMA_ASYNC) {
++		for (i = 0; i < MACH64_MAX_QUEUED_FRAMES - 1; i++) {
++			dev_priv->frame_ofs[i] = dev_priv->frame_ofs[i + 1];
++		}
++		dev_priv->frame_ofs[i] = GETRINGOFFSET();
++
++		dev_priv->sarea_priv->frames_queued++;
++	}
++
++	return 0;
++}
++
++static int mach64_do_get_frames_queued(drm_mach64_private_t * dev_priv)
++{
++	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++	int i, start;
++	u32 head, tail, ofs;
++
++	DRM_DEBUG("\n");
++
++	if (sarea_priv->frames_queued == 0)
++		return 0;
++
++	tail = ring->tail;
++	mach64_ring_tick(dev_priv, ring);
++	head = ring->head;
++
++	start = (MACH64_MAX_QUEUED_FRAMES -
++		 DRM_MIN(MACH64_MAX_QUEUED_FRAMES, sarea_priv->frames_queued));
++
++	if (head == tail) {
++		sarea_priv->frames_queued = 0;
++		for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) {
++			dev_priv->frame_ofs[i] = ~0;
++		}
++		return 0;
++	}
++
++	for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) {
++		ofs = dev_priv->frame_ofs[i];
++		DRM_DEBUG("frame_ofs[%d] ofs: %d\n", i, ofs);
++		if (ofs == ~0 ||
++		    (head < tail && (ofs < head || ofs >= tail)) ||
++		    (head > tail && (ofs < head && ofs >= tail))) {
++			sarea_priv->frames_queued =
++			    (MACH64_MAX_QUEUED_FRAMES - 1) - i;
++			dev_priv->frame_ofs[i] = ~0;
++		}
++	}
++
++	return sarea_priv->frames_queued;
++}
++
++/* Copy and verify a client submitted buffer.
++ * FIXME: Make an assembly optimized version
++ */
++static __inline__ int copy_from_user_vertex(u32 *to,
++					    const u32 __user *ufrom,
++					    unsigned long bytes)
++{
++	unsigned long n = bytes;	/* dwords remaining in buffer */
++	u32 *from, *orig_from;
++
++	from = drm_alloc(bytes, DRM_MEM_DRIVER);
++	if (from == NULL)
++		return -ENOMEM;
++
++	if (DRM_COPY_FROM_USER(from, ufrom, bytes)) {
++		drm_free(from, bytes, DRM_MEM_DRIVER);
++		return -EFAULT;
++	}
++	orig_from = from;	/* we'll be modifying the "from" ptr, so save it */
++
++	n >>= 2;
++
++	while (n > 1) {
++		u32 data, reg, count;
++
++		data = *from++;
++
++		n--;
++
++		reg = le32_to_cpu(data);
++		count = (reg >> 16) + 1;
++		if (count <= n) {
++			n -= count;
++			reg &= 0xffff;
++
++			/* This is an exact match of Mach64's Setup Engine registers,
++			 * excluding SETUP_CNTL (1_C1).
++			 */
++			if ((reg >= 0x0190 && reg < 0x01c1) ||
++			    (reg >= 0x01ca && reg <= 0x01cf)) {
++				*to++ = data;
++				memcpy(to, from, count << 2);
++				from += count;
++				to += count;
++			} else {
++				DRM_ERROR("Got bad command: 0x%04x\n", reg);
++				drm_free(orig_from, bytes, DRM_MEM_DRIVER);
++				return -EACCES;
++			}
++		} else {
++			DRM_ERROR
++			    ("Got bad command count(=%u) dwords remaining=%lu\n",
++			     count, n);
++			drm_free(orig_from, bytes, DRM_MEM_DRIVER);
++			return -EINVAL;
++		}
++	}
++
++	drm_free(orig_from, bytes, DRM_MEM_DRIVER);
++	if (n == 0)
++		return 0;
++	else {
++		DRM_ERROR("Bad buf->used(=%lu)\n", bytes);
++		return -EINVAL;
++	}
++}
++
++static int mach64_dma_dispatch_vertex(struct drm_device * dev,
++				      struct drm_file *file_priv,
++				      drm_mach64_vertex_t * vertex)
++{
++	drm_mach64_private_t *dev_priv = dev->dev_private;
++	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++	struct drm_buf *copy_buf;
++	void *buf = vertex->buf;
++	unsigned long used = vertex->used;
++	int ret = 0;
++	int i = 0;
++	int done = 0;
++	int verify_ret = 0;
++	DMALOCALS;
++
++	DRM_DEBUG("buf=%p used=%lu nbox=%d\n",
++		  buf, used, sarea_priv->nbox);
++
++	if (!used)
++		goto _vertex_done;
++
++	copy_buf = mach64_freelist_get(dev_priv);
++	if (copy_buf == NULL) {
++		DRM_ERROR("couldn't get buffer\n");
++		return -EAGAIN;
++	}
++
++	/* Mach64's vertex data is actually register writes. To avoid security
++	 * compromises these register writes have to be verified and copied from
++	 * user space into a private DMA buffer.
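++	 * (copy_from_user_vertex() above only accepts writes to the Setup
++	 * Engine register ranges and rejects anything else with -EACCES,
++	 * so a client cannot program arbitrary registers via this path.)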
++	 */
++	verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used);
++
++	if (verify_ret != 0) {
++		mach64_freelist_put(dev_priv, copy_buf);
++		goto _vertex_done;
++	}
++
++	copy_buf->used = used;
++
++	DMASETPTR(copy_buf);
++
++	if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) {
++		ret = mach64_emit_state(file_priv, dev_priv);
++		if (ret < 0)
++			return ret;
++	}
++
++	do {
++		/* Emit the next cliprect */
++		if (i < sarea_priv->nbox) {
++			ret = mach64_emit_cliprect(file_priv, dev_priv,
++						   &sarea_priv->boxes[i]);
++			if (ret < 0) {
++				/* failed to get buffer */
++				return ret;
++			} else if (ret != 0) {
++				/* null intersection with scissor */
++				continue;
++			}
++		}
++		if ((i >= sarea_priv->nbox - 1))
++			done = 1;
++
++		/* Add the buffer to the DMA queue */
++		DMAADVANCE(dev_priv, done);
++
++	} while (++i < sarea_priv->nbox);
++
++	if (!done) {
++		if (copy_buf->pending) {
++			DMADISCARDBUF();
++		} else {
++			/* This buffer wasn't used (no cliprects), so place it
++			 * back on the free list
++			 */
++			mach64_freelist_put(dev_priv, copy_buf);
++		}
++	}
++
++_vertex_done:
++	sarea_priv->dirty &= ~MACH64_UPLOAD_CLIPRECTS;
++	sarea_priv->nbox = 0;
++
++	return verify_ret;
++}
++
++static __inline__ int copy_from_user_blit(u32 *to,
++					  const u32 __user *ufrom,
++					  unsigned long bytes)
++{
++	to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET);
++
++	if (DRM_COPY_FROM_USER(to, ufrom, bytes)) {
++		return -EFAULT;
++	}
++
++	return 0;
++}
++
++static int mach64_dma_dispatch_blit(struct drm_device * dev,
++				    struct drm_file *file_priv,
++				    drm_mach64_blit_t * blit)
++{
++	drm_mach64_private_t *dev_priv = dev->dev_private;
++	int dword_shift, dwords;
++	unsigned long used;
++	struct drm_buf *copy_buf;
++	int verify_ret = 0;
++	DMALOCALS;
++
++	/* The compiler won't optimize away a division by a variable,
++	 * even if the only legal values are powers of two.  Thus, we'll
++	 * use a shift instead.
++	 */
++	switch (blit->format) {
++	case MACH64_DATATYPE_ARGB8888:
++		dword_shift = 0;
++		break;
++	case MACH64_DATATYPE_ARGB1555:
++	case MACH64_DATATYPE_RGB565:
++	case MACH64_DATATYPE_VYUY422:
++	case MACH64_DATATYPE_YVYU422:
++	case MACH64_DATATYPE_ARGB4444:
++		dword_shift = 1;
++		break;
++	case MACH64_DATATYPE_CI8:
++	case MACH64_DATATYPE_RGB8:
++		dword_shift = 2;
++		break;
++	default:
++		DRM_ERROR("invalid blit format %d\n", blit->format);
++		return -EINVAL;
++	}
++
++	/* Set buf->used to the bytes of blit data based on the blit dimensions
++	 * and verify the size.  When the setup is emitted to the buffer with
++	 * the DMA* macros below, buf->used is incremented to include the bytes
++	 * used for setup as well as the blit data.
++	 */
++	dwords = (blit->width * blit->height) >> dword_shift;
++	used = dwords << 2;
++	if (used <= 0 ||
++	    used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) {
++		DRM_ERROR("Invalid blit size: %lu bytes\n", used);
++		return -EINVAL;
++	}
++
++	copy_buf = mach64_freelist_get(dev_priv);
++	if (copy_buf == NULL) {
++		DRM_ERROR("couldn't get buffer\n");
++		return -EAGAIN;
++	}
++
++	/* Copy the blit data from userspace.
++	 *
++	 * XXX: This is overkill.  The most efficient solution would be having
++	 * two sets of buffers (one set private for vertex data, the other set
++	 * client-writable for blits).  However that would bring more complexity
++	 * and would break backward compatibility.  The solution currently
++	 * implemented keeps all buffers private, which secures the driver
++	 * without increasing complexity, at the expense of some speed
++	 * transferring data.
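++	 * (Note that copy_from_user_blit() lands the user data past
++	 * MACH64_HOSTDATA_BLIT_OFFSET, leaving room for the register
++	 * setup that is emitted at the head of the buffer below.)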
++	 */
++	verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used);
++
++	if (verify_ret != 0) {
++		mach64_freelist_put(dev_priv, copy_buf);
++		goto _blit_done;
++	}
++
++	copy_buf->used = used;
++
++	/* FIXME: Use a last buffer flag and reduce the state emitted for subsequent,
++	 * continuation buffers?
++	 */
++
++	/* Blit via BM_HOSTDATA (gui-master) - like HOST_DATA[0-15], but doesn't require
++	 * a register command every 16 dwords.  State setup is added at the start of the
++	 * buffer -- the client leaves space for this based on MACH64_HOSTDATA_BLIT_OFFSET
++	 */
++	DMASETPTR(copy_buf);
++
++	DMAOUTREG(MACH64_Z_CNTL, 0);
++	DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
++
++	DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16));	/* no scissor */
++	DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16));
++
++	DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);	/* disable */
++	DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
++		  MACH64_DST_X_LEFT_TO_RIGHT | MACH64_DST_Y_TOP_TO_BOTTOM);
++
++	DMAOUTREG(MACH64_DP_PIX_WIDTH, (blit->format << 0)	/* dst pix width */
++		  |(blit->format << 4)	/* composite pix width */
++		  |(blit->format << 8)	/* src pix width */
++		  |(blit->format << 16)	/* host data pix width */
++		  |(blit->format << 28)	/* scaler/3D pix width */
++	    );
++
++	DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);	/* enable all planes */
++	DMAOUTREG(MACH64_DP_MIX, MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S);
++	DMAOUTREG(MACH64_DP_SRC,
++		  MACH64_BKGD_SRC_BKGD_CLR
++		  | MACH64_FRGD_SRC_HOST | MACH64_MONO_SRC_ONE);
++
++	DMAOUTREG(MACH64_DST_OFF_PITCH,
++		  (blit->pitch << 22) | (blit->offset >> 3));
++	DMAOUTREG(MACH64_DST_X_Y, (blit->y << 16) | blit->x);
++	DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (blit->height << 16) | blit->width);
++
++	DRM_DEBUG("%lu bytes\n", used);
++
++	/* Add the buffer to the queue */
++	DMAADVANCEHOSTDATA(dev_priv);
++
++_blit_done:
++	return verify_ret;
++}
++
++/* ================================================================
++ * IOCTL functions
++ */
++
++int mach64_dma_clear(struct drm_device *dev, void *data,
++		     struct drm_file *file_priv)
++{
++	drm_mach64_private_t *dev_priv = dev->dev_private;
++	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++	drm_mach64_clear_t *clear = data;
++	int ret;
++
++	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++	if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
++		sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
++
++	ret = mach64_dma_dispatch_clear(dev, file_priv, clear->flags,
++					clear->x, clear->y, clear->w, clear->h,
++					clear->clear_color,
++					clear->clear_depth);
++
++	/* Make sure we restore the 3D state next time.
++	 */
++	sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC);
++	return ret;
++}
++
++int mach64_dma_swap(struct drm_device *dev, void *data,
++		    struct drm_file *file_priv)
++{
++	drm_mach64_private_t *dev_priv = dev->dev_private;
++	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++	int ret;
++
++	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++	if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
++		sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
++
++	ret = mach64_dma_dispatch_swap(dev, file_priv);
++
++	/* Make sure we restore the 3D state next time.
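++	 * (The swap dispatched above programmed Z_CNTL, DP_MIX and other
++	 * context registers directly, so the context state in the SAREA
++	 * no longer matches the hardware.)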
++	 */
++	sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC);
++	return ret;
++}
++
++int mach64_dma_vertex(struct drm_device *dev, void *data,
++		      struct drm_file *file_priv)
++{
++	drm_mach64_private_t *dev_priv = dev->dev_private;
++	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++	drm_mach64_vertex_t *vertex = data;
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++	if (!dev_priv) {
++		DRM_ERROR("called with no initialization\n");
++		return -EINVAL;
++	}
++
++	DRM_DEBUG("pid=%d buf=%p used=%lu discard=%d\n",
++		  DRM_CURRENTPID,
++		  vertex->buf, vertex->used, vertex->discard);
++
++	if (vertex->prim < 0 || vertex->prim > MACH64_PRIM_POLYGON) {
++		DRM_ERROR("buffer prim %d\n", vertex->prim);
++		return -EINVAL;
++	}
++
++	if (vertex->used > MACH64_BUFFER_SIZE || (vertex->used & 3) != 0) {
++		DRM_ERROR("Invalid vertex buffer size: %lu bytes\n",
++			  vertex->used);
++		return -EINVAL;
++	}
++
++	if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
++		sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
++
++	return mach64_dma_dispatch_vertex(dev, file_priv, vertex);
++}
++
++int mach64_dma_blit(struct drm_device *dev, void *data,
++		    struct drm_file *file_priv)
++{
++	drm_mach64_private_t *dev_priv = dev->dev_private;
++	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++	drm_mach64_blit_t *blit = data;
++	int ret;
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++	ret = mach64_dma_dispatch_blit(dev, file_priv, blit);
++
++	/* Make sure we restore the 3D state next time.
++	 */
++	sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT |
++			      MACH64_UPLOAD_MISC | MACH64_UPLOAD_CLIPRECTS);
++
++	return ret;
++}
++
++int mach64_get_param(struct drm_device *dev, void *data,
++		     struct drm_file *file_priv)
++{
++	drm_mach64_private_t *dev_priv = dev->dev_private;
++	drm_mach64_getparam_t *param = data;
++	int value;
++
++	DRM_DEBUG("\n");
++
++	if (!dev_priv) {
++		DRM_ERROR("called with no initialization\n");
++		return -EINVAL;
++	}
++
++	switch (param->param) {
++	case MACH64_PARAM_FRAMES_QUEUED:
++		/* Needs lock since it calls mach64_ring_tick() */
++		LOCK_TEST_WITH_RETURN(dev, file_priv);
++		value = mach64_do_get_frames_queued(dev_priv);
++		break;
++	case MACH64_PARAM_IRQ_NR:
++		value = dev->irq;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
++		DRM_ERROR("copy_to_user\n");
++		return -EFAULT;
++	}
++
++	return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/Makefile git-nokia/drivers/gpu/drm-tungsten/Makefile
+--- git/drivers/gpu/drm-tungsten/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/Makefile	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,74 @@
++#
++# Makefile for the drm device driver.  This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++#
++# Based on David Woodhouse's mtd build.
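++# Each <driver>-objs list below names the objects linked into that
++# module; the obj-$(CONFIG_...) lines at the end select which modules
++# are built.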
++#
++# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.18 2003/08/16 17:59:17 dawes Exp $
++#
++
++drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
++		drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
++		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
++		drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
++		drm_memory_debug.o ati_pcigart.o drm_sman.o \
++		drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
++		drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
++		drm_regman.o drm_vm_nopage_compat.o drm_gem.o
++pvr2d-objs := pvr2d_drv.o
++tdfx-objs := tdfx_drv.o
++r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
++mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
++i810-objs := i810_drv.o i810_dma.o
++i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
++		i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
++		i915_opregion.o \
++		i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o
++nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
++		nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
++		nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
++		nv04_timer.o \
++		nv04_mc.o nv40_mc.o nv50_mc.o \
++		nv04_fb.o nv10_fb.o nv40_fb.o \
++		nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
++		nv04_graph.o nv10_graph.o nv20_graph.o \
++		nv40_graph.o nv50_graph.o \
++		nv04_instmem.o nv50_instmem.o
++radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
++sis-objs := sis_drv.o sis_mm.o
++ffb-objs := ffb_drv.o ffb_context.o
++savage-objs := savage_drv.o savage_bci.o savage_state.o
++via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
++		via_video.o via_dmablit.o via_fence.o via_buffer.o
++mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
++nv-objs := nv_drv.o
++xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \
++		xgi_fence.o
++
++ifeq ($(CONFIG_COMPAT),y)
++drm-objs += drm_ioc32.o
++radeon-objs += radeon_ioc32.o
++mga-objs += mga_ioc32.o
++r128-objs += r128_ioc32.o
++i915-objs += i915_ioc32.o
++nouveau-objs += nouveau_ioc32.o
++xgi-objs += xgi_ioc32.o
++endif
++
++obj-m += drm.o
++obj-$(CONFIG_DRM_TUNGSTEN_PVR2D) += pvr2d.o
++obj-$(CONFIG_DRM_TUNGSTEN_TDFX) += tdfx.o
++obj-$(CONFIG_DRM_TUNGSTEN_R128) += r128.o
++obj-$(CONFIG_DRM_TUNGSTEN_RADEON) += radeon.o
++obj-$(CONFIG_DRM_TUNGSTEN_MGA) += mga.o
++obj-$(CONFIG_DRM_TUNGSTEN_I810) += i810.o
++obj-$(CONFIG_DRM_TUNGSTEN_I915) += i915.o
++obj-$(CONFIG_DRM_TUNGSTEN_SIS) += sis.o
++obj-$(CONFIG_DRM_TUNGSTEN_FFB) += ffb.o
++obj-$(CONFIG_DRM_TUNGSTEN_SAVAGE) += savage.o
++obj-$(CONFIG_DRM_TUNGSTEN_VIA) += via.o
++obj-$(CONFIG_DRM_TUNGSTEN_MACH64) += mach64.o
++obj-$(CONFIG_DRM_TUNGSTEN_NV) += nv.o
++obj-$(CONFIG_DRM_TUNGSTEN_NOUVEAU) += nouveau.o
++obj-$(CONFIG_DRM_TUNGSTEN_XGI) += xgi.o
++
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_dma.c git-nokia/drivers/gpu/drm-tungsten/mga_dma.c
+--- git/drivers/gpu/drm-tungsten/mga_dma.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_dma.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1161 @@
++/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
++ */
++/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++/**
++ * \file mga_dma.c
++ * DMA support for MGA G200 / G400.
++ *
++ * \author Rickard E. (Rik) Faith
++ * \author Jeff Hartmann
++ * \author Keith Whitwell
++ * \author Gareth Hughes
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sarea.h"
++#include "mga_drm.h"
++#include "mga_drv.h"
++
++#define MGA_DEFAULT_USEC_TIMEOUT 10000
++#define MGA_FREELIST_DEBUG 0
++
++#define MINIMAL_CLEANUP 0
++#define FULL_CLEANUP 1
++static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
++
++/* ================================================================
++ * Engine control
++ */
++
++int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
++{
++	u32 status = 0;
++	int i;
++	DRM_DEBUG("\n");
++
++	for (i = 0; i < dev_priv->usec_timeout; i++) {
++		status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
++		if (status == MGA_ENDPRDMASTS) {
++			MGA_WRITE8(MGA_CRTC_INDEX, 0);
++			return 0;
++		}
++		DRM_UDELAY(1);
++	}
++
++#if MGA_DMA_DEBUG
++	DRM_ERROR("failed!\n");
++	DRM_INFO("   status=0x%08x\n", status);
++#endif
++	return -EBUSY;
++}
++
++static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
++{
++	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
++
++	DRM_DEBUG("\n");
++
++	/* The primary DMA stream should look like new right about now.
++	 */
++	primary->tail = 0;
++	primary->space = primary->size;
++	primary->last_flush = 0;
++
++	sarea_priv->last_wrap = 0;
++
++	/* FIXME: Reset counters, buffer ages etc...
++	 */
++
++	/* FIXME: What else do we need to reinitialize?  WARP stuff?
++	 */
++
++	return 0;
++}
++
++/* ================================================================
++ * Primary DMA stream
++ */
++
++void mga_do_dma_flush(drm_mga_private_t * dev_priv)
++{
++	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
++	u32 head, tail;
++	u32 status = 0;
++	int i;
++	DMA_LOCALS;
++	DRM_DEBUG("\n");
++
++	/* We need to wait so that we can do a safe flush */
++	for (i = 0; i < dev_priv->usec_timeout; i++) {
++		status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
++		if (status == MGA_ENDPRDMASTS)
++			break;
++		DRM_UDELAY(1);
++	}
++
++	if (primary->tail == primary->last_flush) {
++		DRM_DEBUG("   bailing out...\n");
++		return;
++	}
++
++	tail = primary->tail + dev_priv->primary->offset;
++
++	/* We need to pad the stream between flushes, as the card
++	 * actually (partially?) reads the first of these commands.
++	 * See page 4-16 in the G400 manual, middle of the page or so.
++	 */
++	BEGIN_DMA(1);
++
++	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++		  MGA_DMAPAD, 0x00000000,
++		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
++
++	ADVANCE_DMA();
++
++	primary->last_flush = primary->tail;
++
++	head = MGA_READ(MGA_PRIMADDRESS);
++
++	if (head <= tail) {
++		primary->space = primary->size - primary->tail;
++	} else {
++		primary->space = head - tail;
++	}
++
++	DRM_DEBUG("   head = 0x%06lx\n", head - dev_priv->primary->offset);
++	DRM_DEBUG("   tail = 0x%06lx\n", tail - dev_priv->primary->offset);
++	DRM_DEBUG("  space = 0x%06x\n", primary->space);
++
++	mga_flush_write_combine();
++	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
++
++	DRM_DEBUG("done.\n");
++}
++
++void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
++{
++	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
++	u32 head, tail;
++	DMA_LOCALS;
++	DRM_DEBUG("\n");
++
++	BEGIN_DMA_WRAP();
++
++	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++		  MGA_DMAPAD, 0x00000000,
++		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
++
++	ADVANCE_DMA();
++
++	tail = primary->tail + dev_priv->primary->offset;
++
++	primary->tail = 0;
++	primary->last_flush = 0;
++	primary->last_wrap++;
++
++	head = MGA_READ(MGA_PRIMADDRESS);
++
++	if (head == dev_priv->primary->offset) {
++		primary->space = primary->size;
++	} else {
++		primary->space = head - dev_priv->primary->offset;
++	}
++
++	DRM_DEBUG("   head = 0x%06lx\n", head - dev_priv->primary->offset);
++	DRM_DEBUG("   tail = 0x%06x\n", primary->tail);
++	DRM_DEBUG("   wrap = %d\n", primary->last_wrap);
++	DRM_DEBUG("  space = 0x%06x\n", primary->space);
++
++	mga_flush_write_combine();
++	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
++
++	set_bit(0, &primary->wrapped);
++	DRM_DEBUG("done.\n");
++}
++
++void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
++{
++	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
++	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++	u32 head = dev_priv->primary->offset;
++	DRM_DEBUG("\n");
++
++	sarea_priv->last_wrap++;
++	DRM_DEBUG("   wrap = %d\n", sarea_priv->last_wrap);
++
++	mga_flush_write_combine();
++	MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);
++
++	clear_bit(0, &primary->wrapped);
++	DRM_DEBUG("done.\n");
++}
++
++/* ================================================================
++ * Freelist management
++ */
++
++#define MGA_BUFFER_USED		~0
++#define MGA_BUFFER_FREE		0
++
++#if MGA_FREELIST_DEBUG
++static void mga_freelist_print(struct drm_device * dev)
++{
++	drm_mga_private_t *dev_priv = dev->dev_private;
++	drm_mga_freelist_t *entry;
++
++	DRM_INFO("\n");
++	DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
++		 dev_priv->sarea_priv->last_dispatch,
++		 (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
++				dev_priv->primary->offset));
++	DRM_INFO("current freelist:\n");
++
++	for (entry = dev_priv->head->next; entry; entry = entry->next) {
++		DRM_INFO("   %p   idx=%2d   age=0x%x 0x%06lx\n",
++			 entry, entry->buf->idx, entry->age.head,
++			 entry->age.head - dev_priv->primary->offset);
++	}
++	DRM_INFO("\n");
++}
++#endif
++
++static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
++{
++	struct drm_device_dma *dma = dev->dma;
++	struct drm_buf *buf;
++	drm_mga_buf_priv_t *buf_priv;
++	drm_mga_freelist_t *entry;
++	int i;
++	DRM_DEBUG("count=%d\n", dma->buf_count);
++
++	dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
++	if (dev_priv->head == NULL)
++		return -ENOMEM;
++
++	memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
++	SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
++
++	for (i = 0; i < dma->buf_count; i++) {
++		buf = dma->buflist[i];
++		buf_priv = buf->dev_private;
++
++		entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
++		if (entry == NULL)
++			return -ENOMEM;
++
++		memset(entry, 0, sizeof(drm_mga_freelist_t));
++
++		entry->next = dev_priv->head->next;
++		entry->prev = dev_priv->head;
++		SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
++		entry->buf = buf;
++
++		if (dev_priv->head->next != NULL)
++			dev_priv->head->next->prev = entry;
++		if (entry->next == NULL)
++			dev_priv->tail = entry;
++
++		buf_priv->list_entry = entry;
++		buf_priv->discard = 0;
++		buf_priv->dispatched = 0;
++
++		dev_priv->head->next = entry;
++	}
++
++	return 0;
++}
++
++static void mga_freelist_cleanup(struct drm_device * dev)
++{
++	drm_mga_private_t *dev_priv = dev->dev_private;
++	drm_mga_freelist_t *entry;
++	drm_mga_freelist_t *next;
++	DRM_DEBUG("\n");
++
++	entry = dev_priv->head;
++	while (entry) {
++		next = entry->next;
++		drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
++		entry = next;
++	}
++
++	dev_priv->head = dev_priv->tail = NULL;
++}
++
++#if 0
++/* FIXME: Still needed?
++ */
++static void mga_freelist_reset(struct drm_device * dev)
++{
++	drm_device_dma_t *dma = dev->dma;
++	struct drm_buf *buf;
++	drm_mga_buf_priv_t *buf_priv;
++	int i;
++
++	for (i = 0; i < dma->buf_count; i++) {
++		buf = dma->buflist[i];
++		buf_priv = buf->dev_private;
++		SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
++	}
++}
++#endif
++
++static struct drm_buf *mga_freelist_get(struct drm_device * dev)
++{
++	drm_mga_private_t *dev_priv = dev->dev_private;
++	drm_mga_freelist_t *next;
++	drm_mga_freelist_t *prev;
++	drm_mga_freelist_t *tail = dev_priv->tail;
++	u32 head, wrap;
++	DRM_DEBUG("\n");
++
++	head = MGA_READ(MGA_PRIMADDRESS);
++	wrap = dev_priv->sarea_priv->last_wrap;
++
++	DRM_DEBUG("   tail=0x%06lx %d\n",
++		  tail->age.head ?
++		  tail->age.head - dev_priv->primary->offset : 0,
++		  tail->age.wrap);
++	DRM_DEBUG("   head=0x%06lx %d\n",
++		  head - dev_priv->primary->offset, wrap);
++
++	if (TEST_AGE(&tail->age, head, wrap)) {
++		prev = dev_priv->tail->prev;
++		next = dev_priv->tail;
++		prev->next = NULL;
++		next->prev = next->next = NULL;
++		dev_priv->tail = prev;
++		SET_AGE(&next->age, MGA_BUFFER_USED, 0);
++		return next->buf;
++	}
++
++	DRM_DEBUG("returning NULL!\n");
++	return NULL;
++}
++
++int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
++{
++	drm_mga_private_t *dev_priv = dev->dev_private;
++	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
++	drm_mga_freelist_t *head, *entry, *prev;
++
++	DRM_DEBUG("age=0x%06lx wrap=%d\n",
++		  buf_priv->list_entry->age.head -
++		  dev_priv->primary->offset, buf_priv->list_entry->age.wrap);
++
++	entry = buf_priv->list_entry;
++	head = dev_priv->head;
++
++	if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
++		SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
++		prev = dev_priv->tail;
++		prev->next = entry;
++		entry->prev = prev;
++		entry->next = NULL;
++	} else {
++		prev = head->next;
++		head->next = entry;
++		prev->prev = entry;
++		entry->prev = head;
++		entry->next = prev;
++	}
++
++	return 0;
++}
++
++/* ================================================================
++ * DMA initialization, cleanup
++ */
++
++int mga_driver_load(struct drm_device *dev, unsigned long flags)
++{
++	drm_mga_private_t *dev_priv;
++
++	dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
++	if (!dev_priv)
++		return -ENOMEM;
++
++	dev->dev_private = (void *)dev_priv;
++	memset(dev_priv, 0, sizeof(drm_mga_private_t));
++
++	dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
++	dev_priv->chipset = flags;
++
++	dev_priv->mmio_base = drm_get_resource_start(dev, 1);
++	dev_priv->mmio_size = drm_get_resource_len(dev, 1);
++
++	dev->counters += 3;
++	dev->types[6] = _DRM_STAT_IRQ;
++	dev->types[7] = _DRM_STAT_PRIMARY;
++	dev->types[8] = _DRM_STAT_SECONDARY;
++
++	return 0;
++}
++
++/**
++ * Bootstrap the driver for AGP DMA.
++ *
++ * \todo
++ * Investigate whether there is any benefit to storing the WARP microcode in
++ * AGP memory.  If not, the microcode may as well always be put in PCI
++ * memory.
++ *
++ * \todo
++ * This routine needs to set dma_bs->agp_mode to the mode actually configured
++ * in the hardware.  Looking just at the Linux AGP driver code, I don't see
++ * an easy way to determine this.
++ *
++ * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
++ */
++static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
++				    drm_mga_dma_bootstrap_t * dma_bs)
++{
++	drm_mga_private_t *const dev_priv =
++	    (drm_mga_private_t *)dev->dev_private;
++	unsigned int warp_size = mga_warp_microcode_size(dev_priv);
++	int err;
++	unsigned offset;
++	const unsigned secondary_size = dma_bs->secondary_bin_count
++	    * dma_bs->secondary_bin_size;
++	const unsigned agp_size = (dma_bs->agp_size << 20);
++	struct drm_buf_desc req;
++	struct drm_agp_mode mode;
++	struct drm_agp_info info;
++	struct drm_agp_buffer agp_req;
++	struct drm_agp_binding bind_req;
++
++	/* Acquire AGP. */
++	err = drm_agp_acquire(dev);
++	if (err) {
++		DRM_ERROR("Unable to acquire AGP: %d\n", err);
++		return err;
++	}
++
++	err = drm_agp_info(dev, &info);
++	if (err) {
++		DRM_ERROR("Unable to get AGP info: %d\n", err);
++		return err;
++	}
++
++	mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
++	err = drm_agp_enable(dev, mode);
++	if (err) {
++		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
++		return err;
++	}
++
++	/* In addition to the usual AGP mode configuration, the G200 AGP cards
++	 * need to have the AGP mode "manually" set.
++	 */
++
++	if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
++		if (mode.mode & 0x02) {
++			MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
++		} else {
++			MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
++		}
++	}
++
++	/* Allocate and bind AGP memory. */
++	agp_req.size = agp_size;
++	agp_req.type = 0;
++	err = drm_agp_alloc(dev, &agp_req);
++	if (err) {
++		dev_priv->agp_size = 0;
++		DRM_ERROR("Unable to allocate %uMB AGP memory\n",
++			  dma_bs->agp_size);
++		return err;
++	}
++
++	dev_priv->agp_size = agp_size;
++	dev_priv->agp_handle = agp_req.handle;
++
++	bind_req.handle = agp_req.handle;
++	bind_req.offset = 0;
++	err = drm_agp_bind(dev, &bind_req);
++	if (err) {
++		DRM_ERROR("Unable to bind AGP memory: %d\n", err);
++		return err;
++	}
++
++	/* Make drm_addbufs happy by not trying to create a mapping for less
++	 * than a page.
++	 */
++	if (warp_size < PAGE_SIZE)
++		warp_size = PAGE_SIZE;
++
++	offset = 0;
++	err = drm_addmap(dev, offset, warp_size,
++			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
++	if (err) {
++		DRM_ERROR("Unable to map WARP microcode: %d\n", err);
++		return err;
++	}
++
++	offset += warp_size;
++	err = drm_addmap(dev, offset, dma_bs->primary_size,
++			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
++	if (err) {
++		DRM_ERROR("Unable to map primary DMA region: %d\n", err);
++		return err;
++	}
++
++	offset += dma_bs->primary_size;
++	err = drm_addmap(dev, offset, secondary_size,
++			 _DRM_AGP, 0, &dev->agp_buffer_map);
++	if (err) {
++		DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
++		return err;
++	}
++
++	(void)memset(&req, 0, sizeof(req));
++	req.count = dma_bs->secondary_bin_count;
++	req.size = dma_bs->secondary_bin_size;
++	req.flags = _DRM_AGP_BUFFER;
++	req.agp_start = offset;
++
++	err = drm_addbufs_agp(dev, &req);
++	if (err) {
++		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
++		return err;
++	}
++
++#ifdef __linux__
++	{
++		struct drm_map_list *_entry;
++		unsigned long agp_token = 0;
++
++		list_for_each_entry(_entry, &dev->maplist, head) {
++			if (_entry->map == dev->agp_buffer_map)
++				agp_token = _entry->user_token;
++		}
++		if (!agp_token)
++			return -EFAULT;
++
++		dev->agp_buffer_token = agp_token;
++	}
++#endif
++
++	offset += secondary_size;
++	err = drm_addmap(dev, offset, agp_size - offset,
++			 _DRM_AGP, 0, &dev_priv->agp_textures);
++	if (err) {
++		DRM_ERROR("Unable to map AGP texture region: %d\n", err);
++		return err;
++	}
++
++	drm_core_ioremap(dev_priv->warp, dev);
++	drm_core_ioremap(dev_priv->primary, dev);
++	drm_core_ioremap(dev->agp_buffer_map, dev);
++
++	if (!dev_priv->warp->handle ||
++	    !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
++		DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
++			  dev_priv->warp->handle, dev_priv->primary->handle,
++			  dev->agp_buffer_map->handle);
++		return -ENOMEM;
++	}
++
++	dev_priv->dma_access = MGA_PAGPXFER;
++	dev_priv->wagp_enable = MGA_WAGP_ENABLE;
++
++	DRM_INFO("Initialized card for AGP DMA.\n");
++	return 0;
++}
++
++/**
++ * Bootstrap the driver for PCI DMA.
++ *
++ * \todo
++ * The algorithm for decreasing the size of the primary DMA buffer could be
++ * better.  The size should be rounded up to the nearest page size, then
++ * decrease the request size by a single page each pass through the loop.
++ *
++ * \todo
++ * Determine whether the maximum address passed to drm_pci_alloc is correct.
++ * The same goes for drm_addbufs_pci.
++ *
++ * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
++ */
++static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
++				    drm_mga_dma_bootstrap_t * dma_bs)
++{
++	drm_mga_private_t *const dev_priv =
++	    (drm_mga_private_t *) dev->dev_private;
++	unsigned int warp_size = mga_warp_microcode_size(dev_priv);
++	unsigned int primary_size;
++	unsigned int bin_count;
++	int err;
++	struct drm_buf_desc req;
++
++	if (dev->dma == NULL) {
++		DRM_ERROR("dev->dma is NULL\n");
++		return -EFAULT;
++	}
++
++	/* Make drm_addbufs happy by not trying to create a mapping for less
++	 * than a page.
++	 */
++	if (warp_size < PAGE_SIZE)
++		warp_size = PAGE_SIZE;
++
++	/* The proper alignment is 0x100 for this mapping */
++	err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
++			 _DRM_READ_ONLY, &dev_priv->warp);
++	if (err != 0) {
++		DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
++			  err);
++		return err;
++	}
++
++	/* Other than the bottom two bits being used to encode other
++	 * information, there don't appear to be any restrictions on the
++	 * alignment of the primary or secondary DMA buffers.
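++	 * (The loop below relies on this: it simply halves the requested
++	 * primary buffer size until the allocation succeeds.)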
++	 */
++
++	for (primary_size = dma_bs->primary_size; primary_size != 0;
++	     primary_size >>= 1) {
++		/* The proper alignment for this mapping is 0x04 */
++		err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
++				 _DRM_READ_ONLY, &dev_priv->primary);
++		if (!err)
++			break;
++	}
++
++	if (err != 0) {
++		DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
++		return -ENOMEM;
++	}
++
++	if (dev_priv->primary->size != dma_bs->primary_size) {
++		DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
++			 dma_bs->primary_size,
++			 (unsigned)dev_priv->primary->size);
++		dma_bs->primary_size = dev_priv->primary->size;
++	}
++
++	for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
++	     bin_count--) {
++		(void)memset(&req, 0, sizeof(req));
++		req.count = bin_count;
++		req.size = dma_bs->secondary_bin_size;
++
++		err = drm_addbufs_pci(dev, &req);
++		if (!err) {
++			break;
++		}
++	}
++
++	if (bin_count == 0) {
++		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
++		return err;
++	}
++
++	if (bin_count != dma_bs->secondary_bin_count) {
++		DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
++			 "to %u.\n", dma_bs->secondary_bin_count, bin_count);
++
++		dma_bs->secondary_bin_count = bin_count;
++	}
++
++	dev_priv->dma_access = 0;
++	dev_priv->wagp_enable = 0;
++
++	dma_bs->agp_mode = 0;
++
++	DRM_INFO("Initialized card for PCI DMA.\n");
++	return 0;
++}
++
++static int mga_do_dma_bootstrap(struct drm_device *dev,
++				drm_mga_dma_bootstrap_t *dma_bs)
++{
++	const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
++	int err;
++	drm_mga_private_t *const dev_priv =
++	    (drm_mga_private_t *) dev->dev_private;
++
++	dev_priv->used_new_dma_init = 1;
++
++	/* The first steps are the same for both PCI and AGP based DMA.  Map
++	 * the card's MMIO registers and map a status page.
++	 */
++	err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
++			 _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
++	if (err) {
++		DRM_ERROR("Unable to map MMIO region: %d\n", err);
++		return err;
++	}
++
++	err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
++			 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
++			 &dev_priv->status);
++	if (err) {
++		DRM_ERROR("Unable to map status region: %d\n", err);
++		return err;
++	}
++
++	/* The DMA initialization procedure is slightly different for PCI and
++	 * AGP cards.  AGP cards just allocate a large block of AGP memory and
++	 * carve off portions of it for internal uses.  The remaining memory
++	 * is returned to user-mode to be used for AGP textures.
++	 */
++
++	if (is_agp) {
++		err = mga_do_agp_dma_bootstrap(dev, dma_bs);
++	}
++
++	/* If we attempted to initialize the card for AGP DMA but failed,
++	 * clean up any mess that may have been created.
++	 */
++
++	if (err) {
++		mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
++	}
++
++	/* Not only do we want to initialize PCI cards for PCI DMA, but we
++	 * also try to initialize AGP cards that could not be initialized for
++	 * AGP DMA.  This covers the case where we have an AGP card in a
++	 * system with an unsupported AGP chipset.  In that case the card will
++	 * be detected as AGP, but we won't be able to allocate any
++	 * AGP memory, etc.
++	 */
++
++	if (!is_agp || err) {
++		err = mga_do_pci_dma_bootstrap(dev, dma_bs);
++	}
++
++	return err;
++}
++
++int mga_dma_bootstrap(struct drm_device *dev, void *data,
++		      struct drm_file *file_priv)
++{
++	drm_mga_dma_bootstrap_t *bootstrap = data;
++	int err;
++	static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
++	const drm_mga_private_t *const dev_priv =
++	    (drm_mga_private_t *) dev->dev_private;
++
++	err = mga_do_dma_bootstrap(dev, bootstrap);
++	if (err) {
++		mga_do_cleanup_dma(dev, FULL_CLEANUP);
++		return err;
++	}
++
++	if (dev_priv->agp_textures != NULL) {
++		bootstrap->texture_handle = dev_priv->agp_textures->offset;
++		bootstrap->texture_size = dev_priv->agp_textures->size;
++	} else {
++		bootstrap->texture_handle = 0;
++		bootstrap->texture_size = 0;
++	}
++
++	bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
++
++	return 0;
++}
++
++static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
++{
++	drm_mga_private_t *dev_priv;
++	int ret;
++	DRM_DEBUG("\n");
++
++	dev_priv = dev->dev_private;
++
++	if (init->sgram) {
++		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
++	} else {
++		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
++	}
++	dev_priv->maccess = init->maccess;
++
++	dev_priv->fb_cpp = init->fb_cpp;
++	dev_priv->front_offset = init->front_offset;
++	dev_priv->front_pitch = init->front_pitch;
++	dev_priv->back_offset = init->back_offset;
++	dev_priv->back_pitch = init->back_pitch;
++
++	dev_priv->depth_cpp = init->depth_cpp;
++	dev_priv->depth_offset = init->depth_offset;
++	dev_priv->depth_pitch = init->depth_pitch;
++
++	/* FIXME: Need to support AGP textures...
++	 */
++	dev_priv->texture_offset = init->texture_offset[0];
++	dev_priv->texture_size = init->texture_size[0];
++
++	dev_priv->sarea = drm_getsarea(dev);
++	if (!dev_priv->sarea) {
++		DRM_ERROR("failed to find sarea!\n");
++		return -EINVAL;
++	}
++
++	if (!dev_priv->used_new_dma_init) {
++
++		dev_priv->dma_access = MGA_PAGPXFER;
++		dev_priv->wagp_enable = MGA_WAGP_ENABLE;
++
++		dev_priv->status = drm_core_findmap(dev, init->status_offset);
++		if (!dev_priv->status) {
++			DRM_ERROR("failed to find status page!\n");
++			return -EINVAL;
++		}
++		dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
++		if (!dev_priv->mmio) {
++			DRM_ERROR("failed to find mmio region!\n");
++			return -EINVAL;
++		}
++		dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
++		if (!dev_priv->warp) {
++			DRM_ERROR("failed to find warp microcode region!\n");
++			return -EINVAL;
++		}
++		dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
++		if (!dev_priv->primary) {
++			DRM_ERROR("failed to find primary dma region!\n");
++			return -EINVAL;
++		}
++		dev->agp_buffer_token = init->buffers_offset;
++		dev->agp_buffer_map =
++		    drm_core_findmap(dev, init->buffers_offset);
++		if (!dev->agp_buffer_map) {
++			DRM_ERROR("failed to find dma buffer region!\n");
++			return -EINVAL;
++		}
++
++		drm_core_ioremap(dev_priv->warp, dev);
++		drm_core_ioremap(dev_priv->primary, dev);
++		drm_core_ioremap(dev->agp_buffer_map, dev);
++	}
++
++	dev_priv->sarea_priv =
++	    (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
++				 init->sarea_priv_offset);
++
++	if (!dev_priv->warp->handle ||
++	    !dev_priv->primary->handle ||
++	    ((dev_priv->dma_access != 0) &&
++	     ((dev->agp_buffer_map == NULL) ||
++	      (dev->agp_buffer_map->handle == NULL)))) {
++		DRM_ERROR("failed to ioremap agp regions!\n");
++		return -ENOMEM;
++	}
++
++	ret = mga_warp_install_microcode(dev_priv);
++	if (ret != 0) {
++		DRM_ERROR("failed to install WARP ucode: %d!\n", ret);
++		return ret;
++	}
++
++	ret = mga_warp_init(dev_priv);
++	if (ret != 0) {
++		DRM_ERROR("failed to init WARP engine: %d!\n", ret);
++		return ret;
++	}
++
++	dev_priv->prim.status = (u32 *) dev_priv->status->handle;
++
++	mga_do_wait_for_idle(dev_priv);
++
++	/* Init the primary DMA registers.
++	 */
++	MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
++
++	dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
++	dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
++			      + dev_priv->primary->size);
++	dev_priv->prim.size = dev_priv->primary->size;
++
++	dev_priv->prim.tail = 0;
++	dev_priv->prim.space = dev_priv->prim.size;
++	dev_priv->prim.wrapped = 0;
++
++	dev_priv->prim.last_flush = 0;
++	dev_priv->prim.last_wrap = 0;
++
++	dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;
++
++	dev_priv->prim.status[0] = dev_priv->primary->offset;
++	dev_priv->prim.status[1] = 0;
++
++	dev_priv->sarea_priv->last_wrap = 0;
++	dev_priv->sarea_priv->last_frame.head = 0;
++	dev_priv->sarea_priv->last_frame.wrap = 0;
++
++	if (mga_freelist_init(dev, dev_priv) < 0) {
++		DRM_ERROR("could not initialize freelist\n");
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
++static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
++{
++	int err = 0;
++	DRM_DEBUG("\n");
++
++	/* Make sure interrupts are disabled here because the uninstall ioctl
++	 * may not have been called from userspace and after dev_private
++	 * is freed, it's too late.
++	 */
++	if (dev->irq_enabled)
++		drm_irq_uninstall(dev);
++
++	if (dev->dev_private) {
++		drm_mga_private_t *dev_priv = dev->dev_private;
++
++		if ((dev_priv->warp != NULL)
++		    && (dev_priv->warp->type != _DRM_CONSISTENT))
++			drm_core_ioremapfree(dev_priv->warp, dev);
++
++		if ((dev_priv->primary != NULL)
++		    && (dev_priv->primary->type != _DRM_CONSISTENT))
++			drm_core_ioremapfree(dev_priv->primary, dev);
++
++		if (dev->agp_buffer_map != NULL)
++			drm_core_ioremapfree(dev->agp_buffer_map, dev);
++
++		if (dev_priv->used_new_dma_init) {
++			if (dev_priv->agp_handle != 0) {
++				struct drm_agp_binding unbind_req;
++				struct drm_agp_buffer free_req;
++
++				unbind_req.handle = dev_priv->agp_handle;
++				drm_agp_unbind(dev, &unbind_req);
++
++				free_req.handle = dev_priv->agp_handle;
++				drm_agp_free(dev, &free_req);
++
++				dev_priv->agp_textures = NULL;
++				dev_priv->agp_size = 0;
++				dev_priv->agp_handle = 0;
++			}
++
++			if ((dev->agp != NULL) && dev->agp->acquired) {
++				err = drm_agp_release(dev);
++			}
++		}
++
++		dev_priv->warp = NULL;
++		dev_priv->primary = NULL;
++		dev_priv->sarea = NULL;
++		dev_priv->sarea_priv = NULL;
++		dev->agp_buffer_map = NULL;
++
++		if (full_cleanup) {
++			dev_priv->mmio = NULL;
++			dev_priv->status = NULL;
++			dev_priv->used_new_dma_init = 0;
++		}
++
++		memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
++		dev_priv->warp_pipe = 0;
++		memset(dev_priv->warp_pipe_phys, 0,
++		       sizeof(dev_priv->warp_pipe_phys));
++
++		if (dev_priv->head != NULL) {
++			mga_freelist_cleanup(dev);
++		}
++	}
++
++	return err;
++}
++
++int mga_dma_init(struct drm_device *dev, void *data,
++		 struct drm_file *file_priv)
++{
++	drm_mga_init_t *init = data;
++	int err;
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++	switch (init->func) {
++	case MGA_INIT_DMA:
++		err = mga_do_init_dma(dev, init);
++		if (err) {
++			(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
++		}
++		return err;
++	case MGA_CLEANUP_DMA:
++		return mga_do_cleanup_dma(dev, FULL_CLEANUP);
++	}
++
++	return -EINVAL;
++}
++
++/* ================================================================
++ * Primary DMA stream management
++ */
++
++int mga_dma_flush(struct drm_device *dev, void *data,
++		  struct drm_file *file_priv)
++{
++	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++	struct drm_lock *lock = data;
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++	DRM_DEBUG("%s%s%s\n",
++		  (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
++		  (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
++		  (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
++
++	WRAP_WAIT_WITH_RETURN(dev_priv);
++
++	if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
++		mga_do_dma_flush(dev_priv);
++	}
++
++	if (lock->flags & _DRM_LOCK_QUIESCENT) {
++#if MGA_DMA_DEBUG
++		int ret = mga_do_wait_for_idle(dev_priv);
++		if (ret < 0)
++			DRM_INFO("-EBUSY\n");
++		return ret;
++#else
++		return mga_do_wait_for_idle(dev_priv);
++#endif
++	} else {
++		return 0;
++	}
++}
++
++int mga_dma_reset(struct drm_device *dev, void *data,
++		  struct drm_file *file_priv)
++{
++	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++	return mga_do_dma_reset(dev_priv);
++}
++
++/* ================================================================
++ * DMA buffer management
++ */
++
++static int mga_dma_get_buffers(struct drm_device * dev,
++			       struct drm_file *file_priv, struct drm_dma * d)
++{
++	struct drm_buf *buf;
++	int i;
++
++	for (i = d->granted_count; i < d->request_count; i++) {
++		buf = mga_freelist_get(dev);
++		if (!buf)
++			return -EAGAIN;
++
++		buf->file_priv = file_priv;
++
++		if (DRM_COPY_TO_USER(&d->request_indices[i],
++				     &buf->idx, sizeof(buf->idx)))
++			return -EFAULT;
++		if (DRM_COPY_TO_USER(&d->request_sizes[i],
++				     &buf->total, sizeof(buf->total)))
++			return -EFAULT;
++
++		d->granted_count++;
++	}
++	return 0;
++}
++
++int mga_dma_buffers(struct drm_device *dev, void *data,
++		    struct drm_file *file_priv)
++{
++	struct drm_device_dma *dma = dev->dma;
++	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++	struct drm_dma *d = data;
++	int ret = 0;
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++	/* Please don't send us buffers.
++	 */
++	if (d->send_count != 0) {
++		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
++			  DRM_CURRENTPID, d->send_count);
++		return -EINVAL;
++	}
++
++	/* We'll send you buffers.
++	 */
++	if (d->request_count < 0 || d->request_count > dma->buf_count) {
++		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
++			  DRM_CURRENTPID, d->request_count, dma->buf_count);
++		return -EINVAL;
++	}
++
++	WRAP_TEST_WITH_RETURN(dev_priv);
++
++	d->granted_count = 0;
++
++	if (d->request_count) {
++		ret = mga_dma_get_buffers(dev, file_priv, d);
++	}
++
++	return ret;
++}
++
++/**
++ * Called just before the module is unloaded.
++ */
++int mga_driver_unload(struct drm_device * dev)
++{
++	drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
++	dev->dev_private = NULL;
++
++	return 0;
++}
++
++/**
++ * Called when the last opener of the device is closed.
++ */ ++void mga_driver_lastclose(struct drm_device * dev) ++{ ++ mga_do_cleanup_dma(dev, FULL_CLEANUP); ++} ++ ++int mga_driver_dma_quiescent(struct drm_device * dev) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ return mga_do_wait_for_idle(dev_priv); ++} +diff -Nurd git/drivers/gpu/drm-tungsten/mga_drm.h git-nokia/drivers/gpu/drm-tungsten/mga_drm.h +--- git/drivers/gpu/drm-tungsten/mga_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/mga_drm.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,425 @@ ++/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*- ++ * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ *
++ * Authors:
++ *    Jeff Hartmann
++ *    Keith Whitwell
++ *
++ * Rewritten by:
++ *    Gareth Hughes
++ */
++
++#ifndef __MGA_DRM_H__
++#define __MGA_DRM_H__
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (mga_sarea.h)
++ */
++
++#ifndef __MGA_SAREA_DEFINES__
++#define __MGA_SAREA_DEFINES__
++
++/* WARP pipe flags
++ */
++#define MGA_F			0x1	/* fog */
++#define MGA_A			0x2	/* alpha */
++#define MGA_S			0x4	/* specular */
++#define MGA_T2			0x8	/* multitexture */
++
++#define MGA_WARP_TGZ		0
++#define MGA_WARP_TGZF		(MGA_F)
++#define MGA_WARP_TGZA		(MGA_A)
++#define MGA_WARP_TGZAF		(MGA_F|MGA_A)
++#define MGA_WARP_TGZS		(MGA_S)
++#define MGA_WARP_TGZSF		(MGA_S|MGA_F)
++#define MGA_WARP_TGZSA		(MGA_S|MGA_A)
++#define MGA_WARP_TGZSAF		(MGA_S|MGA_F|MGA_A)
++#define MGA_WARP_T2GZ		(MGA_T2)
++#define MGA_WARP_T2GZF		(MGA_T2|MGA_F)
++#define MGA_WARP_T2GZA		(MGA_T2|MGA_A)
++#define MGA_WARP_T2GZAF		(MGA_T2|MGA_A|MGA_F)
++#define MGA_WARP_T2GZS		(MGA_T2|MGA_S)
++#define MGA_WARP_T2GZSF		(MGA_T2|MGA_S|MGA_F)
++#define MGA_WARP_T2GZSA		(MGA_T2|MGA_S|MGA_A)
++#define MGA_WARP_T2GZSAF	(MGA_T2|MGA_S|MGA_F|MGA_A)
++
++#define MGA_MAX_G200_PIPES	8	/* no multitex */
++#define MGA_MAX_G400_PIPES	16
++#define MGA_MAX_WARP_PIPES	MGA_MAX_G400_PIPES
++#define MGA_WARP_UCODE_SIZE	32768	/* in bytes */
++
++#define MGA_CARD_TYPE_G200	1
++#define MGA_CARD_TYPE_G400	2
++#define MGA_CARD_TYPE_G450	3	/* not currently used */
++#define MGA_CARD_TYPE_G550	4
++
++#define MGA_FRONT		0x1
++#define MGA_BACK		0x2
++#define MGA_DEPTH		0x4
++
++/* What needs to be changed for the current vertex dma buffer?
++ */
++#define MGA_UPLOAD_CONTEXT	0x1
++#define MGA_UPLOAD_TEX0		0x2
++#define MGA_UPLOAD_TEX1		0x4
++#define MGA_UPLOAD_PIPE		0x8
++#define MGA_UPLOAD_TEX0IMAGE	0x10	/* handled client-side */
++#define MGA_UPLOAD_TEX1IMAGE	0x20	/* handled client-side */
++#define MGA_UPLOAD_2D		0x40
++#define MGA_WAIT_AGE		0x80	/* handled client-side */
++#define MGA_UPLOAD_CLIPRECTS	0x100	/* handled client-side */
++#if 0
++#define MGA_DMA_FLUSH		0x200	/* set when someone gets the lock
++					   quiescent */
++#endif
++
++/* 128 buffers of 64k each, total 8 megs.
++ */
++#define MGA_BUFFER_SIZE		(1 << 16)
++#define MGA_NUM_BUFFERS		128
++
++/* Keep these small for testing.
++ */
++#define MGA_NR_SAREA_CLIPRECTS	8
++
++/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
++ * regions, subject to a minimum region size of (1<<16) == 64k.
++ *
++ * Clients may subdivide regions internally, but when sharing between
++ * clients, the region size is the minimum granularity.
++ */ ++ ++#define MGA_CARD_HEAP 0 ++#define MGA_AGP_HEAP 1 ++#define MGA_NR_TEX_HEAPS 2 ++#define MGA_NR_TEX_REGIONS 16 ++#define MGA_LOG_MIN_TEX_REGION_SIZE 16 ++ ++#define DRM_MGA_IDLE_RETRY 2048 ++ ++#endif /* __MGA_SAREA_DEFINES__ */ ++ ++/* Setup registers for 3D context ++ */ ++typedef struct { ++ unsigned int dstorg; ++ unsigned int maccess; ++ unsigned int plnwt; ++ unsigned int dwgctl; ++ unsigned int alphactrl; ++ unsigned int fogcolor; ++ unsigned int wflag; ++ unsigned int tdualstage0; ++ unsigned int tdualstage1; ++ unsigned int fcol; ++ unsigned int stencil; ++ unsigned int stencilctl; ++} drm_mga_context_regs_t; ++ ++/* Setup registers for 2D, X server ++ */ ++typedef struct { ++ unsigned int pitch; ++} drm_mga_server_regs_t; ++ ++/* Setup registers for each texture unit ++ */ ++typedef struct { ++ unsigned int texctl; ++ unsigned int texctl2; ++ unsigned int texfilter; ++ unsigned int texbordercol; ++ unsigned int texorg; ++ unsigned int texwidth; ++ unsigned int texheight; ++ unsigned int texorg1; ++ unsigned int texorg2; ++ unsigned int texorg3; ++ unsigned int texorg4; ++} drm_mga_texture_regs_t; ++ ++/* General aging mechanism ++ */ ++typedef struct { ++ unsigned int head; /* Position of head pointer */ ++ unsigned int wrap; /* Primary DMA wrap count */ ++} drm_mga_age_t; ++ ++typedef struct _drm_mga_sarea { ++ /* The channel for communication of state information to the kernel ++ * on firing a vertex dma buffer. ++ */ ++ drm_mga_context_regs_t context_state; ++ drm_mga_server_regs_t server_state; ++ drm_mga_texture_regs_t tex_state[2]; ++ unsigned int warp_pipe; ++ unsigned int dirty; ++ unsigned int vertsize; ++ ++ /* The current cliprects, or a subset thereof. ++ */ ++ struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS]; ++ unsigned int nbox; ++ ++ /* Information about the most recently used 3d drawable. The ++ * client fills in the req_* fields, the server fills in the ++ * exported_ fields and puts the cliprects into boxes, above. ++ * ++ * The client clears the exported_drawable field before ++ * clobbering the boxes data. ++ */ ++ unsigned int req_drawable; /* the X drawable id */ ++ unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */ ++ ++ unsigned int exported_drawable; ++ unsigned int exported_index; ++ unsigned int exported_stamp; ++ unsigned int exported_buffers; ++ unsigned int exported_nfront; ++ unsigned int exported_nback; ++ int exported_back_x, exported_front_x, exported_w; ++ int exported_back_y, exported_front_y, exported_h; ++ struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS]; ++ ++ /* Counters for aging textures and for client-side throttling. ++ */ ++ unsigned int status[4]; ++ unsigned int last_wrap; ++ ++ drm_mga_age_t last_frame; ++ unsigned int last_enqueue; /* last time a buffer was enqueued */ ++ unsigned int last_dispatch; /* age of the most recently dispatched buffer */ ++ unsigned int last_quiescent; /* */ ++ ++ /* LRU lists for texture memory in agp space and on the card. ++ */ ++ struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1]; ++ unsigned int texAge[MGA_NR_TEX_HEAPS]; ++ ++ /* Mechanism to validate card state. ++ */ ++ int ctxOwner; ++} drm_mga_sarea_t; ++ ++ ++/* MGA specific ioctls ++ * The device specific ioctl range is 0x40 to 0x79. 
++ */
++#define DRM_MGA_INIT		0x00
++#define DRM_MGA_FLUSH		0x01
++#define DRM_MGA_RESET		0x02
++#define DRM_MGA_SWAP		0x03
++#define DRM_MGA_CLEAR		0x04
++#define DRM_MGA_VERTEX		0x05
++#define DRM_MGA_INDICES		0x06
++#define DRM_MGA_ILOAD		0x07
++#define DRM_MGA_BLIT		0x08
++#define DRM_MGA_GETPARAM	0x09
++
++/* 3.2:
++ * ioctls for operating on fences.
++ */
++#define DRM_MGA_SET_FENCE	0x0a
++#define DRM_MGA_WAIT_FENCE	0x0b
++#define DRM_MGA_DMA_BOOTSTRAP	0x0c
++
++
++#define DRM_IOCTL_MGA_INIT	DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
++#define DRM_IOCTL_MGA_FLUSH	DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
++#define DRM_IOCTL_MGA_RESET	DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_RESET)
++#define DRM_IOCTL_MGA_SWAP	DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_SWAP)
++#define DRM_IOCTL_MGA_CLEAR	DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
++#define DRM_IOCTL_MGA_VERTEX	DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
++#define DRM_IOCTL_MGA_INDICES	DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
++#define DRM_IOCTL_MGA_ILOAD	DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
++#define DRM_IOCTL_MGA_BLIT	DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
++#define DRM_IOCTL_MGA_GETPARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
++#define DRM_IOCTL_MGA_SET_FENCE		DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
++#define DRM_IOCTL_MGA_WAIT_FENCE	DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
++#define DRM_IOCTL_MGA_DMA_BOOTSTRAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
++
++typedef struct _drm_mga_warp_index {
++	int installed;
++	unsigned long phys_addr;
++	int size;
++} drm_mga_warp_index_t;
++
++typedef struct drm_mga_init {
++	enum {
++		MGA_INIT_DMA = 0x01,
++		MGA_CLEANUP_DMA = 0x02
++	} func;
++
++	unsigned long sarea_priv_offset;
++
++	int chipset;
++	int sgram;
++
++	unsigned int maccess;
++
++	unsigned int fb_cpp;
++	unsigned int front_offset, front_pitch;
++	unsigned int back_offset, back_pitch;
++
++	unsigned int depth_cpp;
++	unsigned int depth_offset, depth_pitch;
++
++	unsigned int texture_offset[MGA_NR_TEX_HEAPS];
++	unsigned int texture_size[MGA_NR_TEX_HEAPS];
++
++	unsigned long fb_offset;
++	unsigned long mmio_offset;
++	unsigned long status_offset;
++	unsigned long warp_offset;
++	unsigned long primary_offset;
++	unsigned long buffers_offset;
++} drm_mga_init_t;
++
++
++typedef struct drm_mga_dma_bootstrap {
++	/**
++	 * \name AGP texture region
++	 *
++	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
++	 * be filled in with the actual AGP texture settings.
++	 *
++	 * \warning
++	 * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
++	 * is zero, it means that PCI memory (most likely through the use of
++	 * an IOMMU) is being used for "AGP" textures.
++	 */
++	/*@{*/
++	unsigned long texture_handle;	/**< Handle used to map AGP textures. */
++	uint32_t texture_size;		/**< Size of the AGP texture region. */
++	/*@}*/
++
++
++	/**
++	 * Requested size of the primary DMA region.
++	 *
++	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
++	 * filled in with the actual size of the primary DMA region that was
++	 * allocated.
++	 */
++	uint32_t primary_size;
++
++
++	/**
++	 * Requested number of secondary DMA buffers.
++	 *
++	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
++	 * filled in with the actual number of secondary DMA buffers
++	 * allocated.  Particularly when PCI DMA is used, this may be
++	 * (substantially) less than the number requested.
++	 */
++	uint32_t secondary_bin_count;
++
++
++	/**
++	 * Requested size of each secondary DMA buffer.
++	 *
++	 * While the kernel \b is free to reduce
++	 * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
++	 * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
++	 */
++	uint32_t secondary_bin_size;
++
++
++	/**
++	 * Bit-wise mask of AGPSTAT2_* values.  Currently only \c AGPSTAT2_1X,
++	 * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported.  If this value is
++	 * zero, it means that PCI DMA should be used, even if AGP is
++	 * possible.
++	 *
++	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
++	 * filled in with the actual AGP mode.  If AGP was not available
++	 * (i.e., PCI DMA was used), this value will be zero.
++	 */
++	uint32_t agp_mode;
++
++
++	/**
++	 * Desired AGP GART size, measured in megabytes.
++	 */
++	uint8_t agp_size;
++} drm_mga_dma_bootstrap_t;
++
++typedef struct drm_mga_clear {
++	unsigned int flags;
++	unsigned int clear_color;
++	unsigned int clear_depth;
++	unsigned int color_mask;
++	unsigned int depth_mask;
++} drm_mga_clear_t;
++
++typedef struct drm_mga_vertex {
++	int idx;	/* buffer to queue */
++	int used;	/* bytes in use */
++	int discard;	/* client finished with buffer? */
++} drm_mga_vertex_t;
++
++typedef struct drm_mga_indices {
++	int idx;	/* buffer to queue */
++	unsigned int start;
++	unsigned int end;
++	int discard;	/* client finished with buffer? */
++} drm_mga_indices_t;
++
++typedef struct drm_mga_iload {
++	int idx;
++	unsigned int dstorg;
++	unsigned int length;
++} drm_mga_iload_t;
++
++typedef struct _drm_mga_blit {
++	unsigned int planemask;
++	unsigned int srcorg;
++	unsigned int dstorg;
++	int src_pitch, dst_pitch;
++	int delta_sx, delta_sy;
++	int delta_dx, delta_dy;
++	int height, ydir;	/* flip image vertically */
++	int source_pitch, dest_pitch;
++} drm_mga_blit_t;
++
++/* 3.1: An ioctl to get parameters that aren't available to the 3d
++ * client any other way.
++ */
++#define MGA_PARAM_IRQ_NR	1
++
++/* 3.2: Query the actual card type.  The DDX only distinguishes between
++ * G200 chips and non-G200 chips, which it calls G400.  It turns out that
++ * there are some very subtle differences between the G4x0 chips and the G550
++ * chips.  Using this parameter query, a client-side driver can detect the
++ * difference between a G4x0 and a G550.
++ */
++#define MGA_PARAM_CARD_TYPE	2
++
++typedef struct drm_mga_getparam {
++	int param;
++	void __user *value;
++} drm_mga_getparam_t;
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_drv.c git-nokia/drivers/gpu/drm-tungsten/mga_drv.c
+--- git/drivers/gpu/drm-tungsten/mga_drv.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_drv.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,152 @@
++/* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*-
++ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Rickard E. (Rik) Faith ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mga_drm.h" ++#include "mga_drv.h" ++ ++#include "drm_pciids.h" ++ ++static int mga_driver_device_is_agp(struct drm_device * dev); ++ ++static struct pci_device_id pciidlist[] = { ++ mga_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | ++ DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, ++ .dev_priv_size = sizeof (drm_mga_buf_priv_t), ++ .load = mga_driver_load, ++ .unload = mga_driver_unload, ++ .lastclose = mga_driver_lastclose, ++ .dma_quiescent = mga_driver_dma_quiescent, ++ .device_is_agp = mga_driver_device_is_agp, ++ .get_vblank_counter = mga_get_vblank_counter, ++ .enable_vblank = mga_enable_vblank, ++ .disable_vblank = mga_disable_vblank, ++ .irq_preinstall = mga_driver_irq_preinstall, ++ .irq_postinstall = mga_driver_irq_postinstall, ++ .irq_uninstall = mga_driver_irq_uninstall, ++ .irq_handler = mga_driver_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = mga_ioctls, ++ .dma_ioctl = mga_dma_buffers, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) ++ .compat_ioctl = mga_compat_ioctl, ++#endif ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++ ++static int __init mga_init(void) ++{ ++ driver.num_ioctls = mga_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit mga_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(mga_init); ++module_exit(mga_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and 
additional rights"); ++ ++/** ++ * Determine if the device really is AGP or not. ++ * ++ * In addition to the usual tests performed by \c drm_device_is_agp, this ++ * function detects PCI G450 cards that appear to the system exactly like ++ * AGP G450 cards. ++ * ++ * \param dev The device to be tested. ++ * ++ * \returns ++ * If the device is a PCI G450, zero is returned. Otherwise 2 is returned. ++ */ ++static int mga_driver_device_is_agp(struct drm_device * dev) ++{ ++ const struct pci_dev * const pdev = dev->pdev; ++ ++ ++ /* There are PCI versions of the G450. These cards have the ++ * same PCI ID as the AGP G450, but have an additional PCI-to-PCI ++ * bridge chip. We detect these cards, which are not currently ++ * supported by this driver, by looking at the device ID of the ++ * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the ++ * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the ++ * device. ++ */ ++ ++ if ((pdev->device == 0x0525) && pdev->bus->self ++ && (pdev->bus->self->vendor == 0x3388) ++ && (pdev->bus->self->device == 0x0021)) { ++ return 0; ++ } ++ ++ return 2; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/mga_drv.h git-nokia/drivers/gpu/drm-tungsten/mga_drv.h +--- git/drivers/gpu/drm-tungsten/mga_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/mga_drv.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,691 @@ ++/* mga_drv.h -- Private header for the Matrox G200/G400 driver -*- linux-c -*- ++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Gareth Hughes ++ */ ++ ++#ifndef __MGA_DRV_H__ ++#define __MGA_DRV_H__ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." 
++
++#define DRIVER_NAME		"mga"
++#define DRIVER_DESC		"Matrox G200/G400"
++#define DRIVER_DATE		"20060319"
++
++#define DRIVER_MAJOR		3
++#define DRIVER_MINOR		2
++#define DRIVER_PATCHLEVEL	2
++
++typedef struct drm_mga_primary_buffer {
++	u8 *start;
++	u8 *end;
++	int size;
++
++	u32 tail;
++	int space;
++	volatile long wrapped;
++
++	volatile u32 *status;
++
++	u32 last_flush;
++	u32 last_wrap;
++
++	u32 high_mark;
++} drm_mga_primary_buffer_t;
++
++typedef struct drm_mga_freelist {
++	struct drm_mga_freelist *next;
++	struct drm_mga_freelist *prev;
++	drm_mga_age_t age;
++	struct drm_buf *buf;
++} drm_mga_freelist_t;
++
++typedef struct {
++	drm_mga_freelist_t *list_entry;
++	int discard;
++	int dispatched;
++} drm_mga_buf_priv_t;
++
++typedef struct drm_mga_private {
++	drm_mga_primary_buffer_t prim;
++	drm_mga_sarea_t *sarea_priv;
++
++	drm_mga_freelist_t *head;
++	drm_mga_freelist_t *tail;
++
++	unsigned int warp_pipe;
++	unsigned long warp_pipe_phys[MGA_MAX_WARP_PIPES];
++
++	int chipset;
++	int usec_timeout;
++
++	/**
++	 * If set, the new DMA initialization sequence was used.  This is
++	 * primarily used to select how the driver should uninitialize its
++	 * internal DMA structures.
++	 */
++	int used_new_dma_init;
++
++	/**
++	 * If AGP memory is used for DMA buffers, this will be the value
++	 * \c MGA_PAGPXFER.  Otherwise, it will be zero (for a PCI transfer).
++	 */
++	u32 dma_access;
++
++	/**
++	 * If AGP memory is used for DMA buffers, this will be the value
++	 * \c MGA_WAGP_ENABLE.  Otherwise, it will be zero (for a PCI
++	 * transfer).
++	 */
++	u32 wagp_enable;
++
++	/**
++	 * \name MMIO region parameters.
++	 *
++	 * \sa drm_mga_private_t::mmio
++	 */
++	/*@{*/
++	u32 mmio_base;		/**< Bus address of base of MMIO. */
++	u32 mmio_size;		/**< Size of the MMIO region. */
++	/*@}*/
++
++	u32 clear_cmd;
++	u32 maccess;
++
++	atomic_t vbl_received;	/**< Number of vblanks received.
*/ ++ wait_queue_head_t fence_queue; ++ atomic_t last_fence_retired; ++ u32 next_fence_to_post; ++ ++ unsigned int fb_cpp; ++ unsigned int front_offset; ++ unsigned int front_pitch; ++ unsigned int back_offset; ++ unsigned int back_pitch; ++ ++ unsigned int depth_cpp; ++ unsigned int depth_offset; ++ unsigned int depth_pitch; ++ ++ unsigned int texture_offset; ++ unsigned int texture_size; ++ ++ drm_local_map_t *sarea; ++ drm_local_map_t *mmio; ++ drm_local_map_t *status; ++ drm_local_map_t *warp; ++ drm_local_map_t *primary; ++ drm_local_map_t *agp_textures; ++ ++ unsigned long agp_handle; ++ unsigned int agp_size; ++} drm_mga_private_t; ++ ++extern struct drm_ioctl_desc mga_ioctls[]; ++extern int mga_max_ioctl; ++ ++ /* mga_dma.c */ ++extern int mga_dma_bootstrap(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mga_dma_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mga_dma_flush(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mga_dma_reset(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mga_dma_buffers(struct drm_device *dev, void *data, ++ struct drm_file *file_priv); ++extern int mga_driver_load(struct drm_device *dev, unsigned long flags); ++extern int mga_driver_unload(struct drm_device * dev); ++extern void mga_driver_lastclose(struct drm_device * dev); ++extern int mga_driver_dma_quiescent(struct drm_device * dev); ++ ++extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv); ++ ++extern void mga_do_dma_flush(drm_mga_private_t * dev_priv); ++extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv); ++extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv); ++ ++extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf); ++ ++ /* mga_warp.c */ ++extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv); ++extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv); ++extern int mga_warp_init(drm_mga_private_t * dev_priv); ++ ++ /* mga_irq.c */ ++extern int mga_enable_vblank(struct drm_device *dev, int crtc); ++extern void mga_disable_vblank(struct drm_device *dev, int crtc); ++extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc); ++extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); ++extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); ++extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); ++extern void mga_driver_irq_preinstall(struct drm_device * dev); ++extern int mga_driver_irq_postinstall(struct drm_device * dev); ++extern void mga_driver_irq_uninstall(struct drm_device * dev); ++extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg); ++ ++#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER() ++ ++#if defined(__linux__) && defined(__alpha__) ++#define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle)) ++#define MGA_ADDR( reg ) (MGA_BASE(reg) + reg) ++ ++#define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg ) ++#define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg ) ++ ++#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg))) ++#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg))) ++#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0) ++#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0) ++ ++static inline u32 _MGA_READ(u32 * addr) ++{ ++ 
DRM_MEMORYBARRIER();
++	return *(volatile u32 *)addr;
++}
++#else
++#define MGA_READ8( reg )	DRM_READ8(dev_priv->mmio, (reg))
++#define MGA_READ( reg )		DRM_READ32(dev_priv->mmio, (reg))
++#define MGA_WRITE8( reg, val )	DRM_WRITE8(dev_priv->mmio, (reg), (val))
++#define MGA_WRITE( reg, val )	DRM_WRITE32(dev_priv->mmio, (reg), (val))
++#endif
++
++#define DWGREG0		0x1c00
++#define DWGREG0_END	0x1dff
++#define DWGREG1		0x2c00
++#define DWGREG1_END	0x2dff
++
++#define ISREG0(r)	(r >= DWGREG0 && r <= DWGREG0_END)
++#define DMAREG0(r)	(u8)((r - DWGREG0) >> 2)
++#define DMAREG1(r)	(u8)(((r - DWGREG1) >> 2) | 0x80)
++#define DMAREG(r)	(ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
++
++/* ================================================================
++ * Helper macros...
++ */
++
++#define MGA_EMIT_STATE( dev_priv, dirty )				\
++do {									\
++	if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) {			\
++		if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) {	\
++			mga_g400_emit_state( dev_priv );		\
++		} else {						\
++			mga_g200_emit_state( dev_priv );		\
++		}							\
++	}								\
++} while (0)
++
++#define WRAP_TEST_WITH_RETURN( dev_priv )				\
++do {									\
++	if ( test_bit( 0, &dev_priv->prim.wrapped ) ) {			\
++		if ( mga_is_idle( dev_priv ) ) {			\
++			mga_do_dma_wrap_end( dev_priv );		\
++		} else if ( dev_priv->prim.space <			\
++			    dev_priv->prim.high_mark ) {		\
++			if ( MGA_DMA_DEBUG )				\
++				DRM_INFO( "wrap...\n");			\
++			return -EBUSY;					\
++		}							\
++	}								\
++} while (0)
++
++#define WRAP_WAIT_WITH_RETURN( dev_priv )				\
++do {									\
++	if ( test_bit( 0, &dev_priv->prim.wrapped ) ) {			\
++		if ( mga_do_wait_for_idle( dev_priv ) < 0 ) {		\
++			if ( MGA_DMA_DEBUG )				\
++				DRM_INFO( "wrap...\n");			\
++			return -EBUSY;					\
++		}							\
++		mga_do_dma_wrap_end( dev_priv );			\
++	}								\
++} while (0)
++
++/* ================================================================
++ * Primary DMA command stream
++ */
++
++#define MGA_VERBOSE	0
++
++#define DMA_LOCALS	unsigned int write; volatile u8 *prim;
++
++#define DMA_BLOCK_SIZE	(5 * sizeof(u32))
++
++#define BEGIN_DMA( n )							\
++do {									\
++	if ( MGA_VERBOSE ) {						\
++		DRM_INFO( "BEGIN_DMA( %d )\n", (n) );			\
++		DRM_INFO( "   space=0x%x req=0x%Zx\n",			\
++			  dev_priv->prim.space, (n) * DMA_BLOCK_SIZE );	\
++	}								\
++	prim = dev_priv->prim.start;					\
++	write = dev_priv->prim.tail;					\
++} while (0)
++
++#define BEGIN_DMA_WRAP()						\
++do {									\
++	if ( MGA_VERBOSE ) {						\
++		DRM_INFO( "BEGIN_DMA()\n" );				\
++		DRM_INFO( "   space=0x%x\n", dev_priv->prim.space );	\
++	}								\
++	prim = dev_priv->prim.start;					\
++	write = dev_priv->prim.tail;					\
++} while (0)
++
++#define ADVANCE_DMA()							\
++do {									\
++	dev_priv->prim.tail = write;					\
++	if ( MGA_VERBOSE ) {						\
++		DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n",	\
++			  write, dev_priv->prim.space );		\
++	}								\
++} while (0)
++
++#define FLUSH_DMA()							\
++do {									\
++	if ( 0 ) {							\
++		DRM_INFO( "\n" );					\
++		DRM_INFO( "   tail=0x%06x head=0x%06lx\n",		\
++			  dev_priv->prim.tail,				\
++			  MGA_READ( MGA_PRIMADDRESS ) -			\
++			  dev_priv->primary->offset );			\
++	}								\
++	if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) {		\
++		if ( dev_priv->prim.space <				\
++		     dev_priv->prim.high_mark ) {			\
++			mga_do_dma_wrap_start( dev_priv );		\
++		} else {						\
++			mga_do_dma_flush( dev_priv );			\
++		}							\
++	}								\
++} while (0)
++
++/* Never use this, always use DMA_BLOCK(...) for primary DMA output.
++ */ ++#define DMA_WRITE( offset, val ) \ ++do { \ ++ if ( MGA_VERBOSE ) { \ ++ DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \ ++ (u32)(val), write + (offset) * sizeof(u32) ); \ ++ } \ ++ *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \ ++} while (0) ++ ++#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 ) \ ++do { \ ++ DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) | \ ++ (DMAREG( reg1 ) << 8) | \ ++ (DMAREG( reg2 ) << 16) | \ ++ (DMAREG( reg3 ) << 24)) ); \ ++ DMA_WRITE( 1, val0 ); \ ++ DMA_WRITE( 2, val1 ); \ ++ DMA_WRITE( 3, val2 ); \ ++ DMA_WRITE( 4, val3 ); \ ++ write += DMA_BLOCK_SIZE; \ ++} while (0) ++ ++/* Buffer aging via primary DMA stream head pointer. ++ */ ++ ++#define SET_AGE( age, h, w ) \ ++do { \ ++ (age)->head = h; \ ++ (age)->wrap = w; \ ++} while (0) ++ ++#define TEST_AGE( age, h, w ) ( (age)->wrap < w || \ ++ ( (age)->wrap == w && \ ++ (age)->head < h ) ) ++ ++#define AGE_BUFFER( buf_priv ) \ ++do { \ ++ drm_mga_freelist_t *entry = (buf_priv)->list_entry; \ ++ if ( (buf_priv)->dispatched ) { \ ++ entry->age.head = (dev_priv->prim.tail + \ ++ dev_priv->primary->offset); \ ++ entry->age.wrap = dev_priv->sarea_priv->last_wrap; \ ++ } else { \ ++ entry->age.head = 0; \ ++ entry->age.wrap = 0; \ ++ } \ ++} while (0) ++ ++#define MGA_ENGINE_IDLE_MASK (MGA_SOFTRAPEN | \ ++ MGA_DWGENGSTS | \ ++ MGA_ENDPRDMASTS) ++#define MGA_DMA_IDLE_MASK (MGA_SOFTRAPEN | \ ++ MGA_ENDPRDMASTS) ++ ++#define MGA_DMA_DEBUG 0 ++ ++/* A reduced set of the mga registers. ++ */ ++#define MGA_CRTC_INDEX 0x1fd4 ++#define MGA_CRTC_DATA 0x1fd5 ++ ++/* CRTC11 */ ++#define MGA_VINTCLR (1 << 4) ++#define MGA_VINTEN (1 << 5) ++ ++#define MGA_ALPHACTRL 0x2c7c ++#define MGA_AR0 0x1c60 ++#define MGA_AR1 0x1c64 ++#define MGA_AR2 0x1c68 ++#define MGA_AR3 0x1c6c ++#define MGA_AR4 0x1c70 ++#define MGA_AR5 0x1c74 ++#define MGA_AR6 0x1c78 ++ ++#define MGA_CXBNDRY 0x1c80 ++#define MGA_CXLEFT 0x1ca0 ++#define MGA_CXRIGHT 0x1ca4 ++ ++#define MGA_DMAPAD 0x1c54 ++#define MGA_DSTORG 0x2cb8 ++#define MGA_DWGCTL 0x1c00 ++# define MGA_OPCOD_MASK (15 << 0) ++# define MGA_OPCOD_TRAP (4 << 0) ++# define MGA_OPCOD_TEXTURE_TRAP (6 << 0) ++# define MGA_OPCOD_BITBLT (8 << 0) ++# define MGA_OPCOD_ILOAD (9 << 0) ++# define MGA_ATYPE_MASK (7 << 4) ++# define MGA_ATYPE_RPL (0 << 4) ++# define MGA_ATYPE_RSTR (1 << 4) ++# define MGA_ATYPE_ZI (3 << 4) ++# define MGA_ATYPE_BLK (4 << 4) ++# define MGA_ATYPE_I (7 << 4) ++# define MGA_LINEAR (1 << 7) ++# define MGA_ZMODE_MASK (7 << 8) ++# define MGA_ZMODE_NOZCMP (0 << 8) ++# define MGA_ZMODE_ZE (2 << 8) ++# define MGA_ZMODE_ZNE (3 << 8) ++# define MGA_ZMODE_ZLT (4 << 8) ++# define MGA_ZMODE_ZLTE (5 << 8) ++# define MGA_ZMODE_ZGT (6 << 8) ++# define MGA_ZMODE_ZGTE (7 << 8) ++# define MGA_SOLID (1 << 11) ++# define MGA_ARZERO (1 << 12) ++# define MGA_SGNZERO (1 << 13) ++# define MGA_SHIFTZERO (1 << 14) ++# define MGA_BOP_MASK (15 << 16) ++# define MGA_BOP_ZERO (0 << 16) ++# define MGA_BOP_DST (10 << 16) ++# define MGA_BOP_SRC (12 << 16) ++# define MGA_BOP_ONE (15 << 16) ++# define MGA_TRANS_SHIFT 20 ++# define MGA_TRANS_MASK (15 << 20) ++# define MGA_BLTMOD_MASK (15 << 25) ++# define MGA_BLTMOD_BMONOLEF (0 << 25) ++# define MGA_BLTMOD_BMONOWF (4 << 25) ++# define MGA_BLTMOD_PLAN (1 << 25) ++# define MGA_BLTMOD_BFCOL (2 << 25) ++# define MGA_BLTMOD_BU32BGR (3 << 25) ++# define MGA_BLTMOD_BU32RGB (7 << 25) ++# define MGA_BLTMOD_BU24BGR (11 << 25) ++# define MGA_BLTMOD_BU24RGB (15 << 25) ++# define MGA_PATTERN (1 << 29) ++# define MGA_TRANSC (1 << 30) ++# define 
MGA_CLIPDIS (1 << 31) ++#define MGA_DWGSYNC 0x2c4c ++ ++#define MGA_FCOL 0x1c24 ++#define MGA_FIFOSTATUS 0x1e10 ++#define MGA_FOGCOL 0x1cf4 ++#define MGA_FXBNDRY 0x1c84 ++#define MGA_FXLEFT 0x1ca8 ++#define MGA_FXRIGHT 0x1cac ++ ++#define MGA_ICLEAR 0x1e18 ++# define MGA_SOFTRAPICLR (1 << 0) ++# define MGA_VLINEICLR (1 << 5) ++#define MGA_IEN 0x1e1c ++# define MGA_SOFTRAPIEN (1 << 0) ++# define MGA_VLINEIEN (1 << 5) ++ ++#define MGA_LEN 0x1c5c ++ ++#define MGA_MACCESS 0x1c04 ++ ++#define MGA_PITCH 0x1c8c ++#define MGA_PLNWT 0x1c1c ++#define MGA_PRIMADDRESS 0x1e58 ++# define MGA_DMA_GENERAL (0 << 0) ++# define MGA_DMA_BLIT (1 << 0) ++# define MGA_DMA_VECTOR (2 << 0) ++# define MGA_DMA_VERTEX (3 << 0) ++#define MGA_PRIMEND 0x1e5c ++# define MGA_PRIMNOSTART (1 << 0) ++# define MGA_PAGPXFER (1 << 1) ++#define MGA_PRIMPTR 0x1e50 ++# define MGA_PRIMPTREN0 (1 << 0) ++# define MGA_PRIMPTREN1 (1 << 1) ++ ++#define MGA_RST 0x1e40 ++# define MGA_SOFTRESET (1 << 0) ++# define MGA_SOFTEXTRST (1 << 1) ++ ++#define MGA_SECADDRESS 0x2c40 ++#define MGA_SECEND 0x2c44 ++#define MGA_SETUPADDRESS 0x2cd0 ++#define MGA_SETUPEND 0x2cd4 ++#define MGA_SGN 0x1c58 ++#define MGA_SOFTRAP 0x2c48 ++#define MGA_SRCORG 0x2cb4 ++# define MGA_SRMMAP_MASK (1 << 0) ++# define MGA_SRCMAP_FB (0 << 0) ++# define MGA_SRCMAP_SYSMEM (1 << 0) ++# define MGA_SRCACC_MASK (1 << 1) ++# define MGA_SRCACC_PCI (0 << 1) ++# define MGA_SRCACC_AGP (1 << 1) ++#define MGA_STATUS 0x1e14 ++# define MGA_SOFTRAPEN (1 << 0) ++# define MGA_VSYNCPEN (1 << 4) ++# define MGA_VLINEPEN (1 << 5) ++# define MGA_DWGENGSTS (1 << 16) ++# define MGA_ENDPRDMASTS (1 << 17) ++#define MGA_STENCIL 0x2cc8 ++#define MGA_STENCILCTL 0x2ccc ++ ++#define MGA_TDUALSTAGE0 0x2cf8 ++#define MGA_TDUALSTAGE1 0x2cfc ++#define MGA_TEXBORDERCOL 0x2c5c ++#define MGA_TEXCTL 0x2c30 ++#define MGA_TEXCTL2 0x2c3c ++# define MGA_DUALTEX (1 << 7) ++# define MGA_G400_TC2_MAGIC (1 << 15) ++# define MGA_MAP1_ENABLE (1 << 31) ++#define MGA_TEXFILTER 0x2c58 ++#define MGA_TEXHEIGHT 0x2c2c ++#define MGA_TEXORG 0x2c24 ++# define MGA_TEXORGMAP_MASK (1 << 0) ++# define MGA_TEXORGMAP_FB (0 << 0) ++# define MGA_TEXORGMAP_SYSMEM (1 << 0) ++# define MGA_TEXORGACC_MASK (1 << 1) ++# define MGA_TEXORGACC_PCI (0 << 1) ++# define MGA_TEXORGACC_AGP (1 << 1) ++#define MGA_TEXORG1 0x2ca4 ++#define MGA_TEXORG2 0x2ca8 ++#define MGA_TEXORG3 0x2cac ++#define MGA_TEXORG4 0x2cb0 ++#define MGA_TEXTRANS 0x2c34 ++#define MGA_TEXTRANSHIGH 0x2c38 ++#define MGA_TEXWIDTH 0x2c28 ++ ++#define MGA_WACCEPTSEQ 0x1dd4 ++#define MGA_WCODEADDR 0x1e6c ++#define MGA_WFLAG 0x1dc4 ++#define MGA_WFLAG1 0x1de0 ++#define MGA_WFLAGNB 0x1e64 ++#define MGA_WFLAGNB1 0x1e08 ++#define MGA_WGETMSB 0x1dc8 ++#define MGA_WIADDR 0x1dc0 ++#define MGA_WIADDR2 0x1dd8 ++# define MGA_WMODE_SUSPEND (0 << 0) ++# define MGA_WMODE_RESUME (1 << 0) ++# define MGA_WMODE_JUMP (2 << 0) ++# define MGA_WMODE_START (3 << 0) ++# define MGA_WAGP_ENABLE (1 << 2) ++#define MGA_WMISC 0x1e70 ++# define MGA_WUCODECACHE_ENABLE (1 << 0) ++# define MGA_WMASTER_ENABLE (1 << 1) ++# define MGA_WCACHEFLUSH_ENABLE (1 << 3) ++#define MGA_WVRTXSZ 0x1dcc ++ ++#define MGA_YBOT 0x1c9c ++#define MGA_YDST 0x1c90 ++#define MGA_YDSTLEN 0x1c88 ++#define MGA_YDSTORG 0x1c94 ++#define MGA_YTOP 0x1c98 ++ ++#define MGA_ZORG 0x1c0c ++ ++/* This finishes the current batch of commands ++ */ ++#define MGA_EXEC 0x0100 ++ ++/* AGP PLL encoding (for G200 only). 
++ */ ++#define MGA_AGP_PLL 0x1e4c ++# define MGA_AGP2XPLL_DISABLE (0 << 0) ++# define MGA_AGP2XPLL_ENABLE (1 << 0) ++ ++/* Warp registers ++ */ ++#define MGA_WR0 0x2d00 ++#define MGA_WR1 0x2d04 ++#define MGA_WR2 0x2d08 ++#define MGA_WR3 0x2d0c ++#define MGA_WR4 0x2d10 ++#define MGA_WR5 0x2d14 ++#define MGA_WR6 0x2d18 ++#define MGA_WR7 0x2d1c ++#define MGA_WR8 0x2d20 ++#define MGA_WR9 0x2d24 ++#define MGA_WR10 0x2d28 ++#define MGA_WR11 0x2d2c ++#define MGA_WR12 0x2d30 ++#define MGA_WR13 0x2d34 ++#define MGA_WR14 0x2d38 ++#define MGA_WR15 0x2d3c ++#define MGA_WR16 0x2d40 ++#define MGA_WR17 0x2d44 ++#define MGA_WR18 0x2d48 ++#define MGA_WR19 0x2d4c ++#define MGA_WR20 0x2d50 ++#define MGA_WR21 0x2d54 ++#define MGA_WR22 0x2d58 ++#define MGA_WR23 0x2d5c ++#define MGA_WR24 0x2d60 ++#define MGA_WR25 0x2d64 ++#define MGA_WR26 0x2d68 ++#define MGA_WR27 0x2d6c ++#define MGA_WR28 0x2d70 ++#define MGA_WR29 0x2d74 ++#define MGA_WR30 0x2d78 ++#define MGA_WR31 0x2d7c ++#define MGA_WR32 0x2d80 ++#define MGA_WR33 0x2d84 ++#define MGA_WR34 0x2d88 ++#define MGA_WR35 0x2d8c ++#define MGA_WR36 0x2d90 ++#define MGA_WR37 0x2d94 ++#define MGA_WR38 0x2d98 ++#define MGA_WR39 0x2d9c ++#define MGA_WR40 0x2da0 ++#define MGA_WR41 0x2da4 ++#define MGA_WR42 0x2da8 ++#define MGA_WR43 0x2dac ++#define MGA_WR44 0x2db0 ++#define MGA_WR45 0x2db4 ++#define MGA_WR46 0x2db8 ++#define MGA_WR47 0x2dbc ++#define MGA_WR48 0x2dc0 ++#define MGA_WR49 0x2dc4 ++#define MGA_WR50 0x2dc8 ++#define MGA_WR51 0x2dcc ++#define MGA_WR52 0x2dd0 ++#define MGA_WR53 0x2dd4 ++#define MGA_WR54 0x2dd8 ++#define MGA_WR55 0x2ddc ++#define MGA_WR56 0x2de0 ++#define MGA_WR57 0x2de4 ++#define MGA_WR58 0x2de8 ++#define MGA_WR59 0x2dec ++#define MGA_WR60 0x2df0 ++#define MGA_WR61 0x2df4 ++#define MGA_WR62 0x2df8 ++#define MGA_WR63 0x2dfc ++# define MGA_G400_WR_MAGIC (1 << 6) ++# define MGA_G400_WR56_MAGIC 0x46480000 /* 12800.0f */ ++ ++#define MGA_ILOAD_ALIGN 64 ++#define MGA_ILOAD_MASK (MGA_ILOAD_ALIGN - 1) ++ ++#define MGA_DWGCTL_FLUSH (MGA_OPCOD_TEXTURE_TRAP | \ ++ MGA_ATYPE_I | \ ++ MGA_ZMODE_NOZCMP | \ ++ MGA_ARZERO | \ ++ MGA_SGNZERO | \ ++ MGA_BOP_SRC | \ ++ (15 << MGA_TRANS_SHIFT)) ++ ++#define MGA_DWGCTL_CLEAR (MGA_OPCOD_TRAP | \ ++ MGA_ZMODE_NOZCMP | \ ++ MGA_SOLID | \ ++ MGA_ARZERO | \ ++ MGA_SGNZERO | \ ++ MGA_SHIFTZERO | \ ++ MGA_BOP_SRC | \ ++ (0 << MGA_TRANS_SHIFT) | \ ++ MGA_BLTMOD_BMONOLEF | \ ++ MGA_TRANSC | \ ++ MGA_CLIPDIS) ++ ++#define MGA_DWGCTL_COPY (MGA_OPCOD_BITBLT | \ ++ MGA_ATYPE_RPL | \ ++ MGA_SGNZERO | \ ++ MGA_SHIFTZERO | \ ++ MGA_BOP_SRC | \ ++ (0 << MGA_TRANS_SHIFT) | \ ++ MGA_BLTMOD_BFCOL | \ ++ MGA_CLIPDIS) ++ ++/* Simple idle test. ++ */ ++static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv) ++{ ++ u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK; ++ return (status == MGA_ENDPRDMASTS); ++} ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/mga_ioc32.c git-nokia/drivers/gpu/drm-tungsten/mga_ioc32.c +--- git/drivers/gpu/drm-tungsten/mga_ioc32.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/mga_ioc32.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,234 @@ ++ ++/** ++ * \file mga_ioc32.c ++ * ++ * 32-bit ioctl compatibility routines for the MGA DRM. ++ * ++ * \author Dave Airlie with code from patches by Egbert Eich ++ * ++ * ++ * Copyright (C) Paul Mackerras 2005 ++ * Copyright (C) Egbert Eich 2003,2004 ++ * Copyright (C) Dave Airlie 2005 ++ * All Rights Reserved. 
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "mga_drm.h"
++
++typedef struct drm32_mga_init {
++	int func;
++	u32 sarea_priv_offset;
++	int chipset;
++	int sgram;
++	unsigned int maccess;
++	unsigned int fb_cpp;
++	unsigned int front_offset, front_pitch;
++	unsigned int back_offset, back_pitch;
++	unsigned int depth_cpp;
++	unsigned int depth_offset, depth_pitch;
++	unsigned int texture_offset[MGA_NR_TEX_HEAPS];
++	unsigned int texture_size[MGA_NR_TEX_HEAPS];
++	u32 fb_offset;
++	u32 mmio_offset;
++	u32 status_offset;
++	u32 warp_offset;
++	u32 primary_offset;
++	u32 buffers_offset;
++} drm_mga_init32_t;
++
++static int compat_mga_init(struct file *file, unsigned int cmd,
++			   unsigned long arg)
++{
++	drm_mga_init32_t init32;
++	drm_mga_init_t __user *init;
++	int err = 0, i;
++
++	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
++		return -EFAULT;
++
++	init = compat_alloc_user_space(sizeof(*init));
++	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
++	    || __put_user(init32.func, &init->func)
++	    || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
++	    || __put_user(init32.chipset, &init->chipset)
++	    || __put_user(init32.sgram, &init->sgram)
++	    || __put_user(init32.maccess, &init->maccess)
++	    || __put_user(init32.fb_cpp, &init->fb_cpp)
++	    || __put_user(init32.front_offset, &init->front_offset)
++	    || __put_user(init32.front_pitch, &init->front_pitch)
++	    || __put_user(init32.back_offset, &init->back_offset)
++	    || __put_user(init32.back_pitch, &init->back_pitch)
++	    || __put_user(init32.depth_cpp, &init->depth_cpp)
++	    || __put_user(init32.depth_offset, &init->depth_offset)
++	    || __put_user(init32.depth_pitch, &init->depth_pitch)
++	    || __put_user(init32.fb_offset, &init->fb_offset)
++	    || __put_user(init32.mmio_offset, &init->mmio_offset)
++	    || __put_user(init32.status_offset, &init->status_offset)
++	    || __put_user(init32.warp_offset, &init->warp_offset)
++	    || __put_user(init32.primary_offset, &init->primary_offset)
++	    || __put_user(init32.buffers_offset, &init->buffers_offset))
++		return -EFAULT;
++
++	for (i = 0; i < MGA_NR_TEX_HEAPS; i++) {
++		err |= __put_user(init32.texture_offset[i], &init->texture_offset[i]);
++		err |= __put_user(init32.texture_size[i], &init->texture_size[i]);
++	}
++	if (err)
++		return -EFAULT;
++
++	return drm_ioctl(file->f_dentry->d_inode, file,
++			 DRM_IOCTL_MGA_INIT, (unsigned long) init);
++}
++
++
++typedef struct drm_mga_getparam32 {
++	int param;
++	u32 value;
++} drm_mga_getparam32_t;
++
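/*
 * [Illustrative sketch, not part of the patch.]  For contrast with the
 * 32-bit structure above: this is roughly how a native client issues the
 * GETPARAM ioctl that compat_mga_getparam() below has to translate.  In a
 * 32-bit process gp.value is a 32-bit pointer, so the struct layout no
 * longer matches the 64-bit kernel's drm_mga_getparam_t.  Assumes libdrm
 * (xf86drm.h) and an already-open device descriptor fd.
 */
#include <xf86drm.h>
#include "mga_drm.h"

/* Hypothetical user-space query of the card type (MGA_PARAM_CARD_TYPE,
 * DRM version 3.2+); returns an MGA_CARD_TYPE_* value or -1 on error. */
static int mga_query_card_type(int fd)
{
	int value = 0;
	drm_mga_getparam_t gp;

	gp.param = MGA_PARAM_CARD_TYPE;
	gp.value = &value;	/* the pointer width is what forces the compat shim */
	if (drmCommandWriteRead(fd, DRM_MGA_GETPARAM, &gp, sizeof(gp)) != 0)
		return -1;
	return value;
}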
++ ++static int compat_mga_getparam(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_mga_getparam32_t getparam32; ++ drm_mga_getparam_t __user *getparam; ++ ++ if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32))) ++ return -EFAULT; ++ ++ getparam = compat_alloc_user_space(sizeof(*getparam)); ++ if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam)) ++ || __put_user(getparam32.param, &getparam->param) ++ || __put_user((void __user *)(unsigned long)getparam32.value, &getparam->value)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); ++} ++ ++typedef struct drm_mga_drm_bootstrap32 { ++ u32 texture_handle; ++ u32 texture_size; ++ u32 primary_size; ++ u32 secondary_bin_count; ++ u32 secondary_bin_size; ++ u32 agp_mode; ++ u8 agp_size; ++} drm_mga_dma_bootstrap32_t; ++ ++static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_mga_dma_bootstrap32_t dma_bootstrap32; ++ drm_mga_dma_bootstrap_t __user *dma_bootstrap; ++ int err; ++ ++ if (copy_from_user(&dma_bootstrap32, (void __user *)arg, ++ sizeof(dma_bootstrap32))) ++ return -EFAULT; ++ ++ dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap)); ++ if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap)) ++ || __put_user(dma_bootstrap32.texture_handle, ++ &dma_bootstrap->texture_handle) ++ || __put_user(dma_bootstrap32.texture_size, ++ &dma_bootstrap->texture_size) ++ || __put_user(dma_bootstrap32.primary_size, ++ &dma_bootstrap->primary_size) ++ || __put_user(dma_bootstrap32.secondary_bin_count, ++ &dma_bootstrap->secondary_bin_count) ++ || __put_user(dma_bootstrap32.secondary_bin_size, ++ &dma_bootstrap->secondary_bin_size) ++ || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode) ++ || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size)) ++ return -EFAULT; ++ ++ err = drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_MGA_DMA_BOOTSTRAP, ++ (unsigned long)dma_bootstrap); ++ if (err) ++ return err; ++ ++ if (__get_user(dma_bootstrap32.texture_handle, ++ &dma_bootstrap->texture_handle) ++ || __get_user(dma_bootstrap32.texture_size, ++ &dma_bootstrap->texture_size) ++ || __get_user(dma_bootstrap32.primary_size, ++ &dma_bootstrap->primary_size) ++ || __get_user(dma_bootstrap32.secondary_bin_count, ++ &dma_bootstrap->secondary_bin_count) ++ || __get_user(dma_bootstrap32.secondary_bin_size, ++ &dma_bootstrap->secondary_bin_size) ++ || __get_user(dma_bootstrap32.agp_mode, ++ &dma_bootstrap->agp_mode) ++ || __get_user(dma_bootstrap32.agp_size, ++ &dma_bootstrap->agp_size)) ++ return -EFAULT; ++ ++ if (copy_to_user((void __user *)arg, &dma_bootstrap32, ++ sizeof(dma_bootstrap32))) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++drm_ioctl_compat_t *mga_compat_ioctls[] = { ++ [DRM_MGA_INIT] = compat_mga_init, ++ [DRM_MGA_GETPARAM] = compat_mga_getparam, ++ [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap, ++}; ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/dri/card. ++ * ++ * \param filp file pointer. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. 
++ */
++long mga_compat_ioctl(struct file *filp, unsigned int cmd,
++		      unsigned long arg)
++{
++	unsigned int nr = DRM_IOCTL_NR(cmd);
++	drm_ioctl_compat_t *fn = NULL;
++	int ret;
++
++	if (nr < DRM_COMMAND_BASE)
++		return drm_compat_ioctl(filp, cmd, arg);
++
++	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
++		fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
++
++	lock_kernel();		/* XXX for now */
++	if (fn != NULL)
++		ret = (*fn)(filp, cmd, arg);
++	else
++		ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++	unlock_kernel();
++
++	return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_irq.c git-nokia/drivers/gpu/drm-tungsten/mga_irq.c
+--- git/drivers/gpu/drm-tungsten/mga_irq.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_irq.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,182 @@
++/* mga_irq.c -- IRQ handling for the MGA G200/G400 -*- linux-c -*-
++ */
++/*
++ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell
++ *    Eric Anholt
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mga_drm.h"
++#include "mga_drv.h"
++
++u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
++{
++	const drm_mga_private_t *const dev_priv =
++		(drm_mga_private_t *) dev->dev_private;
++
++	if (crtc != 0) {
++		return 0;
++	}
++
++
++	return atomic_read(&dev_priv->vbl_received);
++}
++
++
++irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
++{
++	struct drm_device *dev = (struct drm_device *) arg;
++	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++	int status;
++	int handled = 0;
++
++	status = MGA_READ(MGA_STATUS);
++
++	/* VBLANK interrupt */
++	if (status & MGA_VLINEPEN) {
++		MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
++		atomic_inc(&dev_priv->vbl_received);
++		drm_handle_vblank(dev, 0);
++		handled = 1;
++	}
++
++	/* SOFTRAP interrupt */
++	if (status & MGA_SOFTRAPEN) {
++		const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
++		const u32 prim_end = MGA_READ(MGA_PRIMEND);
++
++
++		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
++
++		/* In addition to clearing the interrupt-pending bit, we
++		 * have to write to MGA_PRIMEND to re-start the DMA operation.
++		 */
++		if ((prim_start & ~0x03) != (prim_end & ~0x03)) {
++			MGA_WRITE(MGA_PRIMEND, prim_end);
++		}
++
++		atomic_inc(&dev_priv->last_fence_retired);
++		DRM_WAKEUP(&dev_priv->fence_queue);
++		handled = 1;
++	}
++
++	if (handled)
++		return IRQ_HANDLED;
++	return IRQ_NONE;
++}
++
++int mga_enable_vblank(struct drm_device *dev, int crtc)
++{
++	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++
++	if (crtc != 0) {
++		DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++			  crtc);
++		return 0;
++	}
++
++	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
++	return 0;
++}
++
++
++void mga_disable_vblank(struct drm_device *dev, int crtc)
++{
++	if (crtc != 0) {
++		DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++			  crtc);
++	}
++
++	/* Do *NOT* disable the vertical refresh interrupt.  MGA doesn't have
++	 * a nice hardware counter that tracks the number of refreshes when
++	 * the interrupt is disabled, and the kernel doesn't know the refresh
++	 * rate to calculate an estimate.
++	 */
++	/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
++}
++
++int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
++{
++	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++	unsigned int cur_fence;
++	int ret = 0;
++
++	/* Assume that the user has missed the current sequence number
++	 * by about a day rather than wanting to wait for years
++	 * using fences.
++	 */
++	DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
++		    (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
++		      - *sequence) <= (1 << 23)));
++
++	*sequence = cur_fence;
++
++	return ret;
++}
++
++void mga_driver_irq_preinstall(struct drm_device * dev)
++{
++	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++
++	/* Disable *all* interrupts */
++	MGA_WRITE(MGA_IEN, 0);
++	/* Clear bits if they're already high */
++	MGA_WRITE(MGA_ICLEAR, ~0);
++}
++
++int mga_driver_irq_postinstall(struct drm_device * dev)
++{
++	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++	int ret;
++
++	ret = drm_vblank_init(dev, 1);
++	if (ret)
++		return ret;
++
++	DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
++
++	/* Turn on soft trap interrupt.  Vertical blank interrupts are enabled
++	 * in mga_enable_vblank.
++	 */
++	MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
++	return 0;
++}
++
++void mga_driver_irq_uninstall(struct drm_device * dev)
++{
++	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++	if (!dev_priv)
++		return;
++
++	/* Disable *all* interrupts */
++	MGA_WRITE(MGA_IEN, 0);
++
++	dev->irq_enabled = 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_state.c git-nokia/drivers/gpu/drm-tungsten/mga_state.c
+--- git/drivers/gpu/drm-tungsten/mga_state.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_state.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1139 @@
++/* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*-
++ * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com
++ */
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Jeff Hartmann ++ * Keith Whitwell ++ * ++ * Rewritten by: ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mga_drm.h" ++#include "mga_drv.h" ++ ++/* ================================================================ ++ * DMA hardware state programming functions ++ */ ++ ++static void mga_emit_clip_rect(drm_mga_private_t * dev_priv, ++ struct drm_clip_rect * box) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ unsigned int pitch = dev_priv->front_pitch; ++ DMA_LOCALS; ++ ++ BEGIN_DMA(2); ++ ++ /* Force reset of DWGCTL on G400 (eliminates clip disable bit). 
++ */ ++ if (dev_priv->chipset >= MGA_CARD_TYPE_G400) { ++ DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl, ++ MGA_LEN + MGA_EXEC, 0x80000000, ++ MGA_DWGCTL, ctx->dwgctl, ++ MGA_LEN + MGA_EXEC, 0x80000000); ++ } ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1, ++ MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch); ++ ++ ADVANCE_DMA(); ++} ++ ++static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ DMA_LOCALS; ++ ++ BEGIN_DMA(3); ++ ++ DMA_BLOCK(MGA_DSTORG, ctx->dstorg, ++ MGA_MACCESS, ctx->maccess, ++ MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl); ++ ++ DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl, ++ MGA_FOGCOL, ctx->fogcolor, ++ MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset); ++ ++ DMA_BLOCK(MGA_FCOL, ctx->fcol, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); ++ ++ ADVANCE_DMA(); ++} ++ ++static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ DMA_LOCALS; ++ ++ BEGIN_DMA(4); ++ ++ DMA_BLOCK(MGA_DSTORG, ctx->dstorg, ++ MGA_MACCESS, ctx->maccess, ++ MGA_PLNWT, ctx->plnwt, ++ MGA_DWGCTL, ctx->dwgctl); ++ ++ DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl, ++ MGA_FOGCOL, ctx->fogcolor, ++ MGA_WFLAG, ctx->wflag, ++ MGA_ZORG, dev_priv->depth_offset); ++ ++ DMA_BLOCK(MGA_WFLAG1, ctx->wflag, ++ MGA_TDUALSTAGE0, ctx->tdualstage0, ++ MGA_TDUALSTAGE1, ctx->tdualstage1, ++ MGA_FCOL, ctx->fcol); ++ ++ DMA_BLOCK(MGA_STENCIL, ctx->stencil, ++ MGA_STENCILCTL, ctx->stencilctl, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000); ++ ++ ADVANCE_DMA(); ++} ++ ++static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0]; ++ DMA_LOCALS; ++ ++ BEGIN_DMA(4); ++ ++ DMA_BLOCK(MGA_TEXCTL2, tex->texctl2, ++ MGA_TEXCTL, tex->texctl, ++ MGA_TEXFILTER, tex->texfilter, ++ MGA_TEXBORDERCOL, tex->texbordercol); ++ ++ DMA_BLOCK(MGA_TEXORG, tex->texorg, ++ MGA_TEXORG1, tex->texorg1, ++ MGA_TEXORG2, tex->texorg2, ++ MGA_TEXORG3, tex->texorg3); ++ ++ DMA_BLOCK(MGA_TEXORG4, tex->texorg4, ++ MGA_TEXWIDTH, tex->texwidth, ++ MGA_TEXHEIGHT, tex->texheight, ++ MGA_WR24, tex->texwidth); ++ ++ DMA_BLOCK(MGA_WR34, tex->texheight, ++ MGA_TEXTRANS, 0x0000ffff, ++ MGA_TEXTRANSHIGH, 0x0000ffff, ++ MGA_DMAPAD, 0x00000000); ++ ++ ADVANCE_DMA(); ++} ++ ++static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0]; ++ DMA_LOCALS; ++ ++/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */ ++/* tex->texctl, tex->texctl2); */ ++ ++ BEGIN_DMA(6); ++ ++ DMA_BLOCK(MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC, ++ MGA_TEXCTL, tex->texctl, ++ MGA_TEXFILTER, tex->texfilter, ++ MGA_TEXBORDERCOL, tex->texbordercol); ++ ++ DMA_BLOCK(MGA_TEXORG, tex->texorg, ++ MGA_TEXORG1, tex->texorg1, ++ MGA_TEXORG2, tex->texorg2, ++ MGA_TEXORG3, tex->texorg3); ++ ++ DMA_BLOCK(MGA_TEXORG4, tex->texorg4, ++ MGA_TEXWIDTH, tex->texwidth, ++ MGA_TEXHEIGHT, tex->texheight, ++ MGA_WR49, 0x00000000); ++ ++ DMA_BLOCK(MGA_WR57, 0x00000000, ++ MGA_WR53, 0x00000000, ++ MGA_WR61, 0x00000000, ++ MGA_WR52, MGA_G400_WR_MAGIC); ++ ++ DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC, ++ 
MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC,
++ MGA_WR62, tex->texheight | MGA_G400_WR_MAGIC,
++ MGA_DMAPAD, 0x00000000);
++
++ DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++ MGA_DMAPAD, 0x00000000,
++ MGA_TEXTRANS, 0x0000ffff,
++ MGA_TEXTRANSHIGH, 0x0000ffff);
++
++ ADVANCE_DMA();
++}
++
++static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
++{
++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++ drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
++ DMA_LOCALS;
++
++/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
++/* tex->texctl, tex->texctl2); */
++
++ BEGIN_DMA(5);
++
++ DMA_BLOCK(MGA_TEXCTL2, (tex->texctl2 |
++ MGA_MAP1_ENABLE |
++ MGA_G400_TC2_MAGIC),
++ MGA_TEXCTL, tex->texctl,
++ MGA_TEXFILTER, tex->texfilter,
++ MGA_TEXBORDERCOL, tex->texbordercol);
++
++ DMA_BLOCK(MGA_TEXORG, tex->texorg,
++ MGA_TEXORG1, tex->texorg1,
++ MGA_TEXORG2, tex->texorg2,
++ MGA_TEXORG3, tex->texorg3);
++
++ DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
++ MGA_TEXWIDTH, tex->texwidth,
++ MGA_TEXHEIGHT, tex->texheight,
++ MGA_WR49, 0x00000000);
++
++ DMA_BLOCK(MGA_WR57, 0x00000000,
++ MGA_WR53, 0x00000000,
++ MGA_WR61, 0x00000000,
++ MGA_WR52, tex->texwidth | MGA_G400_WR_MAGIC);
++
++ DMA_BLOCK(MGA_WR60, tex->texheight | MGA_G400_WR_MAGIC,
++ MGA_TEXTRANS, 0x0000ffff,
++ MGA_TEXTRANSHIGH, 0x0000ffff,
++ MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC);
++
++ ADVANCE_DMA();
++}
++
++static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
++{
++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++ unsigned int pipe = sarea_priv->warp_pipe;
++ DMA_LOCALS;
++
++ BEGIN_DMA(3);
++
++ DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND,
++ MGA_WVRTXSZ, 0x00000007,
++ MGA_WFLAG, 0x00000000,
++ MGA_WR24, 0x00000000);
++
++ DMA_BLOCK(MGA_WR25, 0x00000100,
++ MGA_WR34, 0x00000000,
++ MGA_WR42, 0x0000ffff,
++ MGA_WR60, 0x0000ffff);
++
++ /* Padding required due to hardware bug.
++ */
++ DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
++ MGA_DMAPAD, 0xffffffff,
++ MGA_DMAPAD, 0xffffffff,
++ MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
++ MGA_WMODE_START | dev_priv->wagp_enable));
++
++ ADVANCE_DMA();
++}
++
++static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
++{
++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++ unsigned int pipe = sarea_priv->warp_pipe;
++ DMA_LOCALS;
++
++/* printk("mga_g400_emit_pipe %x\n", pipe); */
++
++ BEGIN_DMA(10);
++
++ DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND,
++ MGA_DMAPAD, 0x00000000,
++ MGA_DMAPAD, 0x00000000,
++ MGA_DMAPAD, 0x00000000);
++
++ if (pipe & MGA_T2) {
++ DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09,
++ MGA_DMAPAD, 0x00000000,
++ MGA_DMAPAD, 0x00000000,
++ MGA_DMAPAD, 0x00000000);
++
++ DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
++ MGA_WACCEPTSEQ, 0x00000000,
++ MGA_WACCEPTSEQ, 0x00000000,
++ MGA_WACCEPTSEQ, 0x1e000000);
++ } else {
++ if (dev_priv->warp_pipe & MGA_T2) {
++ /* Flush the WARP pipe */
++ DMA_BLOCK(MGA_YDST, 0x00000000,
++ MGA_FXLEFT, 0x00000000,
++ MGA_FXRIGHT, 0x00000001,
++ MGA_DWGCTL, MGA_DWGCTL_FLUSH);
++
++ DMA_BLOCK(MGA_LEN + MGA_EXEC, 0x00000001,
++ MGA_DWGSYNC, 0x00007000,
++ MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
++ MGA_LEN + MGA_EXEC, 0x00000000);
++
++ DMA_BLOCK(MGA_TEXCTL2, (MGA_DUALTEX |
++ MGA_G400_TC2_MAGIC),
++ MGA_LEN + MGA_EXEC, 0x00000000,
++ MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
++ MGA_DMAPAD, 0x00000000);
++ }
++
++ DMA_BLOCK(MGA_WVRTXSZ, 0x00001807,
++ MGA_DMAPAD, 0x00000000,
++ MGA_DMAPAD, 0x00000000,
++ MGA_DMAPAD, 0x00000000);
++
++ DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
++ MGA_WACCEPTSEQ, 0x00000000,
++ MGA_WACCEPTSEQ, 0x00000000,
++ MGA_WACCEPTSEQ, 0x18000000);
++ }
++
++ DMA_BLOCK(MGA_WFLAG, 0x00000000,
++ MGA_WFLAG1, 0x00000000,
++ MGA_WR56, MGA_G400_WR56_MAGIC,
++ MGA_DMAPAD, 0x00000000);
++
++ DMA_BLOCK(MGA_WR49, 0x00000000, /* tex0 */
++ MGA_WR57, 0x00000000, /* tex0 */
++ MGA_WR53, 0x00000000, /* tex1 */
++ MGA_WR61, 0x00000000); /* tex1 */
++
++ DMA_BLOCK(MGA_WR54, MGA_G400_WR_MAGIC, /* tex0 width */
++ MGA_WR62, MGA_G400_WR_MAGIC, /* tex0 height */
++ MGA_WR52, MGA_G400_WR_MAGIC, /* tex1 width */
++ MGA_WR60, MGA_G400_WR_MAGIC); /* tex1 height */
++
++ /* Padding required due to hardware bug */
++ DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
++ MGA_DMAPAD, 0xffffffff,
++ MGA_DMAPAD, 0xffffffff,
++ MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
++ MGA_WMODE_START | dev_priv->wagp_enable));
++
++ ADVANCE_DMA();
++}
++
++static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
++{
++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++ unsigned int dirty = sarea_priv->dirty;
++
++ if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
++ mga_g200_emit_pipe(dev_priv);
++ dev_priv->warp_pipe = sarea_priv->warp_pipe;
++ }
++
++ if (dirty & MGA_UPLOAD_CONTEXT) {
++ mga_g200_emit_context(dev_priv);
++ sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
++ }
++
++ if (dirty & MGA_UPLOAD_TEX0) {
++ mga_g200_emit_tex0(dev_priv);
++ sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
++ }
++}
++
++static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
++{
++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++ unsigned int dirty = sarea_priv->dirty;
++ int multitex = sarea_priv->warp_pipe & MGA_T2;
++
++ if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
++ mga_g400_emit_pipe(dev_priv);
++ dev_priv->warp_pipe = sarea_priv->warp_pipe;
++ }
++
++ if (dirty & MGA_UPLOAD_CONTEXT) {
++ mga_g400_emit_context(dev_priv);
++ sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
++ }
++
++ if (dirty & MGA_UPLOAD_TEX0) {
++ 
mga_g400_emit_tex0(dev_priv); ++ sarea_priv->dirty &= ~MGA_UPLOAD_TEX0; ++ } ++ ++ if ((dirty & MGA_UPLOAD_TEX1) && multitex) { ++ mga_g400_emit_tex1(dev_priv); ++ sarea_priv->dirty &= ~MGA_UPLOAD_TEX1; ++ } ++} ++ ++/* ================================================================ ++ * SAREA state verification ++ */ ++ ++/* Disallow all write destinations except the front and backbuffer. ++ */ ++static int mga_verify_context(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ ++ if (ctx->dstorg != dev_priv->front_offset && ++ ctx->dstorg != dev_priv->back_offset) { ++ DRM_ERROR("*** bad DSTORG: %x (front %x, back %x)\n\n", ++ ctx->dstorg, dev_priv->front_offset, ++ dev_priv->back_offset); ++ ctx->dstorg = 0; ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* Disallow texture reads from PCI space. ++ */ ++static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit]; ++ unsigned int org; ++ ++ org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK); ++ ++ if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) { ++ DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit); ++ tex->texorg = 0; ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int mga_verify_state(drm_mga_private_t * dev_priv) ++{ ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ unsigned int dirty = sarea_priv->dirty; ++ int ret = 0; ++ ++ if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; ++ ++ if (dirty & MGA_UPLOAD_CONTEXT) ++ ret |= mga_verify_context(dev_priv); ++ ++ if (dirty & MGA_UPLOAD_TEX0) ++ ret |= mga_verify_tex(dev_priv, 0); ++ ++ if (dev_priv->chipset >= MGA_CARD_TYPE_G400) { ++ if (dirty & MGA_UPLOAD_TEX1) ++ ret |= mga_verify_tex(dev_priv, 1); ++ ++ if (dirty & MGA_UPLOAD_PIPE) ++ ret |= (sarea_priv->warp_pipe > MGA_MAX_G400_PIPES); ++ } else { ++ if (dirty & MGA_UPLOAD_PIPE) ++ ret |= (sarea_priv->warp_pipe > MGA_MAX_G200_PIPES); ++ } ++ ++ return (ret == 0); ++} ++ ++static int mga_verify_iload(drm_mga_private_t * dev_priv, ++ unsigned int dstorg, unsigned int length) ++{ ++ if (dstorg < dev_priv->texture_offset || ++ dstorg + length > (dev_priv->texture_offset + ++ dev_priv->texture_size)) { ++ DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg); ++ return -EINVAL; ++ } ++ ++ if (length & MGA_ILOAD_MASK) { ++ DRM_ERROR("*** bad iload length: 0x%x\n", ++ length & MGA_ILOAD_MASK); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int mga_verify_blit(drm_mga_private_t * dev_priv, ++ unsigned int srcorg, unsigned int dstorg) ++{ ++ if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) || ++ (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) { ++ DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++/* ================================================================ ++ * ++ */ ++ ++static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int nbox = sarea_priv->nbox; ++ int i; ++ DMA_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_DMA(1); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ 
MGA_DWGSYNC, 0x00007100, ++ MGA_DWGSYNC, 0x00007000); ++ ++ ADVANCE_DMA(); ++ ++ for (i = 0; i < nbox; i++) { ++ struct drm_clip_rect *box = &pbox[i]; ++ u32 height = box->y2 - box->y1; ++ ++ DRM_DEBUG(" from=%d,%d to=%d,%d\n", ++ box->x1, box->y1, box->x2, box->y2); ++ ++ if (clear->flags & MGA_FRONT) { ++ BEGIN_DMA(2); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, clear->color_mask, ++ MGA_YDSTLEN, (box->y1 << 16) | height, ++ MGA_FXBNDRY, (box->x2 << 16) | box->x1); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_FCOL, clear->clear_color, ++ MGA_DSTORG, dev_priv->front_offset, ++ MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd); ++ ++ ADVANCE_DMA(); ++ } ++ ++ if (clear->flags & MGA_BACK) { ++ BEGIN_DMA(2); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, clear->color_mask, ++ MGA_YDSTLEN, (box->y1 << 16) | height, ++ MGA_FXBNDRY, (box->x2 << 16) | box->x1); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_FCOL, clear->clear_color, ++ MGA_DSTORG, dev_priv->back_offset, ++ MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd); ++ ++ ADVANCE_DMA(); ++ } ++ ++ if (clear->flags & MGA_DEPTH) { ++ BEGIN_DMA(2); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, clear->depth_mask, ++ MGA_YDSTLEN, (box->y1 << 16) | height, ++ MGA_FXBNDRY, (box->x2 << 16) | box->x1); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_FCOL, clear->clear_depth, ++ MGA_DSTORG, dev_priv->depth_offset, ++ MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd); ++ ++ ADVANCE_DMA(); ++ } ++ ++ } ++ ++ BEGIN_DMA(1); ++ ++ /* Force reset of DWGCTL */ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, ctx->plnwt, ++ MGA_DWGCTL, ctx->dwgctl); ++ ++ ADVANCE_DMA(); ++ ++ FLUSH_DMA(); ++} ++ ++static void mga_dma_dispatch_swap(struct drm_device * dev) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int nbox = sarea_priv->nbox; ++ int i; ++ DMA_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ sarea_priv->last_frame.head = dev_priv->prim.tail; ++ sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap; ++ ++ BEGIN_DMA(4 + nbox); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DWGSYNC, 0x00007100, ++ MGA_DWGSYNC, 0x00007000); ++ ++ DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset, ++ MGA_MACCESS, dev_priv->maccess, ++ MGA_SRCORG, dev_priv->back_offset, ++ MGA_AR5, dev_priv->front_pitch); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, 0xffffffff, ++ MGA_DWGCTL, MGA_DWGCTL_COPY); ++ ++ for (i = 0; i < nbox; i++) { ++ struct drm_clip_rect *box = &pbox[i]; ++ u32 height = box->y2 - box->y1; ++ u32 start = box->y1 * dev_priv->front_pitch; ++ ++ DRM_DEBUG(" from=%d,%d to=%d,%d\n", ++ box->x1, box->y1, box->x2, box->y2); ++ ++ DMA_BLOCK(MGA_AR0, start + box->x2 - 1, ++ MGA_AR3, start + box->x1, ++ MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1, ++ MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height); ++ } ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, ctx->plnwt, ++ MGA_SRCORG, dev_priv->front_offset, ++ MGA_DWGCTL, ctx->dwgctl); ++ ++ ADVANCE_DMA(); ++ ++ FLUSH_DMA(); ++ ++ DRM_DEBUG("... 
done.\n");
++}
++
++static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
++{
++ drm_mga_private_t *dev_priv = dev->dev_private;
++ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++ u32 address = (u32) buf->bus_address;
++ u32 length = (u32) buf->used;
++ int i = 0;
++ DMA_LOCALS;
++ DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
++
++ if (buf->used) {
++ buf_priv->dispatched = 1;
++
++ MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);
++
++ do {
++ if (i < sarea_priv->nbox) {
++ mga_emit_clip_rect(dev_priv,
++ &sarea_priv->boxes[i]);
++ }
++
++ BEGIN_DMA(1);
++
++ DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++ MGA_DMAPAD, 0x00000000,
++ MGA_SECADDRESS, (address |
++ MGA_DMA_VERTEX),
++ MGA_SECEND, ((address + length) |
++ dev_priv->dma_access));
++
++ ADVANCE_DMA();
++ } while (++i < sarea_priv->nbox);
++ }
++
++ if (buf_priv->discard) {
++ AGE_BUFFER(buf_priv);
++ buf->pending = 0;
++ buf->used = 0;
++ buf_priv->dispatched = 0;
++
++ mga_freelist_put(dev, buf);
++ }
++
++ FLUSH_DMA();
++}
++
++static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf,
++ unsigned int start, unsigned int end)
++{
++ drm_mga_private_t *dev_priv = dev->dev_private;
++ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++ u32 address = (u32) buf->bus_address;
++ int i = 0;
++ DMA_LOCALS;
++ DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);
++
++ if (start != end) {
++ buf_priv->dispatched = 1;
++
++ MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);
++
++ do {
++ if (i < sarea_priv->nbox) {
++ mga_emit_clip_rect(dev_priv,
++ &sarea_priv->boxes[i]);
++ }
++
++ BEGIN_DMA(1);
++
++ DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++ MGA_DMAPAD, 0x00000000,
++ MGA_SETUPADDRESS, address + start,
++ MGA_SETUPEND, ((address + end) |
++ dev_priv->dma_access));
++
++ ADVANCE_DMA();
++ } while (++i < sarea_priv->nbox);
++ }
++
++ if (buf_priv->discard) {
++ AGE_BUFFER(buf_priv);
++ buf->pending = 0;
++ buf->used = 0;
++ buf_priv->dispatched = 0;
++
++ mga_freelist_put(dev, buf);
++ }
++
++ FLUSH_DMA();
++}
++
++/* This copies a 64-byte aligned AGP region to the framebuffer with a
++ * standard blit; the ioctl needs to do the checking.
++ */ ++static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf, ++ unsigned int dstorg, unsigned int length) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_buf_priv_t *buf_priv = buf->dev_private; ++ drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state; ++ u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM; ++ u32 y2; ++ DMA_LOCALS; ++ DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used); ++ ++ y2 = length / 64; ++ ++ BEGIN_DMA(5); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DWGSYNC, 0x00007100, ++ MGA_DWGSYNC, 0x00007000); ++ ++ DMA_BLOCK(MGA_DSTORG, dstorg, ++ MGA_MACCESS, 0x00000000, ++ MGA_SRCORG, srcorg, ++ MGA_AR5, 64); ++ ++ DMA_BLOCK(MGA_PITCH, 64, ++ MGA_PLNWT, 0xffffffff, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DWGCTL, MGA_DWGCTL_COPY); ++ ++ DMA_BLOCK(MGA_AR0, 63, ++ MGA_AR3, 0, ++ MGA_FXBNDRY, (63 << 16) | 0, ++ MGA_YDSTLEN + MGA_EXEC, y2); ++ ++ DMA_BLOCK(MGA_PLNWT, ctx->plnwt, ++ MGA_SRCORG, dev_priv->front_offset, ++ MGA_PITCH, dev_priv->front_pitch, ++ MGA_DWGSYNC, 0x00007000); ++ ++ ADVANCE_DMA(); ++ ++ AGE_BUFFER(buf_priv); ++ ++ buf->pending = 0; ++ buf->used = 0; ++ buf_priv->dispatched = 0; ++ ++ mga_freelist_put(dev, buf); ++ ++ FLUSH_DMA(); ++} ++ ++static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_context_regs_t *ctx = &sarea_priv->context_state; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int nbox = sarea_priv->nbox; ++ u32 scandir = 0, i; ++ DMA_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_DMA(4 + nbox); ++ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DWGSYNC, 0x00007100, ++ MGA_DWGSYNC, 0x00007000); ++ ++ DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY, ++ MGA_PLNWT, blit->planemask, ++ MGA_SRCORG, blit->srcorg, ++ MGA_DSTORG, blit->dstorg); ++ ++ DMA_BLOCK(MGA_SGN, scandir, ++ MGA_MACCESS, dev_priv->maccess, ++ MGA_AR5, blit->ydir * blit->src_pitch, ++ MGA_PITCH, blit->dst_pitch); ++ ++ for (i = 0; i < nbox; i++) { ++ int srcx = pbox[i].x1 + blit->delta_sx; ++ int srcy = pbox[i].y1 + blit->delta_sy; ++ int dstx = pbox[i].x1 + blit->delta_dx; ++ int dsty = pbox[i].y1 + blit->delta_dy; ++ int h = pbox[i].y2 - pbox[i].y1; ++ int w = pbox[i].x2 - pbox[i].x1 - 1; ++ int start; ++ ++ if (blit->ydir == -1) { ++ srcy = blit->height - srcy - 1; ++ } ++ ++ start = srcy * blit->src_pitch + srcx; ++ ++ DMA_BLOCK(MGA_AR0, start + w, ++ MGA_AR3, start, ++ MGA_FXBNDRY, ((dstx + w) << 16) | (dstx & 0xffff), ++ MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h); ++ } ++ ++ /* Do something to flush AGP? ++ */ ++ ++ /* Force reset of DWGCTL */ ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_PLNWT, ctx->plnwt, ++ MGA_PITCH, dev_priv->front_pitch, ++ MGA_DWGCTL, ctx->dwgctl); ++ ++ ADVANCE_DMA(); ++} ++ ++/* ================================================================ ++ * ++ */ ++ ++static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_mga_clear_t *clear = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; ++ ++ WRAP_TEST_WITH_RETURN(dev_priv); ++ ++ mga_dma_dispatch_clear(dev, clear); ++ ++ /* Make sure we restore the 3D state next time. 
++ */ ++ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; ++ ++ return 0; ++} ++ ++static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; ++ ++ WRAP_TEST_WITH_RETURN(dev_priv); ++ ++ mga_dma_dispatch_swap(dev); ++ ++ /* Make sure we restore the 3D state next time. ++ */ ++ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; ++ ++ return 0; ++} ++ ++static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_mga_buf_priv_t *buf_priv; ++ drm_mga_vertex_t *vertex = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (vertex->idx < 0 || vertex->idx > dma->buf_count) ++ return -EINVAL; ++ buf = dma->buflist[vertex->idx]; ++ buf_priv = buf->dev_private; ++ ++ buf->used = vertex->used; ++ buf_priv->discard = vertex->discard; ++ ++ if (!mga_verify_state(dev_priv)) { ++ if (vertex->discard) { ++ if (buf_priv->dispatched == 1) ++ AGE_BUFFER(buf_priv); ++ buf_priv->dispatched = 0; ++ mga_freelist_put(dev, buf); ++ } ++ return -EINVAL; ++ } ++ ++ WRAP_TEST_WITH_RETURN(dev_priv); ++ ++ mga_dma_dispatch_vertex(dev, buf); ++ ++ return 0; ++} ++ ++static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_mga_buf_priv_t *buf_priv; ++ drm_mga_indices_t *indices = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (indices->idx < 0 || indices->idx > dma->buf_count) ++ return -EINVAL; ++ ++ buf = dma->buflist[indices->idx]; ++ buf_priv = buf->dev_private; ++ ++ buf_priv->discard = indices->discard; ++ ++ if (!mga_verify_state(dev_priv)) { ++ if (indices->discard) { ++ if (buf_priv->dispatched == 1) ++ AGE_BUFFER(buf_priv); ++ buf_priv->dispatched = 0; ++ mga_freelist_put(dev, buf); ++ } ++ return -EINVAL; ++ } ++ ++ WRAP_TEST_WITH_RETURN(dev_priv); ++ ++ mga_dma_dispatch_indices(dev, buf, indices->start, indices->end); ++ ++ return 0; ++} ++ ++static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ struct drm_buf *buf; ++ drm_mga_buf_priv_t *buf_priv; ++ drm_mga_iload_t *iload = data; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++#if 0 ++ if (mga_do_wait_for_idle(dev_priv) < 0) { ++ if (MGA_DMA_DEBUG) ++ DRM_INFO("-EBUSY\n"); ++ return -EBUSY; ++ } ++#endif ++ if (iload->idx < 0 || iload->idx > dma->buf_count) ++ return -EINVAL; ++ ++ buf = dma->buflist[iload->idx]; ++ buf_priv = buf->dev_private; ++ ++ if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) { ++ mga_freelist_put(dev, buf); ++ return -EINVAL; ++ } ++ ++ WRAP_TEST_WITH_RETURN(dev_priv); ++ ++ mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length); ++ ++ /* Make sure we restore the 3D state next time. 
++ */
++ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
++
++ return 0;
++}
++
++static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++ drm_mga_private_t *dev_priv = dev->dev_private;
++ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++ drm_mga_blit_t *blit = data;
++ DRM_DEBUG("\n");
++
++ LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++ if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
++ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
++
++ if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg))
++ return -EINVAL;
++
++ WRAP_TEST_WITH_RETURN(dev_priv);
++
++ mga_dma_dispatch_blit(dev, blit);
++
++ /* Make sure we restore the 3D state next time.
++ */
++ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
++
++ return 0;
++}
++
++static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++ drm_mga_private_t *dev_priv = dev->dev_private;
++ drm_mga_getparam_t *param = data;
++ int value;
++
++ if (!dev_priv) {
++ DRM_ERROR("called with no initialization\n");
++ return -EINVAL;
++ }
++
++ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++ switch (param->param) {
++ case MGA_PARAM_IRQ_NR:
++ value = dev->irq;
++ break;
++ case MGA_PARAM_CARD_TYPE:
++ value = dev_priv->chipset;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
++ DRM_ERROR("copy_to_user\n");
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++ drm_mga_private_t *dev_priv = dev->dev_private;
++ u32 *fence = data;
++ DMA_LOCALS;
++
++ if (!dev_priv) {
++ DRM_ERROR("called with no initialization\n");
++ return -EINVAL;
++ }
++
++ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++ /* I would normally do this assignment in the declaration of fence,
++ * but dev_priv may be NULL.
++ */ ++ ++ *fence = dev_priv->next_fence_to_post; ++ dev_priv->next_fence_to_post++; ++ ++ BEGIN_DMA(1); ++ DMA_BLOCK(MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_DMAPAD, 0x00000000, ++ MGA_SOFTRAP, 0x00000000); ++ ADVANCE_DMA(); ++ ++ return 0; ++} ++ ++static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_mga_private_t *dev_priv = dev->dev_private; ++ u32 *fence = data; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); ++ ++ mga_driver_fence_wait(dev, fence); ++ ++ return 0; ++} ++ ++struct drm_ioctl_desc mga_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ ++}; ++ ++int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); +diff -Nurd git/drivers/gpu/drm-tungsten/mga_ucode.h git-nokia/drivers/gpu/drm-tungsten/mga_ucode.h +--- git/drivers/gpu/drm-tungsten/mga_ucode.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/mga_ucode.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,11645 @@ ++/* mga_ucode.h -- Matrox G200/G400 WARP engine microcode -*- linux-c -*- ++ * Created: Thu Jan 11 21:20:43 2001 by gareth@valinux.com ++ * ++ * Copyright 1999 Matrox Graphics Inc. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included ++ * in all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * MATROX GRAPHICS INC., OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE ++ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Kernel-based WARP engine management: ++ * Gareth Hughes ++ */ ++ ++/* ++ * WARP pipes are named according to the functions they perform, where: ++ * ++ * - T stands for computation of texture stage 0 ++ * - T2 stands for computation of both texture stage 0 and texture stage 1 ++ * - G stands for computation of triangle intensity (Gouraud interpolation) ++ * - Z stands for computation of Z buffer interpolation ++ * - S stands for computation of specular highlight ++ * - A stands for computation of the alpha channel ++ * - F stands for computation of vertex fog interpolation ++ */ ++ ++static unsigned char warp_g200_tgz[] = { ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x72, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x60, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 
0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 
0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x03, 0x80, 0x0A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x57, 0x39, 0x20, 0xE9, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0x2B, 0x32, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0xB3, 0x05, ++ 0x00, 0xE0, ++ 0x16, 0x28, 0x20, 0xE9, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x1E, 0x2B, 0x20, 0xE9, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x85, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x84, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x82, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x7F, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgza[] = { ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x7D, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, 
++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x6B, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, 
++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2D, 0x44, 0x4C, 0xB6, ++ 0x25, 0x44, 0x54, 0xB6, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x2D, 0x20, ++ 0x25, 0x20, ++ 0x07, 0xC0, 0x44, 0xC6, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x1F, 0x62, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x3F, 0x3D, 0x5D, 0x9F, ++ 0x00, 0xE0, ++ 0x07, 0x20, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0xB3, 0x05, ++ 0x00, 0xE0, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0x26, 0x1F, 0xDF, ++ 0x9D, 0x1F, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x9E, 0x3F, 0x4F, 0xE9, ++ ++ 0x07, 0x07, 0x1F, 0xAF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x9C, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x7A, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 
0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x79, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x77, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x74, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgzaf[] = { ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x83, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x21, 0x45, 0x80, 0xE8, ++ 0x1A, 0x4D, 0x80, 0xE8, ++ ++ 0x31, 0x55, 0x80, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x6F, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 
0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 
0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0D, 0x21, 0x1A, 0xB6, ++ 0x05, 0x21, 0x31, 0xB6, ++ ++ 0x2D, 0x44, 0x4C, 0xB6, ++ 0x25, 0x44, 0x54, 0xB6, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x0D, 0x20, ++ 0x05, 0x20, ++ 0x2F, 0xC0, 0x21, 0xC6, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x07, 0xC0, 0x44, 0xC6, ++ ++ 0x17, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x2D, 0x20, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0xE0, ++ 0x2F, 0x20, ++ ++ 0x1F, 0x62, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x07, 0x20, ++ ++ 0x3F, 0x3D, 0x5D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0xB3, 0x05, ++ 0x00, 0xE0, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x35, 0x17, 0x4F, 0xE9, ++ ++ 0x1F, 0x26, 0x1F, 0xDF, ++ 0x9D, 0x1F, 0x4F, 0xE9, ++ ++ 0x9E, 0x3F, 0x4F, 0xE9, ++ 0x39, 0x37, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x17, 0xAF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x07, 0x07, 0x1F, 0xAF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x31, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x9C, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x74, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x73, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 
0xE0, ++ ++ 0x71, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6E, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgzf[] = { ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x7F, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x21, 0x45, 0x80, 0xE8, ++ 0x1A, 0x4D, 0x80, 0xE8, ++ ++ 0x31, 0x55, 0x80, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x6B, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, 
++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ 
++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x0D, 0x21, 0x1A, 0xB6, ++ 0x05, 0x21, 0x31, 0xB6, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x0D, 0x20, ++ 0x05, 0x20, ++ 0x2F, 0xC0, 0x21, 0xC6, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x17, 0x50, 0x56, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0xE0, ++ 0x2F, 0x20, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0xB3, 0x05, ++ 0x00, 0xE0, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x17, 0x26, 0x17, 0xDF, ++ 0x35, 0x17, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x39, 0x37, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x17, 0xAF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x31, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x78, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x77, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x75, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x72, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgzs[] = { ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 
0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x8B, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x21, 0x45, 0x80, 0xE8, ++ 0x1A, 0x4D, 0x80, 0xE8, ++ ++ 0x31, 0x55, 0x80, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x77, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 
0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 
0x80, 0x00, 0xE8, ++ ++ 0x2D, 0x21, 0x1A, 0xB0, ++ 0x25, 0x21, 0x31, 0xB0, ++ ++ 0x0D, 0x21, 0x1A, 0xB2, ++ 0x05, 0x21, 0x31, 0xB2, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x2D, 0x20, ++ 0x25, 0x20, ++ 0x05, 0x20, ++ 0x0D, 0x20, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x2F, 0xC0, 0x21, 0xC0, ++ ++ 0x16, 0x42, 0x56, 0x9F, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x1E, 0x62, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x21, 0x31, 0xB4, ++ 0x2D, 0x21, 0x1A, 0xB4, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0x05, ++ 0x00, 0xE0, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0xE0, ++ 0x2F, 0x20, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x1E, 0x26, 0x1E, 0xDF, ++ ++ 0xA7, 0x1E, 0x4F, 0xE9, ++ 0x17, 0x26, 0x16, 0xDF, ++ ++ 0x2D, 0x20, ++ 0x00, 0xE0, ++ 0xA8, 0x3F, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x1E, 0xAF, ++ 0x25, 0x20, ++ 0x00, 0xE0, ++ ++ 0xA4, 0x16, 0x4F, 0xE9, ++ 0x0F, 0xC0, 0x21, 0xC2, ++ ++ 0xA6, 0x80, 0x4F, 0xE9, ++ 0x1F, 0x62, 0x57, 0x9F, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0xE0, ++ 0x8F, 0x20, ++ ++ 0xA5, 0x37, 0x4F, 0xE9, ++ 0x0F, 0x17, 0x0F, 0xAF, ++ ++ 0x06, 0xC0, 0x21, 0xC4, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0xA3, 0x80, 0x4F, 0xE9, ++ ++ 0x06, 0x20, ++ 0x00, 0xE0, ++ 0x1F, 0x26, 0x1F, 0xDF, ++ ++ 0xA1, 0x1F, 0x4F, 0xE9, ++ 0xA2, 0x3F, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x06, 0x06, 0x1F, 0xAF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x6C, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6B, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x69, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x66, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgzsa[] = { ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 
0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x8F, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x21, 0x45, 0x80, 0xE8, ++ 0x1A, 0x4D, 0x80, 0xE8, ++ ++ 0x31, 0x55, 0x80, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x7B, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, 
++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 
0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2D, 0x21, 0x1A, 0xB0, ++ 0x25, 0x21, 0x31, 0xB0, ++ ++ 0x0D, 0x21, 0x1A, 0xB2, ++ 0x05, 0x21, 0x31, 0xB2, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x2D, 0x20, ++ 0x25, 0x20, ++ 0x05, 0x20, ++ 0x0D, 0x20, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x2F, 0xC0, 0x21, 0xC0, ++ ++ 0x16, 0x42, 0x56, 0x9F, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x1E, 0x62, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x21, 0x31, 0xB4, ++ 0x2D, 0x21, 0x1A, 0xB4, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0x05, ++ 0x00, 0xE0, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0x0D, 0x44, 0x4C, 0xB6, ++ 0x05, 0x44, 0x54, 0xB6, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0xE0, ++ 0x2F, 0x20, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x1E, 0x26, 0x1E, 0xDF, ++ ++ 0xA7, 0x1E, 0x4F, 0xE9, ++ 0x17, 0x26, 0x16, 0xDF, ++ ++ 0x2D, 0x20, ++ 0x00, 0xE0, ++ 0xA8, 0x3F, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x1E, 0xAF, ++ 0x25, 0x20, ++ 0x00, 0xE0, ++ ++ 0xA4, 0x16, 0x4F, 0xE9, ++ 0x0F, 0xC0, 0x21, 0xC2, ++ ++ 0xA6, 0x80, 0x4F, 0xE9, ++ 0x1F, 0x62, 0x57, 0x9F, ++ ++ 0x0D, 0x20, ++ 0x05, 0x20, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0xE0, ++ 0x0F, 0x20, ++ ++ 0x17, 0x50, 0x56, 0x9F, ++ 0xA5, 0x37, 0x4F, 0xE9, ++ ++ 0x06, 0xC0, 0x21, 0xC4, ++ 0x0F, 0x17, 0x0F, 0xAF, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2F, 0xC0, 0x44, 0xC6, ++ 0xA3, 0x80, 0x4F, 0xE9, ++ ++ 0x06, 0x20, ++ 0x00, 0xE0, ++ 0x1F, 0x26, 0x1F, 0xDF, ++ ++ 0x17, 0x26, 0x17, 0xDF, ++ 0x9D, 0x17, 0x4F, 0xE9, ++ ++ 0xA1, 0x1F, 0x4F, 0xE9, ++ 0xA2, 0x3F, 0x4F, 0xE9, ++ ++ 0x06, 0x06, 0x1F, 0xAF, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x9E, 0x37, 0x4F, 0xE9, ++ 0x2F, 0x17, 0x2F, 0xAF, ++ ++ 0xA0, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x9C, 0x80, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x68, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x67, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x65, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x62, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgzsaf[] = { ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 
0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x94, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x21, 0x45, 0x80, 0xE8, ++ 0x1A, 0x4D, 0x80, 0xE8, ++ ++ 0x31, 0x55, 0x80, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x80, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 
0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2D, 0x21, 0x1A, 0xB0, ++ 0x25, 0x21, 0x31, 0xB0, ++ ++ 0x0D, 0x21, 0x1A, 0xB2, ++ 0x05, 0x21, 0x31, 0xB2, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 
0x2B, 0xBD, ++ ++ 0x2D, 0x20, ++ 0x25, 0x20, ++ 0x05, 0x20, ++ 0x0D, 0x20, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x2F, 0xC0, 0x21, 0xC0, ++ ++ 0x16, 0x42, 0x56, 0x9F, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x1E, 0x62, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x21, 0x31, 0xB4, ++ 0x2D, 0x21, 0x1A, 0xB4, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0x05, ++ 0x00, 0xE0, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0x0D, 0x21, 0x1A, 0xB6, ++ 0x05, 0x21, 0x31, 0xB6, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0xE0, ++ 0x2F, 0x20, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x1E, 0x26, 0x1E, 0xDF, ++ ++ 0xA7, 0x1E, 0x4F, 0xE9, ++ 0x17, 0x26, 0x16, 0xDF, ++ ++ 0x2D, 0x20, ++ 0x00, 0xE0, ++ 0xA8, 0x3F, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x1E, 0xAF, ++ 0x25, 0x20, ++ 0x00, 0xE0, ++ ++ 0xA4, 0x16, 0x4F, 0xE9, ++ 0x0F, 0xC0, 0x21, 0xC2, ++ ++ 0xA6, 0x80, 0x4F, 0xE9, ++ 0x1F, 0x62, 0x57, 0x9F, ++ ++ 0x0D, 0x20, ++ 0x05, 0x20, ++ 0x2F, 0xC0, 0x21, 0xC6, ++ ++ 0x2D, 0x44, 0x4C, 0xB6, ++ 0x25, 0x44, 0x54, 0xB6, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0xE0, ++ 0x0F, 0x20, ++ ++ 0x2D, 0x20, ++ 0x25, 0x20, ++ 0x07, 0xC0, 0x44, 0xC6, ++ ++ 0x17, 0x50, 0x56, 0x9F, ++ 0xA5, 0x37, 0x4F, 0xE9, ++ ++ 0x06, 0xC0, 0x21, 0xC4, ++ 0x0F, 0x17, 0x0F, 0xAF, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1E, 0x62, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x3E, 0x3D, 0x5D, 0x9F, ++ 0x00, 0xE0, ++ 0x07, 0x20, ++ ++ 0x2F, 0x20, ++ 0x00, 0xE0, ++ 0xA3, 0x0F, 0x4F, 0xE9, ++ ++ 0x06, 0x20, ++ 0x00, 0xE0, ++ 0x1F, 0x26, 0x1F, 0xDF, ++ ++ 0x17, 0x26, 0x17, 0xDF, ++ 0xA1, 0x1F, 0x4F, 0xE9, ++ ++ 0x1E, 0x26, 0x1E, 0xDF, ++ 0x9D, 0x1E, 0x4F, 0xE9, ++ ++ 0x35, 0x17, 0x4F, 0xE9, ++ 0xA2, 0x3F, 0x4F, 0xE9, ++ ++ 0x06, 0x06, 0x1F, 0xAF, ++ 0x39, 0x37, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x17, 0xAF, ++ 0x07, 0x07, 0x1E, 0xAF, ++ ++ 0xA0, 0x80, 0x4F, 0xE9, ++ 0x9E, 0x3E, 0x4F, 0xE9, ++ ++ 0x31, 0x80, 0x4F, 0xE9, ++ 0x9C, 0x80, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x63, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x62, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x60, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC8, 0x40, 0xC0, 0xEC, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x5D, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++}; ++ ++static unsigned char warp_g200_tgzsf[] = { ++ ++ 0x00, 
0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x98, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x81, 0x04, ++ 0x89, 0x04, ++ 0x01, 0x04, ++ 0x09, 0x04, ++ ++ 0xC9, 0x41, 0xC0, 0xEC, ++ 0x11, 0x04, ++ 0x00, 0xE0, ++ ++ 0x41, 0xCC, 0x41, 0xCD, ++ 0x49, 0xCC, 0x49, 0xCD, ++ ++ 0xD1, 0x41, 0xC0, 0xEC, ++ 0x51, 0xCC, 0x51, 0xCD, ++ ++ 0x80, 0x04, ++ 0x10, 0x04, ++ 0x08, 0x04, ++ 0x00, 0xE0, ++ ++ 0x00, 0xCC, 0xC0, 0xCD, ++ 0xD1, 0x49, 0xC0, 0xEC, ++ ++ 0x8A, 0x1F, 0x20, 0xE9, ++ 0x8B, 0x3F, 0x20, 0xE9, ++ ++ 0x41, 0x3C, 0x41, 0xAD, ++ 0x49, 0x3C, 0x49, 0xAD, ++ ++ 0x10, 0xCC, 0x10, 0xCD, ++ 0x08, 0xCC, 0x08, 0xCD, ++ ++ 0xB9, 0x41, 0x49, 0xBB, ++ 0x1F, 0xF0, 0x41, 0xCD, ++ ++ 0x51, 0x3C, 0x51, 0xAD, ++ 0x00, 0x98, 0x80, 0xE9, ++ ++ 0x8F, 0x80, 0x07, 0xEA, ++ 0x24, 0x1F, 0x20, 0xE9, ++ ++ 0x21, 0x45, 0x80, 0xE8, ++ 0x1A, 0x4D, 0x80, 0xE8, ++ ++ 0x31, 0x55, 0x80, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0x41, 0x49, 0xBD, ++ 0x1D, 0x41, 0x51, 0xBD, ++ ++ 0x2E, 0x41, 0x2A, 0xB8, ++ 0x34, 0x53, 0xA0, 0xE8, ++ ++ 0x15, 0x30, ++ 0x1D, 0x30, ++ 0x58, 0xE3, ++ 0x00, 0xE0, ++ ++ 0xB5, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x24, 0x43, 0xA0, 0xE8, ++ 0x2C, 0x4B, 0xA0, 0xE8, ++ ++ 0x15, 0x72, ++ 0x09, 0xE3, ++ 0x00, 0xE0, ++ 0x1D, 0x72, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0x97, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x6C, 0x64, 0xC8, 0xEC, ++ 0x98, 0xE1, ++ 0xB5, 0x05, ++ ++ 0xBD, 0x05, ++ 0x2E, 0x30, ++ 0x32, 0xC0, 0xA0, 0xE8, ++ ++ 0x33, 0xC0, 0xA0, 0xE8, ++ 0x74, 0x64, 0xC8, 0xEC, ++ ++ 0x40, 0x3C, 0x40, 0xAD, ++ 0x32, 0x6A, ++ 0x2A, 0x30, ++ ++ 0x20, 0x73, ++ 0x33, 0x6A, ++ 0x00, 0xE0, ++ 0x28, 0x73, ++ ++ 0x1C, 0x72, ++ 0x83, 0xE2, ++ 0x7B, 0x80, 0x15, 0xEA, ++ ++ 0xB8, 0x3D, 0x28, 0xDF, ++ 0x30, 0x35, 0x20, 0xDF, ++ ++ 0x40, 0x30, ++ 0x00, 0xE0, ++ 0xCC, 0xE2, ++ 0x64, 0x72, ++ ++ 0x25, 0x42, 0x52, 0xBF, ++ 0x2D, 0x42, 0x4A, 0xBF, ++ ++ 0x30, 0x2E, 0x30, 0xDF, ++ 0x38, 0x2E, 0x38, 0xDF, ++ ++ 0x18, 0x1D, 0x45, 0xE9, ++ 0x1E, 0x15, 0x45, 0xE9, ++ ++ 0x2B, 0x49, 0x51, 0xBD, ++ 0x00, 0xE0, ++ 0x1F, 0x73, ++ ++ 0x38, 0x38, 0x40, 0xAF, ++ 0x30, 0x30, 0x40, 0xAF, ++ ++ 0x24, 0x1F, 0x24, 0xDF, ++ 0x1D, 0x32, 0x20, 0xE9, ++ ++ 0x2C, 0x1F, 0x2C, 0xDF, ++ 0x1A, 0x33, 0x20, 0xE9, ++ ++ 0xB0, 0x10, ++ 0x08, 0xE3, ++ 0x40, 0x10, ++ 0xB8, 0x10, ++ ++ 0x26, 0xF0, 0x30, 0xCD, ++ 0x2F, 0xF0, 0x38, 0xCD, ++ ++ 0x2B, 0x80, 0x20, 0xE9, ++ 0x2A, 0x80, 0x20, 0xE9, ++ ++ 0xA6, 0x20, ++ 0x88, 0xE2, ++ 0x00, 0xE0, ++ 0xAF, 0x20, ++ ++ 0x28, 0x2A, 0x26, 0xAF, ++ 0x20, 0x2A, 0xC0, 0xAF, ++ ++ 0x34, 0x1F, 0x34, 0xDF, ++ 0x46, 0x24, 0x46, 0xDF, ++ ++ 0x28, 0x30, 0x80, 0xBF, ++ 0x20, 0x38, 0x80, 0xBF, ++ ++ 0x47, 0x24, 0x47, 0xDF, ++ 0x4E, 0x2C, 0x4E, 0xDF, ++ ++ 0x4F, 0x2C, 
0x4F, 0xDF, ++ 0x56, 0x34, 0x56, 0xDF, ++ ++ 0x28, 0x15, 0x28, 0xDF, ++ 0x20, 0x1D, 0x20, 0xDF, ++ ++ 0x57, 0x34, 0x57, 0xDF, ++ 0x00, 0xE0, ++ 0x1D, 0x05, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x89, 0xE2, ++ 0x2B, 0x30, ++ ++ 0x3F, 0xC1, 0x1D, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x68, ++ 0xBF, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x20, 0xC0, 0x20, 0xAF, ++ 0x28, 0x05, ++ 0x97, 0x74, ++ ++ 0x00, 0xE0, ++ 0x2A, 0x10, ++ 0x16, 0xC0, 0x20, 0xE9, ++ ++ 0x04, 0x80, 0x10, 0xEA, ++ 0x8C, 0xE2, ++ 0x95, 0x05, ++ ++ 0x28, 0xC1, 0x28, 0xAD, ++ 0x1F, 0xC1, 0x15, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA8, 0x67, ++ 0x9F, 0x6B, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x28, 0xC0, 0x28, 0xAD, ++ 0x1D, 0x25, ++ 0x20, 0x05, ++ ++ 0x28, 0x32, 0x80, 0xAD, ++ 0x40, 0x2A, 0x40, 0xBD, ++ ++ 0x1C, 0x80, 0x20, 0xE9, ++ 0x20, 0x33, 0x20, 0xAD, ++ ++ 0x20, 0x73, ++ 0x00, 0xE0, ++ 0xB6, 0x49, 0x51, 0xBB, ++ ++ 0x26, 0x2F, 0xB0, 0xE8, ++ 0x19, 0x20, 0x20, 0xE9, ++ ++ 0x35, 0x20, 0x35, 0xDF, ++ 0x3D, 0x20, 0x3D, 0xDF, ++ ++ 0x15, 0x20, 0x15, 0xDF, ++ 0x1D, 0x20, 0x1D, 0xDF, ++ ++ 0x26, 0xD0, 0x26, 0xCD, ++ 0x29, 0x49, 0x2A, 0xB8, ++ ++ 0x26, 0x40, 0x80, 0xBD, ++ 0x3B, 0x48, 0x50, 0xBD, ++ ++ 0x3E, 0x54, 0x57, 0x9F, ++ 0x00, 0xE0, ++ 0x82, 0xE1, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x26, 0x30, ++ 0x29, 0x30, ++ 0x48, 0x3C, 0x48, 0xAD, ++ ++ 0x2B, 0x72, ++ 0xC2, 0xE1, ++ 0x2C, 0xC0, 0x44, 0xC2, ++ ++ 0x05, 0x24, 0x34, 0xBF, ++ 0x0D, 0x24, 0x2C, 0xBF, ++ ++ 0x2D, 0x46, 0x4E, 0xBF, ++ 0x25, 0x46, 0x56, 0xBF, ++ ++ 0x20, 0x1D, 0x6F, 0x8F, ++ 0x32, 0x3E, 0x5F, 0xE9, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x30, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x33, 0x1E, 0x5F, 0xE9, ++ ++ 0x05, 0x44, 0x54, 0xB2, ++ 0x0D, 0x44, 0x4C, 0xB2, ++ ++ 0x19, 0xC0, 0xB0, 0xE8, ++ 0x34, 0xC0, 0x44, 0xC4, ++ ++ 0x33, 0x73, ++ 0x00, 0xE0, ++ 0x3E, 0x62, 0x57, 0x9F, ++ ++ 0x1E, 0xAF, 0x59, 0x9F, ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ ++ 0x84, 0x3E, 0x58, 0xE9, ++ 0x28, 0x1D, 0x6F, 0x8F, ++ ++ 0x05, 0x20, ++ 0x00, 0xE0, ++ 0x85, 0x1E, 0x58, 0xE9, ++ ++ 0x9B, 0x3B, 0x33, 0xDF, ++ 0x20, 0x20, 0x42, 0xAF, ++ ++ 0x30, 0x42, 0x56, 0x9F, ++ 0x80, 0x3E, 0x57, 0xE9, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x30, 0x80, 0x5F, 0xE9, ++ ++ 0x28, 0x28, 0x24, 0xAF, ++ 0x81, 0x1E, 0x57, 0xE9, ++ ++ 0x05, 0x47, 0x57, 0xBF, ++ 0x0D, 0x47, 0x4F, 0xBF, ++ ++ 0x88, 0x80, 0x58, 0xE9, ++ 0x1B, 0x29, 0x1B, 0xDF, ++ ++ 0x30, 0x1D, 0x6F, 0x8F, ++ 0x3A, 0x30, 0x4F, 0xE9, ++ ++ 0x1C, 0x30, 0x26, 0xDF, ++ 0x09, 0xE3, ++ 0x3B, 0x05, ++ ++ 0x3E, 0x50, 0x56, 0x9F, ++ 0x3B, 0x3F, 0x4F, 0xE9, ++ ++ 0x1E, 0x8F, 0x51, 0x9F, ++ 0x00, 0xE0, ++ 0xAC, 0x20, ++ ++ 0x2D, 0x44, 0x4C, 0xB4, ++ 0x2C, 0x1C, 0xC0, 0xAF, ++ ++ 0x25, 0x44, 0x54, 0xB4, ++ 0x00, 0xE0, ++ 0xC8, 0x30, ++ ++ 0x30, 0x46, 0x30, 0xAF, ++ 0x1B, 0x1B, 0x48, 0xAF, ++ ++ 0x00, 0xE0, ++ 0x25, 0x20, ++ 0x38, 0x2C, 0x4F, 0xE9, ++ ++ 0x86, 0x80, 0x57, 0xE9, ++ 0x38, 0x1D, 0x6F, 0x8F, ++ ++ 0x28, 0x74, ++ 0x00, 0xE0, ++ 0x0D, 0x44, 0x4C, 0xB0, ++ ++ 0x05, 0x44, 0x54, 0xB0, ++ 0x2D, 0x20, ++ 0x9B, 0x10, ++ ++ 0x82, 0x3E, 0x57, 0xE9, ++ 0x32, 0xF0, 0x1B, 0xCD, ++ ++ 0x1E, 0xBD, 0x59, 0x9F, ++ 0x83, 0x1E, 0x57, 0xE9, ++ ++ 0x38, 0x47, 0x38, 0xAF, ++ 0x34, 0x20, ++ 0x2A, 0x30, ++ ++ 0x00, 0xE0, ++ 0x0D, 0x20, ++ 0x32, 0x20, ++ 0x05, 0x20, ++ ++ 0x87, 0x80, 0x57, 0xE9, ++ 0x1F, 0x54, 0x57, 0x9F, ++ ++ 0x17, 0x42, 0x56, 0x9F, ++ 0x00, 0xE0, ++ 0x3B, 0x6A, ++ ++ 0x3F, 0x8F, 0x51, 0x9F, ++ 0x37, 0x1E, 0x4F, 0xE9, ++ ++ 0x37, 0x32, 
0x2A, 0xAF, ++ 0x00, 0xE0, ++ 0x32, 0x00, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x27, 0xC0, 0x44, 0xC0, ++ ++ 0x36, 0x1F, 0x4F, 0xE9, ++ 0x1F, 0x1F, 0x26, 0xDF, ++ ++ 0x37, 0x1B, 0x37, 0xBF, ++ 0x17, 0x26, 0x17, 0xDF, ++ ++ 0x3E, 0x17, 0x4F, 0xE9, ++ 0x3F, 0x3F, 0x4F, 0xE9, ++ ++ 0x34, 0x1F, 0x34, 0xAF, ++ 0x2B, 0x05, ++ 0xA7, 0x20, ++ ++ 0x33, 0x2B, 0x37, 0xDF, ++ 0x27, 0x17, 0xC0, 0xAF, ++ ++ 0x34, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2D, 0x21, 0x1A, 0xB0, ++ 0x25, 0x21, 0x31, 0xB0, ++ ++ 0x0D, 0x21, 0x1A, 0xB2, ++ 0x05, 0x21, 0x31, 0xB2, ++ ++ 0x03, 0x80, 0x2A, 0xEA, ++ 0x17, 0xC1, 0x2B, 0xBD, ++ ++ 0x2D, 0x20, ++ 0x25, 0x20, ++ 0x05, 0x20, ++ 0x0D, 0x20, ++ ++ 0xB3, 0x68, ++ 0x97, 0x25, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0xC0, 0x33, 0xAF, ++ 0x2F, 0xC0, 0x21, 0xC0, ++ ++ 0x16, 0x42, 0x56, 0x9F, ++ 0x3C, 0x27, 0x4F, 0xE9, ++ ++ 0x1E, 0x62, 0x57, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x21, 0x31, 0xB4, ++ 0x2D, 0x21, 0x1A, 0xB4, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x33, 0x05, ++ 0x00, 0xE0, ++ 0x28, 0x19, 0x60, 0xEC, ++ ++ 0x0D, 0x21, 0x1A, 0xB6, ++ 0x05, 0x21, 0x31, 0xB6, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0xE0, ++ 0x2F, 0x20, ++ ++ 0x23, 0x3B, 0x33, 0xAD, ++ 0x1E, 0x26, 0x1E, 0xDF, ++ ++ 0xA7, 0x1E, 0x4F, 0xE9, ++ 0x17, 0x26, 0x16, 0xDF, ++ ++ 0x2D, 0x20, ++ 0x00, 0xE0, ++ 0xA8, 0x3F, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x1E, 0xAF, ++ 0x25, 0x20, ++ 0x00, 0xE0, ++ ++ 0xA4, 0x16, 0x4F, 0xE9, ++ 0x0F, 0xC0, 0x21, 0xC2, ++ ++ 0xA6, 0x80, 0x4F, 0xE9, ++ 0x1F, 0x62, 0x57, 0x9F, ++ ++ 0x0D, 0x20, ++ 0x05, 0x20, ++ 0x2F, 0xC0, 0x21, 0xC6, ++ ++ 0x3F, 0x2F, 0x5D, 0x9F, ++ 0x00, 0xE0, ++ 0x0F, 0x20, ++ ++ 0x17, 0x50, 0x56, 0x9F, ++ 0xA5, 0x37, 0x4F, 0xE9, ++ ++ 0x06, 0xC0, 0x21, 0xC4, ++ 0x0F, 0x17, 0x0F, 0xAF, ++ ++ 0x37, 0x0F, 0x5C, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x2F, 0x20, ++ 0x00, 0xE0, ++ 0xA3, 0x80, 0x4F, 0xE9, ++ ++ 0x06, 0x20, ++ 0x00, 0xE0, ++ 0x1F, 0x26, 0x1F, 0xDF, ++ ++ 0x17, 0x26, 0x17, 0xDF, ++ 0x35, 0x17, 0x4F, 0xE9, ++ ++ 0xA1, 0x1F, 0x4F, 0xE9, ++ 0xA2, 0x3F, 0x4F, 0xE9, ++ ++ 0x06, 0x06, 0x1F, 0xAF, ++ 0x39, 0x37, 0x4F, 0xE9, ++ ++ 0x2F, 0x2F, 0x17, 0xAF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xA0, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x31, 0x80, 0x4F, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x57, 0x39, 0x20, 0xE9, ++ ++ 0x16, 0x28, 0x20, 0xE9, ++ 0x1D, 0x3B, 0x20, 0xE9, ++ ++ 0x1E, 0x2B, 0x20, 0xE9, ++ 0x2B, 0x32, 0x20, 0xE9, ++ ++ 0x1C, 0x23, 0x20, 0xE9, ++ 0x57, 0x36, 0x20, 0xE9, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x40, 0x40, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x90, 0xE2, ++ 0x00, 0xE0, ++ ++ 0x68, 0xFF, 0x20, 0xEA, ++ 0x19, 0xC8, 0xC1, 0xCD, ++ ++ 0x1F, 0xD7, 0x18, 0xBD, ++ 0x3F, 0xD7, 0x22, 0xBD, ++ ++ 0x9F, 0x41, 0x49, 0xBD, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x25, 0x41, 0x49, 0xBD, ++ 0x2D, 0x41, 0x51, 0xBD, ++ ++ 0x0D, 0x80, 0x07, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x35, 0x40, 0x48, 0xBD, ++ 0x3D, 0x40, 0x50, 0xBD, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x25, 0x30, ++ 0x2D, 0x30, ++ ++ 0x35, 0x30, ++ 0xB5, 0x30, ++ 0xBD, 0x30, ++ 0x3D, 0x30, ++ ++ 0x9C, 0xA7, 0x5B, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x67, 0xFF, 0x0A, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xC9, 0x41, 0xC8, 0xEC, ++ 0x42, 0xE1, ++ 0x00, 0xE0, ++ ++ 0x65, 0xFF, 0x20, 0xEA, ++ 0x00, 
++ [... WARP microcode byte data ...]
++
++};
++
++static unsigned char warp_g400_t2gz[] = {
++
++ [... WARP microcode byte data ...]
++
++};
++
++static unsigned char warp_g400_t2gza[] = {
++
++ [... WARP microcode byte data ...]
++
++};
++
++static unsigned char warp_g400_t2gzaf[] = {
++
++ [... WARP microcode byte data ...]
++
++};
++
++static unsigned char warp_g400_t2gzf[] = {
++
++ [... WARP microcode byte data ...]
++
++};
++
++static unsigned char warp_g400_t2gzs[] = {
++
++ [... WARP microcode byte data ...]
++
++};
++
++static unsigned char warp_g400_t2gzsa[] = {
++
++ [... WARP microcode byte data ...]
++
++};
++
++static unsigned char warp_g400_t2gzsaf[] = {
++
++ [... WARP microcode byte data ...]
++
++};
++
++static unsigned char warp_g400_t2gzsf[] = {
++
++ [... WARP microcode byte data ...]
++
++};
++
++static unsigned char warp_g400_tgz[] = {
++
++ [... WARP microcode byte data ...]
++
++};
++
++static unsigned char warp_g400_tgza[] = {
++
++ [... WARP microcode byte data ...]
0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x5C, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x4E, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x34, 0x80, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x27, 0xCF, 0x74, 0xC6, ++ 0x3D, 0xCF, 0x74, 0xC2, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x20, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9C, 0x27, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x4C, 0xB4, ++ 0x02, 0x44, 0x54, 0xB4, ++ ++ 0x2A, 0x44, 0x4C, 0xB6, ++ 0x1A, 0x44, 0x54, 0xB6, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x38, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 
0x47, 0x57, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x36, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x37, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x9D, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x9E, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0xAB, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xD3, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x99, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgzaf[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x61, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 
0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x53, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x26, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x3D, 0xCF, 0x74, 0xC2, ++ 0x27, 0xCF, 0x74, 0xC6, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9C, 0x27, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x4C, 0xB4, ++ 0x02, 0x44, 0x54, 0xB4, ++ ++ 0x2A, 0x44, 0x4C, 0xB6, ++ 0x1A, 0x44, 0x54, 0xB6, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x38, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x3D, 0xCF, 0x75, 0xC6, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x0A, 0x45, 0x4D, 0xB6, ++ 0x02, 0x45, 0x55, 0xB6, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x3D, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x38, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9D, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x9E, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x35, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x39, 0x38, 
0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0xA6, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xCD, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x94, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgzf[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x5D, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x4F, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ 
++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x34, 0x80, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x27, 0xCF, 0x75, 0xC6, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x20, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x3D, 0xCF, 0x74, 0xC2, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x31, 0x27, 0x20, 0xE9, ++ ++ 0x0A, 0x44, 0x4C, 0xB4, ++ 0x02, 0x44, 0x54, 0xB4, ++ ++ 0x2A, 0x45, 0x4D, 0xB6, ++ 0x1A, 0x45, 0x55, 0xB6, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x38, 0x3D, 0x20, 0xE9, ++ ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x36, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x37, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x35, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x39, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0xAA, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 
0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xD3, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x98, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgzs[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x65, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x57, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, 
++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x27, 0xCF, 0x74, 0xC2, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x38, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x29, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x27, 0xCF, 0x75, 0xC0, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x3D, 0xCF, 0x75, 0xC2, ++ 0x37, 0xCF, 0x75, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA6, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA3, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB4, ++ 0x1A, 0x44, 0x54, 0xB4, ++ ++ 0x0A, 0x45, 0x4D, 0xB0, ++ 0x02, 0x45, 0x55, 0xB0, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA0, 0x37, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x4D, 0xB2, ++ 0x1A, 0x45, 0x55, 0xB2, ++ ++ 0x0A, 0x45, 0x4D, 0xB4, ++ 0x02, 0x45, 0x55, 0xB4, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0xA7, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0xA8, 0x38, 0x4F, 0xE9, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA4, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA5, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0xA1, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0xA2, 0x38, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0xA2, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 
0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xCA, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x90, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgzsa[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x6A, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x5C, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 
0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x27, 0xCF, 0x74, 0xC2, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x38, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x2E, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x27, 0xCF, 0x75, 0xC0, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x3D, 0xCF, 0x75, 0xC2, ++ 0x37, 0xCF, 0x75, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA6, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA3, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB4, ++ 0x1A, 0x44, 0x54, 0xB4, ++ ++ 0x0A, 0x45, 0x4D, 0xB0, ++ 0x02, 0x45, 0x55, 0xB0, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA0, 0x37, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x4D, 0xB2, ++ 0x1A, 0x45, 0x55, 0xB2, ++ ++ 0x0A, 0x45, 0x4D, 0xB4, ++ 0x02, 0x45, 0x55, 0xB4, ++ ++ 0x27, 0xCF, 0x74, 0xC6, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA7, 0x30, 0x4F, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9C, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA8, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB6, ++ 0x1A, 0x44, 0x54, 0xB6, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA4, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA5, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA1, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA2, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x9D, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x9E, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0x9D, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 
0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xC5, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x8B, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgzsaf[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x6E, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x60, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ ++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 
0x27, 0xCF, 0x74, 0xC2, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x38, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x32, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x27, 0xCF, 0x75, 0xC0, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x3D, 0xCF, 0x75, 0xC2, ++ 0x37, 0xCF, 0x75, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA6, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA3, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB4, ++ 0x1A, 0x44, 0x54, 0xB4, ++ ++ 0x0A, 0x45, 0x4D, 0xB0, ++ 0x02, 0x45, 0x55, 0xB0, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA0, 0x37, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x4D, 0xB2, ++ 0x1A, 0x45, 0x55, 0xB2, ++ ++ 0x0A, 0x45, 0x4D, 0xB4, ++ 0x02, 0x45, 0x55, 0xB4, ++ ++ 0x27, 0xCF, 0x74, 0xC6, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA7, 0x30, 0x4F, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9C, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA8, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB6, ++ 0x1A, 0x44, 0x54, 0xB6, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x45, 0x4D, 0xB6, ++ 0x02, 0x45, 0x55, 0xB6, ++ ++ 0x3D, 0xCF, 0x75, 0xC6, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA4, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA5, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x3D, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0xA1, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0xA2, 0x38, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x9D, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x9E, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x35, 0x30, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x39, 0x38, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0x99, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 
0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xC1, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x87, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; ++ ++static unsigned char warp_g400_tgzsf[] = { ++ ++ 0x00, 0x88, 0x98, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++ 0xFF, 0x80, 0xC0, 0xE9, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x22, 0x40, 0x48, 0xBF, ++ 0x2A, 0x40, 0x50, 0xBF, ++ ++ 0x32, 0x41, 0x49, 0xBF, ++ 0x3A, 0x41, 0x51, 0xBF, ++ ++ 0xC3, 0x6B, ++ 0xCB, 0x6B, ++ 0x00, 0x88, 0x98, 0xE9, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x96, 0xE2, ++ 0x41, 0x04, ++ ++ 0x7B, 0x43, 0xA0, 0xE8, ++ 0x73, 0x4B, 0xA0, 0xE8, ++ ++ 0xAD, 0xEE, 0x29, 0x9F, ++ 0x00, 0xE0, ++ 0x49, 0x04, ++ ++ 0x90, 0xE2, ++ 0x51, 0x04, ++ 0x31, 0x46, 0xB1, 0xE8, ++ ++ 0x49, 0x41, 0xC0, 0xEC, ++ 0x39, 0x57, 0xB1, 0xE8, ++ ++ 0x00, 0x04, ++ 0x46, 0xE2, ++ 0x73, 0x53, 0xA0, 0xE8, ++ ++ 0x51, 0x41, 0xC0, 0xEC, ++ 0x31, 0x00, ++ 0x39, 0x00, ++ ++ 0x6A, 0x80, 0x15, 0xEA, ++ 0x08, 0x04, ++ 0x10, 0x04, ++ ++ 0x51, 0x49, 0xC0, 0xEC, ++ 0x2F, 0x41, 0x60, 0xEA, ++ ++ 0x31, 0x20, ++ 0x39, 0x20, ++ 0x1F, 0x42, 0xA0, 0xE8, ++ ++ 0x2A, 0x42, 0x4A, 0xBF, ++ 0x27, 0x4A, 0xA0, 0xE8, ++ ++ 0x1A, 0x42, 0x52, 0xBF, ++ 0x1E, 0x49, 0x60, 0xEA, ++ ++ 0x73, 0x7B, 0xC8, 0xEC, ++ 0x26, 0x51, 0x60, 0xEA, ++ ++ 0x32, 0x40, 0x48, 0xBD, ++ 0x22, 0x40, 0x50, 0xBD, ++ ++ 0x12, 0x41, 0x49, 0xBD, ++ 0x3A, 0x41, 0x51, 0xBD, ++ ++ 0xBF, 0x2F, 0x26, 0xBD, ++ 0x00, 0xE0, ++ 0x7B, 0x72, ++ ++ 0x32, 0x20, ++ 0x22, 0x20, ++ 0x12, 0x20, ++ 0x3A, 0x20, ++ ++ 0x46, 0x31, 0x46, 0xBF, ++ 0x4E, 0x31, 0x4E, 0xBF, ++ ++ 0xB3, 0xE2, 0x2D, 0x9F, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x56, 0x31, 0x56, 0xBF, ++ 0x47, 0x39, 0x47, 0xBF, ++ ++ 0x4F, 0x39, 0x4F, 0xBF, ++ 0x57, 0x39, 0x57, 0xBF, ++ ++ 0x5C, 0x80, 0x07, 0xEA, ++ 0x24, 0x41, 0x20, 0xE9, ++ ++ 0x42, 0x73, 0xF8, 0xEC, ++ 0x00, 0xE0, ++ 0x2D, 0x73, ++ ++ 0x33, 0x72, ++ 0x0C, 0xE3, ++ 0xA5, 0x2F, 0x1E, 0xBD, ++ ++ 0x43, 0x43, 0x2D, 0xDF, ++ 0x4B, 0x4B, 0x2D, 0xDF, ++ ++ 0xAE, 0x1E, 0x26, 0xBD, ++ 0x58, 0xE3, ++ 0x33, 0x66, ++ ++ 0x53, 0x53, 0x2D, 0xDF, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0xB8, 0x38, 0x33, 0xBF, ++ 0x00, 0xE0, ++ 0x59, 0xE3, ++ ++ 0x1E, 0x12, 0x41, 0xE9, ++ 0x1A, 0x22, 0x41, 0xE9, ++ ++ 0x2B, 0x40, 0x3D, 0xE9, ++ 0x3F, 0x4B, 0xA0, 0xE8, ++ ++ 0x2D, 0x73, ++ 0x30, 0x76, ++ 0x05, 0x80, 0x3D, 0xEA, ++ ++ 0x37, 0x43, 0xA0, 0xE8, ++ 0x3D, 0x53, 0xA0, 0xE8, ++ ++ 0x48, 0x70, 0xF8, 0xEC, ++ 0x2B, 0x48, 0x3C, 0xE9, ++ ++ 0x1F, 0x27, 0xBC, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x15, 0xC0, 0x20, 0xE9, ++ 0x15, 0xC0, 0x20, 0xE9, ++ ++ 0x18, 0x3A, 0x41, 0xE9, ++ 0x1D, 0x32, 0x41, 0xE9, ++ ++ 0x2A, 0x40, 0x20, 0xE9, ++ 0x56, 0x3D, 0x56, 0xDF, ++ ++ 0x46, 0x37, 0x46, 0xDF, ++ 0x4E, 0x3F, 0x4E, 0xDF, ++ ++ 0x16, 0x30, 0x20, 0xE9, ++ 0x4F, 0x3F, 0x4F, 0xDF, ++ 
++ 0x47, 0x37, 0x47, 0xDF, ++ 0x57, 0x3D, 0x57, 0xDF, ++ ++ 0x32, 0x32, 0x2D, 0xDF, ++ 0x22, 0x22, 0x2D, 0xDF, ++ ++ 0x12, 0x12, 0x2D, 0xDF, ++ 0x3A, 0x3A, 0x2D, 0xDF, ++ ++ 0x27, 0xCF, 0x74, 0xC2, ++ 0x37, 0xCF, 0x74, 0xC4, ++ ++ 0x0A, 0x44, 0x4C, 0xB0, ++ 0x02, 0x44, 0x54, 0xB0, ++ ++ 0x3D, 0xCF, 0x74, 0xC0, ++ 0x34, 0x37, 0x20, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x38, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3C, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB2, ++ 0x1A, 0x44, 0x54, 0xB2, ++ ++ 0x2E, 0x80, 0x3A, 0xEA, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x27, 0xCF, 0x75, 0xC0, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x32, 0x31, 0x5F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x33, 0x39, 0x5F, 0xE9, ++ ++ 0x3D, 0xCF, 0x75, 0xC2, ++ 0x37, 0xCF, 0x75, 0xC4, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA6, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA3, 0x3D, 0x20, 0xE9, ++ ++ 0x2A, 0x44, 0x4C, 0xB4, ++ 0x1A, 0x44, 0x54, 0xB4, ++ ++ 0x0A, 0x45, 0x4D, 0xB0, ++ 0x02, 0x45, 0x55, 0xB0, ++ ++ 0x88, 0x73, 0x5E, 0xE9, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA0, 0x37, 0x20, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x3E, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x3F, 0x38, 0x4F, 0xE9, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x3A, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x3B, 0x39, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x4D, 0xB2, ++ 0x1A, 0x45, 0x55, 0xB2, ++ ++ 0x0A, 0x45, 0x4D, 0xB4, ++ 0x02, 0x45, 0x55, 0xB4, ++ ++ 0x27, 0xCF, 0x75, 0xC6, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0xA7, 0x30, 0x4F, 0xE9, ++ 0x0A, 0x20, ++ 0x02, 0x20, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x31, 0x27, 0x20, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA8, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x45, 0x4D, 0xB6, ++ 0x1A, 0x45, 0x55, 0xB6, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x36, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x37, 0x39, 0x4F, 0xE9, ++ ++ 0x00, 0x80, 0x00, 0xE8, ++ 0x2A, 0x20, ++ 0x1A, 0x20, ++ ++ 0x2A, 0x46, 0x4E, 0xBF, ++ 0x1A, 0x46, 0x56, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA4, 0x31, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA5, 0x39, 0x4F, 0xE9, ++ ++ 0x0A, 0x47, 0x4F, 0xBF, ++ 0x02, 0x47, 0x57, 0xBF, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0xA1, 0x30, 0x4F, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0xA2, 0x38, 0x4F, 0xE9, ++ ++ 0x2A, 0x43, 0x4B, 0xBF, ++ 0x1A, 0x43, 0x53, 0xBF, ++ ++ 0x30, 0x50, 0x2E, 0x9F, ++ 0x35, 0x31, 0x4F, 0xE9, ++ ++ 0x38, 0x21, 0x2C, 0x9F, ++ 0x39, 0x39, 0x4F, 0xE9, ++ ++ 0x31, 0x53, 0x2F, 0x9F, ++ 0x80, 0x31, 0x57, 0xE9, ++ ++ 0x39, 0xE5, 0x2C, 0x9F, ++ 0x81, 0x39, 0x57, 0xE9, ++ ++ 0x37, 0x48, 0x50, 0xBD, ++ 0x8A, 0x36, 0x20, 0xE9, ++ ++ 0x86, 0x76, 0x57, 0xE9, ++ 0x8B, 0x3E, 0x20, 0xE9, ++ ++ 0x82, 0x30, 0x57, 0xE9, ++ 0x87, 0x77, 0x57, 0xE9, ++ ++ 0x83, 0x38, 0x57, 0xE9, ++ 0x35, 0x49, 0x51, 0xBD, ++ ++ 0x84, 0x31, 0x5E, 0xE9, ++ 0x30, 0x1F, 0x5F, 0xE9, ++ ++ 0x85, 0x39, 0x5E, 0xE9, ++ 0x57, 0x25, 0x20, 0xE9, ++ ++ 0x2B, 0x48, 0x20, 0xE9, ++ 0x1D, 0x37, 0xE1, 0xEA, ++ ++ 0x1E, 0x35, 0xE1, 0xEA, ++ 0x00, 0xE0, ++ 0x26, 0x77, ++ ++ 0x24, 0x49, 0x20, 0xE9, ++ 0x9D, 0xFF, 0x20, 0xEA, ++ ++ 0x16, 0x26, 0x20, 0xE9, ++ 0x57, 0x2E, 0xBF, 0xEA, ++ ++ 0x1C, 0x46, 0xA0, 0xE8, ++ 0x23, 0x4E, 0xA0, 0xE8, ++ ++ 0x2B, 0x56, 0xA0, 0xE8, ++ 0x1D, 0x47, 0xA0, 0xE8, ++ ++ 0x24, 0x4F, 0xA0, 0xE8, ++ 0x2C, 0x57, 0xA0, 0xE8, ++ ++ 0x1C, 0x00, ++ 0x23, 0x00, ++ 0x2B, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x00, ++ 0x24, 0x00, ++ 0x2C, 0x00, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x65, ++ 0x23, 0x65, ++ 0x2B, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1D, 0x65, ++ 0x24, 0x65, ++ 
0x2C, 0x65, ++ 0x00, 0xE0, ++ ++ 0x1C, 0x23, 0x60, 0xEC, ++ 0x36, 0xD7, 0x36, 0xAD, ++ ++ 0x2B, 0x80, 0x60, 0xEC, ++ 0x1D, 0x24, 0x60, 0xEC, ++ ++ 0x3E, 0xD7, 0x3E, 0xAD, ++ 0x2C, 0x80, 0x60, 0xEC, ++ ++ 0x1C, 0x2B, 0xDE, 0xE8, ++ 0x23, 0x80, 0xDE, 0xE8, ++ ++ 0x36, 0x80, 0x36, 0xBD, ++ 0x3E, 0x80, 0x3E, 0xBD, ++ ++ 0x33, 0xD7, 0x1C, 0xBD, ++ 0x3B, 0xD7, 0x23, 0xBD, ++ ++ 0x46, 0x80, 0x46, 0xCF, ++ 0x4F, 0x80, 0x4F, 0xCF, ++ ++ 0x56, 0x33, 0x56, 0xCF, ++ 0x47, 0x3B, 0x47, 0xCF, ++ ++ 0xC5, 0xFF, 0x20, 0xEA, ++ 0x00, 0x80, 0x00, 0xE8, ++ ++ 0x4E, 0x33, 0x4E, 0xCF, ++ 0x57, 0x3B, 0x57, 0xCF, ++ ++ 0x8B, 0xFF, 0x20, 0xEA, ++ 0x57, 0xC0, 0xBF, 0xEA, ++ ++ 0x00, 0x80, 0xA0, 0xE9, ++ 0x00, 0x00, 0xD8, 0xEC, ++ ++}; +diff -Nurd git/drivers/gpu/drm-tungsten/mga_warp.c git-nokia/drivers/gpu/drm-tungsten/mga_warp.c +--- git/drivers/gpu/drm-tungsten/mga_warp.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/mga_warp.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,198 @@ ++/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*- ++ * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com ++ */ ++/* ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "mga_drm.h" ++#include "mga_drv.h" ++#include "mga_ucode.h" ++ ++#define MGA_WARP_CODE_ALIGN 256 /* in bytes */ ++ ++#define WARP_UCODE_SIZE( which ) \ ++ ((sizeof(which) / MGA_WARP_CODE_ALIGN + 1) * MGA_WARP_CODE_ALIGN) ++ ++#define WARP_UCODE_INSTALL( which, where ) \ ++do { \ ++ DRM_DEBUG( " pcbase = 0x%08lx vcbase = %p\n", pcbase, vcbase );\ ++ dev_priv->warp_pipe_phys[where] = pcbase; \ ++ memcpy( vcbase, which, sizeof(which) ); \ ++ pcbase += WARP_UCODE_SIZE( which ); \ ++ vcbase += WARP_UCODE_SIZE( which ); \ ++} while (0) ++ ++static const unsigned int mga_warp_g400_microcode_size = ++ (WARP_UCODE_SIZE(warp_g400_tgz) + ++ WARP_UCODE_SIZE(warp_g400_tgza) + ++ WARP_UCODE_SIZE(warp_g400_tgzaf) + ++ WARP_UCODE_SIZE(warp_g400_tgzf) + ++ WARP_UCODE_SIZE(warp_g400_tgzs) + ++ WARP_UCODE_SIZE(warp_g400_tgzsa) + ++ WARP_UCODE_SIZE(warp_g400_tgzsaf) + ++ WARP_UCODE_SIZE(warp_g400_tgzsf) + ++ WARP_UCODE_SIZE(warp_g400_t2gz) + ++ WARP_UCODE_SIZE(warp_g400_t2gza) + ++ WARP_UCODE_SIZE(warp_g400_t2gzaf) + ++ WARP_UCODE_SIZE(warp_g400_t2gzf) + ++ WARP_UCODE_SIZE(warp_g400_t2gzs) + ++ WARP_UCODE_SIZE(warp_g400_t2gzsa) + ++ WARP_UCODE_SIZE(warp_g400_t2gzsaf) + ++ WARP_UCODE_SIZE(warp_g400_t2gzsf)); ++ ++static const unsigned int mga_warp_g200_microcode_size = ++ (WARP_UCODE_SIZE(warp_g200_tgz) + ++ WARP_UCODE_SIZE(warp_g200_tgza) + ++ WARP_UCODE_SIZE(warp_g200_tgzaf) + ++ WARP_UCODE_SIZE(warp_g200_tgzf) + ++ WARP_UCODE_SIZE(warp_g200_tgzs) + ++ WARP_UCODE_SIZE(warp_g200_tgzsa) + ++ WARP_UCODE_SIZE(warp_g200_tgzsaf) + ++ WARP_UCODE_SIZE(warp_g200_tgzsf)); ++ ++ ++unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv) ++{ ++ switch (dev_priv->chipset) { ++ case MGA_CARD_TYPE_G400: ++ case MGA_CARD_TYPE_G550: ++ return PAGE_ALIGN(mga_warp_g400_microcode_size); ++ case MGA_CARD_TYPE_G200: ++ return PAGE_ALIGN(mga_warp_g200_microcode_size); ++ default: ++ DRM_ERROR("Unknown chipset value: 0x%x\n", dev_priv->chipset); ++ return 0; ++ } ++} ++ ++static int mga_warp_install_g400_microcode(drm_mga_private_t * dev_priv) ++{ ++ unsigned char *vcbase = dev_priv->warp->handle; ++ unsigned long pcbase = dev_priv->warp->offset; ++ ++ memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); ++ ++ WARP_UCODE_INSTALL(warp_g400_tgz, MGA_WARP_TGZ); ++ WARP_UCODE_INSTALL(warp_g400_tgzf, MGA_WARP_TGZF); ++ WARP_UCODE_INSTALL(warp_g400_tgza, MGA_WARP_TGZA); ++ WARP_UCODE_INSTALL(warp_g400_tgzaf, MGA_WARP_TGZAF); ++ WARP_UCODE_INSTALL(warp_g400_tgzs, MGA_WARP_TGZS); ++ WARP_UCODE_INSTALL(warp_g400_tgzsf, MGA_WARP_TGZSF); ++ WARP_UCODE_INSTALL(warp_g400_tgzsa, MGA_WARP_TGZSA); ++ WARP_UCODE_INSTALL(warp_g400_tgzsaf, MGA_WARP_TGZSAF); ++ ++ WARP_UCODE_INSTALL(warp_g400_t2gz, MGA_WARP_T2GZ); ++ WARP_UCODE_INSTALL(warp_g400_t2gzf, MGA_WARP_T2GZF); ++ WARP_UCODE_INSTALL(warp_g400_t2gza, MGA_WARP_T2GZA); ++ WARP_UCODE_INSTALL(warp_g400_t2gzaf, MGA_WARP_T2GZAF); ++ WARP_UCODE_INSTALL(warp_g400_t2gzs, MGA_WARP_T2GZS); ++ WARP_UCODE_INSTALL(warp_g400_t2gzsf, MGA_WARP_T2GZSF); ++ WARP_UCODE_INSTALL(warp_g400_t2gzsa, MGA_WARP_T2GZSA); ++ WARP_UCODE_INSTALL(warp_g400_t2gzsaf, MGA_WARP_T2GZSAF); ++ ++ return 0; ++} ++ ++static int mga_warp_install_g200_microcode(drm_mga_private_t * dev_priv) ++{ ++ unsigned char *vcbase = dev_priv->warp->handle; ++ unsigned long pcbase = dev_priv->warp->offset; ++ ++ memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); ++ ++ WARP_UCODE_INSTALL(warp_g200_tgz, 
MGA_WARP_TGZ); ++ WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF); ++ WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA); ++ WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF); ++ WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS); ++ WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF); ++ WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA); ++ WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF); ++ ++ return 0; ++} ++ ++int mga_warp_install_microcode(drm_mga_private_t * dev_priv) ++{ ++ const unsigned int size = mga_warp_microcode_size(dev_priv); ++ ++ DRM_DEBUG("MGA ucode size = %d bytes\n", size); ++ if (size > dev_priv->warp->size) { ++ DRM_ERROR("microcode too large! (%u > %lu)\n", ++ size, dev_priv->warp->size); ++ return -ENOMEM; ++ } ++ ++ switch (dev_priv->chipset) { ++ case MGA_CARD_TYPE_G400: ++ case MGA_CARD_TYPE_G550: ++ return mga_warp_install_g400_microcode(dev_priv); ++ case MGA_CARD_TYPE_G200: ++ return mga_warp_install_g200_microcode(dev_priv); ++ default: ++ return -EINVAL; ++ } ++} ++ ++#define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE) ++ ++int mga_warp_init(drm_mga_private_t * dev_priv) ++{ ++ u32 wmisc; ++ ++ /* FIXME: Get rid of these damned magic numbers... ++ */ ++ switch (dev_priv->chipset) { ++ case MGA_CARD_TYPE_G400: ++ case MGA_CARD_TYPE_G550: ++ MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND); ++ MGA_WRITE(MGA_WGETMSB, 0x00000E00); ++ MGA_WRITE(MGA_WVRTXSZ, 0x00001807); ++ MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000); ++ break; ++ case MGA_CARD_TYPE_G200: ++ MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND); ++ MGA_WRITE(MGA_WGETMSB, 0x1606); ++ MGA_WRITE(MGA_WVRTXSZ, 7); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE | ++ MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE)); ++ wmisc = MGA_READ(MGA_WMISC); ++ if (wmisc != WMISC_EXPECTED) { ++ DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n", ++ wmisc, WMISC_EXPECTED); ++ return -EINVAL; ++ } ++ ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_bo.c git-nokia/drivers/gpu/drm-tungsten/nouveau_bo.c +--- git/drivers/gpu/drm-tungsten/nouveau_bo.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_bo.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,296 @@ ++/* ++ * Copyright 2007 Dave Airlied ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++/* ++ * Authors: Dave Airlied ++ * Ben Skeggs ++ * Jeremy Kolb ++ */ ++ ++#include "drmP.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_dma.h" ++ ++static struct drm_ttm_backend * ++nouveau_bo_create_ttm_backend_entry(struct drm_device * dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ switch (dev_priv->gart_info.type) { ++ case NOUVEAU_GART_AGP: ++ return drm_agp_init_ttm(dev); ++ case NOUVEAU_GART_SGDMA: ++ return nouveau_sgdma_init_ttm(dev); ++ default: ++ DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type); ++ break; ++ } ++ ++ return NULL; ++} ++ ++static int ++nouveau_bo_fence_type(struct drm_buffer_object *bo, ++ uint32_t *fclass, uint32_t *type) ++{ ++ /* When we get called, *fclass is set to the requested fence class */ ++ ++ if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) ++ *type = 3; ++ else ++ *type = 1; ++ return 0; ++ ++} ++ ++static int ++nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags) ++{ ++ /* We'll do this from user space. */ ++ return 0; ++} ++ ++static int ++nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type, ++ struct drm_mem_type_manager *man) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ switch (type) { ++ case DRM_BO_MEM_LOCAL: ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CACHED; ++ man->drm_bus_maptype = 0; ++ break; ++ case DRM_BO_MEM_VRAM: ++ man->flags = _DRM_FLAG_MEMTYPE_FIXED | ++ _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_NEEDS_IOREMAP; ++ man->io_addr = NULL; ++ man->drm_bus_maptype = _DRM_FRAME_BUFFER; ++ man->io_offset = drm_get_resource_start(dev, 1); ++ man->io_size = drm_get_resource_len(dev, 1); ++ if (man->io_size > nouveau_mem_fb_amount(dev)) ++ man->io_size = nouveau_mem_fb_amount(dev); ++ break; ++ case DRM_BO_MEM_PRIV0: ++ /* Unmappable VRAM */ ++ man->flags = _DRM_FLAG_MEMTYPE_CMA; ++ man->drm_bus_maptype = 0; ++ break; ++ case DRM_BO_MEM_TT: ++ switch (dev_priv->gart_info.type) { ++ case NOUVEAU_GART_AGP: ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CSELECT | ++ _DRM_FLAG_NEEDS_IOREMAP; ++ man->drm_bus_maptype = _DRM_AGP; ++ break; ++ case NOUVEAU_GART_SGDMA: ++ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | ++ _DRM_FLAG_MEMTYPE_CSELECT | ++ _DRM_FLAG_MEMTYPE_CMA; ++ man->drm_bus_maptype = _DRM_SCATTER_GATHER; ++ break; ++ default: ++ DRM_ERROR("Unknown GART type: %d\n", ++ dev_priv->gart_info.type); ++ return -EINVAL; ++ } ++ ++ man->io_offset = dev_priv->gart_info.aper_base; ++ man->io_size = dev_priv->gart_info.aper_size; ++ man->io_addr = NULL; ++ break; ++ default: ++ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static uint64_t ++nouveau_bo_evict_flags(struct drm_buffer_object *bo) ++{ ++ switch (bo->mem.mem_type) { ++ case DRM_BO_MEM_LOCAL: ++ case DRM_BO_MEM_TT: ++ return DRM_BO_FLAG_MEM_LOCAL; ++ default: ++ return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; ++ } ++ return 0; ++} ++ ++ ++/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access ++ * DRM_BO_MEM_{VRAM,PRIV0,TT} directly. 
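++ * Each page is transferred as one PAGE_SIZE-byte "line" of the blit; the ++ * loop in nouveau_bo_move_m2mf() below submits at most 2047 lines per ++ * request, which appears to be the most the method accepts per call.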
++ */ ++static int ++nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait, ++ struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ uint32_t srch, dsth, page_count; ++ ++ /* Can happen during init/takedown */ ++ if (!dchan->chan) ++ return -EINVAL; ++ ++ srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; ++ dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; ++ if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) { ++ dchan->m2mf_dma_source = srch; ++ dchan->m2mf_dma_destin = dsth; ++ ++ BEGIN_RING(NvSubM2MF, ++ NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); ++ OUT_RING (dchan->m2mf_dma_source); ++ OUT_RING (dchan->m2mf_dma_destin); ++ } ++ ++ page_count = new_mem->num_pages; ++ while (page_count) { ++ int line_count = (page_count > 2047) ? 2047 : page_count; ++ ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); ++ OUT_RING (old_mem->mm_node->start << PAGE_SHIFT); ++ OUT_RING (new_mem->mm_node->start << PAGE_SHIFT); ++ OUT_RING (PAGE_SIZE); /* src_pitch */ ++ OUT_RING (PAGE_SIZE); /* dst_pitch */ ++ OUT_RING (PAGE_SIZE); /* line_length */ ++ OUT_RING (line_count); ++ OUT_RING ((1<<8)|(1<<0)); ++ OUT_RING (0); ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); ++ OUT_RING (0); ++ ++ page_count -= line_count; ++ } ++ ++ return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id, ++ DRM_FENCE_TYPE_EXE, 0, new_mem); ++} ++ ++/* Flip pages into the GART and move if we can. */ ++static int ++nouveau_bo_move_flipd(struct drm_buffer_object *bo, int evict, int no_wait, ++ struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_device *dev = bo->dev; ++ struct drm_bo_mem_reg tmp_mem; ++ int ret; ++ ++ tmp_mem = *new_mem; ++ tmp_mem.mm_node = NULL; ++ tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT | ++ DRM_BO_FLAG_CACHED | ++ DRM_BO_FLAG_FORCE_CACHING); ++ ++ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); ++ if (ret) ++ return ret; ++ ++ ret = drm_ttm_bind(bo->ttm, &tmp_mem); ++ if (ret) ++ goto out_cleanup; ++ ++ ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem); ++ if (ret) ++ goto out_cleanup; ++ ++ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); ++ ++out_cleanup: ++ if (tmp_mem.mm_node) { ++ mutex_lock(&dev->struct_mutex); ++ if (tmp_mem.mm_node != bo->pinned_node) ++ drm_mm_put_block(tmp_mem.mm_node); ++ tmp_mem.mm_node = NULL; ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ return ret; ++} ++ ++static int ++nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait, ++ struct drm_bo_mem_reg *new_mem) ++{ ++ struct drm_bo_mem_reg *old_mem = &bo->mem; ++ ++ if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ if (nouveau_bo_move_flipd(bo, evict, no_wait, new_mem)) ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } ++ else ++ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { ++ if (1 /*nouveau_bo_move_flips(bo, evict, no_wait, new_mem)*/) ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } ++ else { ++ if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem)) ++ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); ++ } ++ ++ return 0; ++} ++ ++static void ++nouveau_bo_flush_ttm(struct drm_ttm *ttm) ++{ ++} ++ ++static uint32_t nouveau_mem_prios[] = { ++ DRM_BO_MEM_PRIV0, ++ DRM_BO_MEM_VRAM, 
++ DRM_BO_MEM_TT, ++ DRM_BO_MEM_LOCAL ++}; ++static uint32_t nouveau_busy_prios[] = { ++ DRM_BO_MEM_TT, ++ DRM_BO_MEM_PRIV0, ++ DRM_BO_MEM_VRAM, ++ DRM_BO_MEM_LOCAL ++}; ++ ++struct drm_bo_driver nouveau_bo_driver = { ++ .mem_type_prio = nouveau_mem_prios, ++ .mem_busy_prio = nouveau_busy_prios, ++ .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t), ++ .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t), ++ .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, ++ .fence_type = nouveau_bo_fence_type, ++ .invalidate_caches = nouveau_bo_invalidate_caches, ++ .init_mem_type = nouveau_bo_init_mem_type, ++ .evict_flags = nouveau_bo_evict_flags, ++ .move = nouveau_bo_move, ++ .ttm_cache_flush= nouveau_bo_flush_ttm, ++ .command_stream_barrier = NULL ++}; +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_dma.c git-nokia/drivers/gpu/drm-tungsten/nouveau_dma.c +--- git/drivers/gpu/drm-tungsten/nouveau_dma.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_dma.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,172 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_dma.h" ++ ++int ++nouveau_dma_channel_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ struct nouveau_gpuobj *gpuobj = NULL; ++ struct mem_block *pushbuf; ++ int grclass, ret, i; ++ ++ DRM_DEBUG("\n"); ++ ++ pushbuf = nouveau_mem_alloc(dev, 0, 0x8000, ++ NOUVEAU_MEM_FB | NOUVEAU_MEM_MAPPED, ++ (struct drm_file *)-2); ++ if (!pushbuf) { ++ DRM_ERROR("Failed to allocate DMA push buffer\n"); ++ return -ENOMEM; ++ } ++ ++ /* Allocate channel */ ++ ret = nouveau_fifo_alloc(dev, &dchan->chan, (struct drm_file *)-2, ++ pushbuf, NvDmaFB, NvDmaTT); ++ if (ret) { ++ DRM_ERROR("Error allocating GPU channel: %d\n", ret); ++ return ret; ++ } ++ DRM_DEBUG("Using FIFO channel %d\n", dchan->chan->id); ++ ++ /* Map push buffer */ ++ drm_core_ioremap(dchan->chan->pushbuf_mem->map, dev); ++ if (!dchan->chan->pushbuf_mem->map->handle) { ++ DRM_ERROR("Failed to ioremap push buffer\n"); ++ return -EINVAL; ++ } ++ dchan->pushbuf = (void*)dchan->chan->pushbuf_mem->map->handle; ++ ++ /* Initialise DMA vars */ ++ dchan->max = (dchan->chan->pushbuf_mem->size >> 2) - 2; ++ dchan->put = dchan->chan->pushbuf_base >> 2; ++ dchan->cur = dchan->put; ++ dchan->free = dchan->max - dchan->cur; ++ ++ /* Insert NOPS for NOUVEAU_DMA_SKIPS */ ++ dchan->free -= NOUVEAU_DMA_SKIPS; ++ dchan->push_free = NOUVEAU_DMA_SKIPS; ++ for (i=0; i < NOUVEAU_DMA_SKIPS; i++) ++ OUT_RING(0); ++ ++ /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier */ ++ if ((ret = nouveau_notifier_alloc(dchan->chan, NvNotify0, 1, ++ &dchan->notify0_offset))) { ++ DRM_ERROR("Error allocating NvNotify0: %d\n", ret); ++ return ret; ++ } ++ ++ /* We use NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ ++ if (dev_priv->card_type < NV_50) grclass = NV_MEMORY_TO_MEMORY_FORMAT; ++ else grclass = NV50_MEMORY_TO_MEMORY_FORMAT; ++ if ((ret = nouveau_gpuobj_gr_new(dchan->chan, grclass, &gpuobj))) { ++ DRM_ERROR("Error creating NvM2MF: %d\n", ret); ++ return ret; ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, dchan->chan, NvM2MF, ++ gpuobj, NULL))) { ++ DRM_ERROR("Error referencing NvM2MF: %d\n", ret); ++ return ret; ++ } ++ dchan->m2mf_dma_source = NvDmaFB; ++ dchan->m2mf_dma_destin = NvDmaFB; ++ ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1); ++ OUT_RING (NvM2MF); ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY, 1); ++ OUT_RING (NvNotify0); ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); ++ OUT_RING (dchan->m2mf_dma_source); ++ OUT_RING (dchan->m2mf_dma_destin); ++ FIRE_RING(); ++ ++ return 0; ++} ++ ++void ++nouveau_dma_channel_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ ++ DRM_DEBUG("\n"); ++ ++ if (dchan->chan) { ++ nouveau_fifo_free(dchan->chan); ++ dchan->chan = NULL; ++ } ++} ++ ++#define READ_GET() ((NV_READ(dchan->chan->get) - \ ++ dchan->chan->pushbuf_base) >> 2) ++#define WRITE_PUT(val) do { \ ++ NV_WRITE(dchan->chan->put, \ ++ ((val) << 2) + dchan->chan->pushbuf_base); \ ++} while(0) ++ ++int ++nouveau_dma_wait(struct drm_device *dev, int size) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ uint32_t get; ++ ++ while (dchan->free < size) { ++ get = READ_GET(); ++ ++ if (dchan->put >= get) { ++ dchan->free = 
dchan->max - dchan->cur; ++ ++ if (dchan->free < size) { ++ dchan->push_free = 1; ++ OUT_RING(0x20000000|dchan->chan->pushbuf_base); ++ if (get <= NOUVEAU_DMA_SKIPS) { ++ /*corner case - will be idle*/ ++ if (dchan->put <= NOUVEAU_DMA_SKIPS) ++ WRITE_PUT(NOUVEAU_DMA_SKIPS + 1); ++ ++ do { ++ get = READ_GET(); ++ } while (get <= NOUVEAU_DMA_SKIPS); ++ } ++ ++ WRITE_PUT(NOUVEAU_DMA_SKIPS); ++ dchan->cur = dchan->put = NOUVEAU_DMA_SKIPS; ++ dchan->free = get - (NOUVEAU_DMA_SKIPS + 1); ++ } ++ } else { ++ dchan->free = get - dchan->cur - 1; ++ } ++ } ++ ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_dma.h git-nokia/drivers/gpu/drm-tungsten/nouveau_dma.h +--- git/drivers/gpu/drm-tungsten/nouveau_dma.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_dma.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,96 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#ifndef __NOUVEAU_DMA_H__ ++#define __NOUVEAU_DMA_H__ ++ ++typedef enum { ++ NvSubM2MF = 0, ++} nouveau_subchannel_id_t; ++ ++typedef enum { ++ NvM2MF = 0x80039001, ++ NvDmaFB = 0x8003d001, ++ NvDmaTT = 0x8003d002, ++ NvNotify0 = 0x8003d003 ++} nouveau_object_handle_t; ++ ++#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000 ++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000 ++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001 ++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY 0x00000180 ++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE 0x00000184 ++#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c ++ ++#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039 ++#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200 ++#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c ++#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238 ++#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c ++ ++#define BEGIN_RING(subc, mthd, cnt) do { \ ++ int push_size = (cnt) + 1; \ ++ if (dchan->push_free) { \ ++ DRM_ERROR("prior packet incomplete: %d\n", dchan->push_free); \ ++ break; \ ++ } \ ++ if (dchan->free < push_size) { \ ++ if (nouveau_dma_wait(dev, push_size)) { \ ++ DRM_ERROR("FIFO timeout\n"); \ ++ break; \ ++ } \ ++ } \ ++ dchan->free -= push_size; \ ++ dchan->push_free = push_size; \ ++ OUT_RING(((cnt)<<18) | ((subc)<<15) | mthd); \ ++} while(0) ++ ++#define OUT_RING(data) do { \ ++ if (dchan->push_free == 0) { \ ++ DRM_ERROR("no space left in packet\n"); \ ++ break; \ ++ } \ ++ dchan->pushbuf[dchan->cur++] = (data); \ ++ dchan->push_free--; \ ++} while(0) ++ ++#define FIRE_RING() do { \ ++ if (dchan->push_free) { \ ++ DRM_ERROR("packet incomplete: %d\n", dchan->push_free); \ ++ break; \ ++ } \ ++ if (dchan->cur != dchan->put) { \ ++ DRM_MEMORYBARRIER(); \ ++ dchan->put = dchan->cur; \ ++ NV_WRITE(dchan->chan->put, dchan->put << 2); \ ++ } \ ++} while(0) ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_drm.h git-nokia/drivers/gpu/drm-tungsten/nouveau_drm.h +--- git/drivers/gpu/drm-tungsten/nouveau_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_drm.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,184 @@ ++/* ++ * Copyright 2005 Stephane Marchesin. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef __NOUVEAU_DRM_H__ ++#define __NOUVEAU_DRM_H__ ++ ++#define NOUVEAU_DRM_HEADER_PATCHLEVEL 11 ++ ++struct drm_nouveau_channel_alloc { ++ uint32_t fb_ctxdma_handle; ++ uint32_t tt_ctxdma_handle; ++ ++ int channel; ++ uint32_t put_base; ++ /* FIFO control regs */ ++ drm_handle_t ctrl; ++ int ctrl_size; ++ /* DMA command buffer */ ++ drm_handle_t cmdbuf; ++ int cmdbuf_size; ++ /* Notifier memory */ ++ drm_handle_t notifier; ++ int notifier_size; ++}; ++ ++struct drm_nouveau_channel_free { ++ int channel; ++}; ++ ++struct drm_nouveau_grobj_alloc { ++ int channel; ++ uint32_t handle; ++ int class; ++}; ++ ++#define NOUVEAU_MEM_ACCESS_RO 1 ++#define NOUVEAU_MEM_ACCESS_WO 2 ++#define NOUVEAU_MEM_ACCESS_RW 3 ++struct drm_nouveau_notifierobj_alloc { ++ int channel; ++ uint32_t handle; ++ int count; ++ ++ uint32_t offset; ++}; ++ ++struct drm_nouveau_gpuobj_free { ++ int channel; ++ uint32_t handle; ++}; ++ ++/* This is needed to avoid a race condition. ++ * Otherwise you may be writing in the fetch area. ++ * Is this large enough, as it's only 32 bytes, and the maximum fetch size is 256 bytes? ++ */ ++#define NOUVEAU_DMA_SKIPS 8 ++ ++#define NOUVEAU_MEM_FB 0x00000001 ++#define NOUVEAU_MEM_AGP 0x00000002 ++#define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004 ++#define NOUVEAU_MEM_AGP_ACCEPTABLE 0x00000008 ++#define NOUVEAU_MEM_PCI 0x00000010 ++#define NOUVEAU_MEM_PCI_ACCEPTABLE 0x00000020 ++#define NOUVEAU_MEM_PINNED 0x00000040 ++#define NOUVEAU_MEM_USER_BACKED 0x00000080 ++#define NOUVEAU_MEM_MAPPED 0x00000100 ++#define NOUVEAU_MEM_TILE 0x00000200 ++#define NOUVEAU_MEM_TILE_ZETA 0x00000400 ++#define NOUVEAU_MEM_INSTANCE 0x01000000 /* internal */ ++#define NOUVEAU_MEM_NOTIFIER 0x02000000 /* internal */ ++#define NOUVEAU_MEM_NOVM 0x04000000 /* internal */ ++#define NOUVEAU_MEM_USER 0x08000000 /* internal */ ++#define NOUVEAU_MEM_INTERNAL (NOUVEAU_MEM_INSTANCE | \ ++ NOUVEAU_MEM_NOTIFIER | \ ++ NOUVEAU_MEM_NOVM | \ ++ NOUVEAU_MEM_USER) ++ ++struct drm_nouveau_mem_alloc { ++ int flags; ++ int alignment; ++ uint64_t size; // in bytes ++ uint64_t offset; ++ drm_handle_t map_handle; ++}; ++ ++struct drm_nouveau_mem_free { ++ uint64_t offset; ++ int flags; ++}; ++ ++struct drm_nouveau_mem_tile { ++ uint64_t offset; ++ uint64_t delta; ++ uint64_t size; ++ int flags; ++}; ++ ++/* FIXME : maybe unify {GET,SET}PARAMs */ ++#define NOUVEAU_GETPARAM_PCI_VENDOR 3 ++#define NOUVEAU_GETPARAM_PCI_DEVICE 4 ++#define NOUVEAU_GETPARAM_BUS_TYPE 5 ++#define NOUVEAU_GETPARAM_FB_PHYSICAL 6 ++#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7 ++#define NOUVEAU_GETPARAM_FB_SIZE 8 ++#define NOUVEAU_GETPARAM_AGP_SIZE 9 ++#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 ++#define NOUVEAU_GETPARAM_CHIPSET_ID 11 ++struct drm_nouveau_getparam { ++ uint64_t param; ++ uint64_t value; ++}; ++ ++#define NOUVEAU_SETPARAM_CMDBUF_LOCATION 1 ++#define NOUVEAU_SETPARAM_CMDBUF_SIZE 2 ++struct drm_nouveau_setparam { ++ uint64_t param; ++ uint64_t value; ++}; ++ ++enum nouveau_card_type { ++ NV_UNKNOWN =0, ++ NV_04 =4, ++ NV_05 =5, ++ NV_10 =10, ++ NV_11 =11, ++ NV_17 =17, ++ NV_20 =20, ++ NV_30 =30, ++ NV_40 =40, ++ NV_44 =44, ++ NV_50 =50, ++ NV_LAST =0xffff, ++}; ++ ++enum nouveau_bus_type { ++ NV_AGP =0, ++ NV_PCI =1, ++ NV_PCIE =2, ++}; ++ ++#define 
NOUVEAU_MAX_SAREA_CLIPRECTS 16 ++ ++struct drm_nouveau_sarea { ++ /* the cliprects */ ++ struct drm_clip_rect boxes[NOUVEAU_MAX_SAREA_CLIPRECTS]; ++ unsigned int nbox; ++}; ++ ++#define DRM_NOUVEAU_CARD_INIT 0x00 ++#define DRM_NOUVEAU_GETPARAM 0x01 ++#define DRM_NOUVEAU_SETPARAM 0x02 ++#define DRM_NOUVEAU_CHANNEL_ALLOC 0x03 ++#define DRM_NOUVEAU_CHANNEL_FREE 0x04 ++#define DRM_NOUVEAU_GROBJ_ALLOC 0x05 ++#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06 ++#define DRM_NOUVEAU_GPUOBJ_FREE 0x07 ++#define DRM_NOUVEAU_MEM_ALLOC 0x08 ++#define DRM_NOUVEAU_MEM_FREE 0x09 ++#define DRM_NOUVEAU_MEM_TILE 0x0a ++#define DRM_NOUVEAU_SUSPEND 0x0b ++#define DRM_NOUVEAU_RESUME 0x0c ++ ++#endif /* __NOUVEAU_DRM_H__ */ +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_drv.c git-nokia/drivers/gpu/drm-tungsten/nouveau_drv.c +--- git/drivers/gpu/drm-tungsten/nouveau_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_drv.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,120 @@ ++/* ++ * Copyright 2005 Stephane Marchesin. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ { ++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), ++ .class = PCI_BASE_CLASS_DISPLAY << 16, ++ .class_mask = 0xff << 16, ++ }, ++ { ++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID), ++ .class = PCI_BASE_CLASS_DISPLAY << 16, ++ .class_mask = 0xff << 16, ++ } ++}; ++ ++extern struct drm_ioctl_desc nouveau_ioctls[]; ++extern int nouveau_max_ioctl; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | ++ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, ++ .load = nouveau_load, ++ .firstopen = nouveau_firstopen, ++ .lastclose = nouveau_lastclose, ++ .unload = nouveau_unload, ++ .preclose = nouveau_preclose, ++ .irq_preinstall = nouveau_irq_preinstall, ++ .irq_postinstall = nouveau_irq_postinstall, ++ .irq_uninstall = nouveau_irq_uninstall, ++ .irq_handler = nouveau_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = nouveau_ioctls, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) ++ .compat_ioctl = nouveau_compat_ioctl, ++#endif ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .bo_driver = &nouveau_bo_driver, ++ .fence_driver = &nouveau_fence_driver, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++#ifdef GIT_REVISION ++ .date = GIT_REVISION, ++#else ++ .date = DRIVER_DATE, ++#endif ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++static int __init nouveau_init(void) ++{ ++ driver.num_ioctls = nouveau_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit nouveau_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(nouveau_init); ++module_exit(nouveau_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_drv.h git-nokia/drivers/gpu/drm-tungsten/nouveau_drv.h +--- git/drivers/gpu/drm-tungsten/nouveau_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_drv.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,621 @@ ++/* ++ * Copyright 2005 Stephane Marchesin. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef __NOUVEAU_DRV_H__ ++#define __NOUVEAU_DRV_H__ ++ ++#define DRIVER_AUTHOR "Stephane Marchesin" ++#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net" ++ ++#define DRIVER_NAME "nouveau" ++#define DRIVER_DESC "nVidia Riva/TNT/GeForce" ++#define DRIVER_DATE "20060213" ++ ++#define DRIVER_MAJOR 0 ++#define DRIVER_MINOR 0 ++#define DRIVER_PATCHLEVEL 11 ++ ++#define NOUVEAU_FAMILY 0x0000FFFF ++#define NOUVEAU_FLAGS 0xFFFF0000 ++ ++#include "nouveau_drm.h" ++#include "nouveau_reg.h" ++ ++struct mem_block { ++ struct mem_block *next; ++ struct mem_block *prev; ++ uint64_t start; ++ uint64_t size; ++ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ ++ int flags; ++ drm_local_map_t *map; ++ drm_handle_t map_handle; ++}; ++ ++enum nouveau_flags { ++ NV_NFORCE =0x10000000, ++ NV_NFORCE2 =0x20000000 ++}; ++ ++#define NVOBJ_ENGINE_SW 0 ++#define NVOBJ_ENGINE_GR 1 ++#define NVOBJ_ENGINE_INT 0xdeadbeef ++ ++#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0) ++#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) ++#define NVOBJ_FLAG_ZERO_FREE (1 << 2) ++#define NVOBJ_FLAG_FAKE (1 << 3) ++struct nouveau_gpuobj { ++ struct list_head list; ++ ++ int im_channel; ++ struct mem_block *im_pramin; ++ struct mem_block *im_backing; ++ int im_bound; ++ ++ uint32_t flags; ++ int refcount; ++ ++ uint32_t engine; ++ uint32_t class; ++ ++ void (*dtor)(struct drm_device *, struct nouveau_gpuobj *); ++ void *priv; ++}; ++ ++struct nouveau_gpuobj_ref { ++ struct list_head list; ++ ++ struct nouveau_gpuobj *gpuobj; ++ uint32_t instance; ++ ++ int channel; ++ int handle; ++}; ++ ++struct nouveau_channel ++{ ++ struct drm_device *dev; ++ int id; ++ ++ /* owner of this fifo */ ++ struct drm_file *file_priv; ++ /* mapping of the fifo itself */ ++ drm_local_map_t *map; ++ /* mapping of the regs controlling the fifo */ ++ drm_local_map_t *regs; ++ ++ /* Fencing */ ++ uint32_t next_sequence; ++ ++ /* DMA push buffer */ ++ struct nouveau_gpuobj_ref *pushbuf; ++ struct mem_block *pushbuf_mem; ++ uint32_t pushbuf_base; ++ ++ /* FIFO user control regs */ ++ uint32_t user, user_size; ++ uint32_t put; ++ uint32_t get; ++ uint32_t ref_cnt; ++ ++ /* Notifier memory */ ++ struct mem_block *notifier_block; ++ struct mem_block *notifier_heap; ++ drm_local_map_t *notifier_map; ++ ++ /* PFIFO context */ ++ struct nouveau_gpuobj_ref *ramfc; ++ ++ /* PGRAPH context */ ++ /* XXX maybe merge the two pointers below as private data? 
*/ ++ struct nouveau_gpuobj_ref *ramin_grctx; ++ void *pgraph_ctx; ++ ++ /* NV50 VM */ ++ struct nouveau_gpuobj *vm_pd; ++ struct nouveau_gpuobj_ref *vm_gart_pt; ++ struct nouveau_gpuobj_ref *vm_vram_pt; ++ ++ /* Objects */ ++ struct nouveau_gpuobj_ref *ramin; /* Private instmem */ ++ struct mem_block *ramin_heap; /* Private PRAMIN heap */ ++ struct nouveau_gpuobj_ref *ramht; /* Hash table */ ++ struct list_head ramht_refs; /* Objects referenced by RAMHT */ ++}; ++ ++struct nouveau_drm_channel { ++ struct nouveau_channel *chan; ++ ++ /* DMA state */ ++ int max, put, cur, free; ++ int push_free; ++ volatile uint32_t *pushbuf; ++ ++ /* Notifiers */ ++ uint32_t notify0_offset; ++ ++ /* Buffer moves */ ++ uint32_t m2mf_dma_source; ++ uint32_t m2mf_dma_destin; ++}; ++ ++struct nouveau_config { ++ struct { ++ int location; ++ int size; ++ } cmdbuf; ++}; ++ ++struct nouveau_instmem_engine { ++ void *priv; ++ ++ int (*init)(struct drm_device *dev); ++ void (*takedown)(struct drm_device *dev); ++ ++ int (*populate)(struct drm_device *, struct nouveau_gpuobj *, ++ uint32_t *size); ++ void (*clear)(struct drm_device *, struct nouveau_gpuobj *); ++ int (*bind)(struct drm_device *, struct nouveau_gpuobj *); ++ int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); ++}; ++ ++struct nouveau_mc_engine { ++ int (*init)(struct drm_device *dev); ++ void (*takedown)(struct drm_device *dev); ++}; ++ ++struct nouveau_timer_engine { ++ int (*init)(struct drm_device *dev); ++ void (*takedown)(struct drm_device *dev); ++ uint64_t (*read)(struct drm_device *dev); ++}; ++ ++struct nouveau_fb_engine { ++ int (*init)(struct drm_device *dev); ++ void (*takedown)(struct drm_device *dev); ++}; ++ ++struct nouveau_fifo_engine { ++ void *priv; ++ ++ int channels; ++ ++ int (*init)(struct drm_device *); ++ void (*takedown)(struct drm_device *); ++ ++ int (*channel_id)(struct drm_device *); ++ ++ int (*create_context)(struct nouveau_channel *); ++ void (*destroy_context)(struct nouveau_channel *); ++ int (*load_context)(struct nouveau_channel *); ++ int (*save_context)(struct nouveau_channel *); ++}; ++ ++struct nouveau_pgraph_engine { ++ int (*init)(struct drm_device *); ++ void (*takedown)(struct drm_device *); ++ ++ int (*create_context)(struct nouveau_channel *); ++ void (*destroy_context)(struct nouveau_channel *); ++ int (*load_context)(struct nouveau_channel *); ++ int (*save_context)(struct nouveau_channel *); ++}; ++ ++struct nouveau_engine { ++ struct nouveau_instmem_engine instmem; ++ struct nouveau_mc_engine mc; ++ struct nouveau_timer_engine timer; ++ struct nouveau_fb_engine fb; ++ struct nouveau_pgraph_engine graph; ++ struct nouveau_fifo_engine fifo; ++}; ++ ++#define NOUVEAU_MAX_CHANNEL_NR 128 ++struct drm_nouveau_private { ++ enum { ++ NOUVEAU_CARD_INIT_DOWN, ++ NOUVEAU_CARD_INIT_DONE, ++ NOUVEAU_CARD_INIT_FAILED ++ } init_state; ++ ++ int ttm; ++ ++ /* the card type, takes NV_* as values */ ++ int card_type; ++ /* exact chipset, derived from NV_PMC_BOOT_0 */ ++ int chipset; ++ int flags; ++ ++ drm_local_map_t *mmio; ++ drm_local_map_t *fb; ++ drm_local_map_t *ramin; /* NV40 onwards */ ++ ++ int fifo_alloc_count; ++ struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; ++ ++ struct nouveau_engine Engine; ++ struct nouveau_drm_channel channel; ++ ++ /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ ++ struct nouveau_gpuobj *ramht; ++ uint32_t ramin_rsvd_vram; ++ uint32_t ramht_offset; ++ uint32_t ramht_size; ++ uint32_t ramht_bits; ++ uint32_t ramfc_offset; ++ uint32_t ramfc_size; ++ uint32_t 
ramro_offset; ++ uint32_t ramro_size; ++ ++ /* base physical addresses */ ++ uint64_t fb_phys; ++ uint64_t fb_available_size; ++ ++ struct { ++ enum { ++ NOUVEAU_GART_NONE = 0, ++ NOUVEAU_GART_AGP, ++ NOUVEAU_GART_SGDMA ++ } type; ++ uint64_t aper_base; ++ uint64_t aper_size; ++ ++ struct nouveau_gpuobj *sg_ctxdma; ++ struct page *sg_dummy_page; ++ dma_addr_t sg_dummy_bus; ++ ++ /* nottm hack */ ++ struct drm_ttm_backend *sg_be; ++ unsigned long sg_handle; ++ } gart_info; ++ ++ /* G8x global VRAM page table */ ++ struct nouveau_gpuobj *vm_vram_pt; ++ ++ /* the mtrr covering the FB */ ++ int fb_mtrr; ++ ++ struct mem_block *agp_heap; ++ struct mem_block *fb_heap; ++ struct mem_block *fb_nomap_heap; ++ struct mem_block *ramin_heap; ++ struct mem_block *pci_heap; ++ ++ /* context table pointed to by NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */ ++ uint32_t ctx_table_size; ++ struct nouveau_gpuobj_ref *ctx_table; ++ ++ struct nouveau_config config; ++ ++ struct list_head gpuobj_list; ++ ++ struct nouveau_suspend_resume { ++ uint32_t fifo_mode; ++ uint32_t graph_ctx_control; ++ uint32_t graph_state; ++ uint32_t *ramin_copy; ++ uint64_t ramin_size; ++ } susres; ++}; ++ ++#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \ ++ struct drm_nouveau_private *nv = dev->dev_private; \ ++ if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \ ++ DRM_ERROR("called without init\n"); \ ++ return -EINVAL; \ ++ } \ ++} while(0) ++ ++#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id,cl,ch) do { \ ++ struct drm_nouveau_private *nv = dev->dev_private; \ ++ if (!nouveau_fifo_owner(dev, (cl), (id))) { \ ++ DRM_ERROR("pid %d doesn't own channel %d\n", \ ++ DRM_CURRENTPID, (id)); \ ++ return -EPERM; \ ++ } \ ++ (ch) = nv->fifos[(id)]; \ ++} while(0) ++ ++/* nouveau_state.c */ ++extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); ++extern int nouveau_load(struct drm_device *, unsigned long flags); ++extern int nouveau_firstopen(struct drm_device *); ++extern void nouveau_lastclose(struct drm_device *); ++extern int nouveau_unload(struct drm_device *); ++extern int nouveau_ioctl_getparam(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_setparam(struct drm_device *, void *data, ++ struct drm_file *); ++extern void nouveau_wait_for_idle(struct drm_device *); ++extern int nouveau_card_init(struct drm_device *); ++extern int nouveau_ioctl_card_init(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_suspend(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_resume(struct drm_device *, void *data, ++ struct drm_file *); ++ ++/* nouveau_mem.c */ ++extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, ++ uint64_t size); ++extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, ++ uint64_t size, int align2, ++ struct drm_file *, int tail); ++extern void nouveau_mem_takedown(struct mem_block **heap); ++extern void nouveau_mem_free_block(struct mem_block *); ++extern uint64_t nouveau_mem_fb_amount(struct drm_device *); ++extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); ++extern int nouveau_ioctl_mem_alloc(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_mem_free(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_mem_tile(struct drm_device *, void *data, ++ struct drm_file *); ++extern struct mem_block* nouveau_mem_alloc(struct drm_device *, ++ int alignment, uint64_t size, ++ int flags, struct drm_file 
*); ++extern void nouveau_mem_free(struct drm_device *dev, struct mem_block*); ++extern int nouveau_mem_init(struct drm_device *); ++extern int nouveau_mem_init_ttm(struct drm_device *); ++extern void nouveau_mem_close(struct drm_device *); ++ ++/* nouveau_notifier.c */ ++extern int nouveau_notifier_init_channel(struct nouveau_channel *); ++extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); ++extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, ++ int count, uint32_t *offset); ++extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data, ++ struct drm_file *); ++ ++/* nouveau_fifo.c */ ++extern int nouveau_fifo_init(struct drm_device *); ++extern int nouveau_fifo_ctx_size(struct drm_device *); ++extern void nouveau_fifo_cleanup(struct drm_device *, struct drm_file *); ++extern int nouveau_fifo_owner(struct drm_device *, struct drm_file *, ++ int channel); ++extern int nouveau_fifo_alloc(struct drm_device *dev, ++ struct nouveau_channel **chan, ++ struct drm_file *file_priv, ++ struct mem_block *pushbuf, ++ uint32_t fb_ctxdma, uint32_t tt_ctxdma); ++extern void nouveau_fifo_free(struct nouveau_channel *); ++extern int nouveau_channel_idle(struct nouveau_channel *chan); ++ ++/* nouveau_object.c */ ++extern int nouveau_gpuobj_early_init(struct drm_device *); ++extern int nouveau_gpuobj_init(struct drm_device *); ++extern void nouveau_gpuobj_takedown(struct drm_device *); ++extern void nouveau_gpuobj_late_takedown(struct drm_device *); ++extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, ++ uint32_t vram_h, uint32_t tt_h); ++extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); ++extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *, ++ int size, int align, uint32_t flags, ++ struct nouveau_gpuobj **); ++extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **); ++extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *, ++ uint32_t handle, struct nouveau_gpuobj *, ++ struct nouveau_gpuobj_ref **); ++extern int nouveau_gpuobj_ref_del(struct drm_device *, ++ struct nouveau_gpuobj_ref **); ++extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle, ++ struct nouveau_gpuobj_ref **ref_ret); ++extern int nouveau_gpuobj_new_ref(struct drm_device *, ++ struct nouveau_channel *alloc_chan, ++ struct nouveau_channel *ref_chan, ++ uint32_t handle, int size, int align, ++ uint32_t flags, struct nouveau_gpuobj_ref **); ++extern int nouveau_gpuobj_new_fake(struct drm_device *, ++ uint32_t p_offset, uint32_t b_offset, ++ uint32_t size, uint32_t flags, ++ struct nouveau_gpuobj **, ++ struct nouveau_gpuobj_ref**); ++extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, ++ uint64_t offset, uint64_t size, int access, ++ int target, struct nouveau_gpuobj **); ++extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *, ++ uint64_t offset, uint64_t size, ++ int access, struct nouveau_gpuobj **, ++ uint32_t *o_ret); ++extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, ++ struct nouveau_gpuobj **); ++extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, ++ struct drm_file *); ++extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, ++ struct drm_file *); ++ ++/* nouveau_irq.c */ ++extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); ++extern void nouveau_irq_preinstall(struct 
drm_device *); ++extern int nouveau_irq_postinstall(struct drm_device *); ++extern void nouveau_irq_uninstall(struct drm_device *); ++ ++/* nouveau_sgdma.c */ ++extern int nouveau_sgdma_init(struct drm_device *); ++extern void nouveau_sgdma_takedown(struct drm_device *); ++extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset, ++ uint32_t *page); ++extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); ++extern int nouveau_sgdma_nottm_hack_init(struct drm_device *); ++extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *); ++ ++/* nouveau_dma.c */ ++extern int nouveau_dma_channel_init(struct drm_device *); ++extern void nouveau_dma_channel_takedown(struct drm_device *); ++extern int nouveau_dma_wait(struct drm_device *, int size); ++ ++/* nv04_fb.c */ ++extern int nv04_fb_init(struct drm_device *); ++extern void nv04_fb_takedown(struct drm_device *); ++ ++/* nv10_fb.c */ ++extern int nv10_fb_init(struct drm_device *); ++extern void nv10_fb_takedown(struct drm_device *); ++ ++/* nv40_fb.c */ ++extern int nv40_fb_init(struct drm_device *); ++extern void nv40_fb_takedown(struct drm_device *); ++ ++/* nv04_fifo.c */ ++extern int nv04_fifo_channel_id(struct drm_device *); ++extern int nv04_fifo_create_context(struct nouveau_channel *); ++extern void nv04_fifo_destroy_context(struct nouveau_channel *); ++extern int nv04_fifo_load_context(struct nouveau_channel *); ++extern int nv04_fifo_save_context(struct nouveau_channel *); ++ ++/* nv10_fifo.c */ ++extern int nv10_fifo_channel_id(struct drm_device *); ++extern int nv10_fifo_create_context(struct nouveau_channel *); ++extern void nv10_fifo_destroy_context(struct nouveau_channel *); ++extern int nv10_fifo_load_context(struct nouveau_channel *); ++extern int nv10_fifo_save_context(struct nouveau_channel *); ++ ++/* nv40_fifo.c */ ++extern int nv40_fifo_init(struct drm_device *); ++extern int nv40_fifo_create_context(struct nouveau_channel *); ++extern void nv40_fifo_destroy_context(struct nouveau_channel *); ++extern int nv40_fifo_load_context(struct nouveau_channel *); ++extern int nv40_fifo_save_context(struct nouveau_channel *); ++ ++/* nv50_fifo.c */ ++extern int nv50_fifo_init(struct drm_device *); ++extern void nv50_fifo_takedown(struct drm_device *); ++extern int nv50_fifo_channel_id(struct drm_device *); ++extern int nv50_fifo_create_context(struct nouveau_channel *); ++extern void nv50_fifo_destroy_context(struct nouveau_channel *); ++extern int nv50_fifo_load_context(struct nouveau_channel *); ++extern int nv50_fifo_save_context(struct nouveau_channel *); ++ ++/* nv04_graph.c */ ++extern void nouveau_nv04_context_switch(struct drm_device *); ++extern int nv04_graph_init(struct drm_device *); ++extern void nv04_graph_takedown(struct drm_device *); ++extern int nv04_graph_create_context(struct nouveau_channel *); ++extern void nv04_graph_destroy_context(struct nouveau_channel *); ++extern int nv04_graph_load_context(struct nouveau_channel *); ++extern int nv04_graph_save_context(struct nouveau_channel *); ++ ++/* nv10_graph.c */ ++extern void nouveau_nv10_context_switch(struct drm_device *); ++extern int nv10_graph_init(struct drm_device *); ++extern void nv10_graph_takedown(struct drm_device *); ++extern int nv10_graph_create_context(struct nouveau_channel *); ++extern void nv10_graph_destroy_context(struct nouveau_channel *); ++extern int nv10_graph_load_context(struct nouveau_channel *); ++extern int nv10_graph_save_context(struct nouveau_channel *); ++ ++/* nv20_graph.c */ ++extern int 
nv20_graph_create_context(struct nouveau_channel *); ++extern void nv20_graph_destroy_context(struct nouveau_channel *); ++extern int nv20_graph_load_context(struct nouveau_channel *); ++extern int nv20_graph_save_context(struct nouveau_channel *); ++extern int nv20_graph_init(struct drm_device *); ++extern void nv20_graph_takedown(struct drm_device *); ++extern int nv30_graph_init(struct drm_device *); ++ ++/* nv40_graph.c */ ++extern int nv40_graph_init(struct drm_device *); ++extern void nv40_graph_takedown(struct drm_device *); ++extern int nv40_graph_create_context(struct nouveau_channel *); ++extern void nv40_graph_destroy_context(struct nouveau_channel *); ++extern int nv40_graph_load_context(struct nouveau_channel *); ++extern int nv40_graph_save_context(struct nouveau_channel *); ++ ++/* nv50_graph.c */ ++extern int nv50_graph_init(struct drm_device *); ++extern void nv50_graph_takedown(struct drm_device *); ++extern int nv50_graph_create_context(struct nouveau_channel *); ++extern void nv50_graph_destroy_context(struct nouveau_channel *); ++extern int nv50_graph_load_context(struct nouveau_channel *); ++extern int nv50_graph_save_context(struct nouveau_channel *); ++ ++/* nv04_instmem.c */ ++extern int nv04_instmem_init(struct drm_device *); ++extern void nv04_instmem_takedown(struct drm_device *); ++extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, ++ uint32_t *size); ++extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); ++ ++/* nv50_instmem.c */ ++extern int nv50_instmem_init(struct drm_device *); ++extern void nv50_instmem_takedown(struct drm_device *); ++extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, ++ uint32_t *size); ++extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); ++ ++/* nv04_mc.c */ ++extern int nv04_mc_init(struct drm_device *); ++extern void nv04_mc_takedown(struct drm_device *); ++ ++/* nv40_mc.c */ ++extern int nv40_mc_init(struct drm_device *); ++extern void nv40_mc_takedown(struct drm_device *); ++ ++/* nv50_mc.c */ ++extern int nv50_mc_init(struct drm_device *); ++extern void nv50_mc_takedown(struct drm_device *); ++ ++/* nv04_timer.c */ ++extern int nv04_timer_init(struct drm_device *); ++extern uint64_t nv04_timer_read(struct drm_device *); ++extern void nv04_timer_takedown(struct drm_device *); ++ ++extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg); ++ ++/* nouveau_buffer.c */ ++extern struct drm_bo_driver nouveau_bo_driver; ++ ++/* nouveau_fence.c */ ++extern struct drm_fence_driver nouveau_fence_driver; ++extern void nouveau_fence_handler(struct drm_device *dev, int channel); ++ ++#if defined(__powerpc__) ++#define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) ) ++#define NV_WRITE(reg,val) out_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) , (val) ) ++#else ++#define NV_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) ++#define NV_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) ++#endif ++ ++/* PRAMIN access */ ++#if defined(__powerpc__) ++#define NV_RI32(o) in_be32((void __iomem *)(dev_priv->ramin)->handle+(o)) ++#define NV_WI32(o,v) out_be32((void 
__iomem*)(dev_priv->ramin)->handle+(o), (v)) ++#else ++#define NV_RI32(o) DRM_READ32(dev_priv->ramin, (o)) ++#define NV_WI32(o,v) DRM_WRITE32(dev_priv->ramin, (o), (v)) ++#endif ++ ++#define INSTANCE_RD(o,i) NV_RI32((o)->im_pramin->start + ((i)<<2)) ++#define INSTANCE_WR(o,i,v) NV_WI32((o)->im_pramin->start + ((i)<<2), (v)) ++ ++#endif /* __NOUVEAU_DRV_H__ */ +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_fence.c git-nokia/drivers/gpu/drm-tungsten/nouveau_fence.c +--- git/drivers/gpu/drm-tungsten/nouveau_fence.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_fence.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,119 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_dma.h" ++ ++static int ++nouveau_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags); ++ ++ /* DRM's channel always uses IRQs to signal fences */ ++ if (class == dev_priv->channel.chan->id) ++ return 1; ++ ++ /* Other channels don't use IRQs at all yet */ ++ return 0; ++} ++ ++static int ++nouveau_fence_emit(struct drm_device *dev, uint32_t class, uint32_t flags, ++ uint32_t *breadcrumb, uint32_t *native_type) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan = dev_priv->fifos[class]; ++ struct nouveau_drm_channel *dchan = &dev_priv->channel; ++ ++ DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags); ++ ++ /* We can't emit fences on client channels, update sequence number ++ * and userspace will emit the fence ++ */ ++ *breadcrumb = ++chan->next_sequence; ++ *native_type = DRM_FENCE_TYPE_EXE; ++ if (chan != dchan->chan) { ++ DRM_DEBUG("user fence 0x%08x\n", *breadcrumb); ++ return 0; ++ } ++ ++ DRM_DEBUG("emit 0x%08x\n", *breadcrumb); ++ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_REF, 1); ++ OUT_RING (*breadcrumb); ++ BEGIN_RING(NvSubM2MF, 0x0150, 1); ++ OUT_RING (0); ++ FIRE_RING (); ++ ++ return 0; ++} ++ ++static void ++nouveau_fence_poll(struct drm_device *dev, uint32_t class, uint32_t waiting_types) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_fence_class_manager *fc = &dev->fm.fence_class[class]; ++ struct nouveau_channel *chan = dev_priv->fifos[class]; ++ ++ DRM_DEBUG("class=%d\n", class); ++ DRM_DEBUG("pending: 0x%08x 0x%08x\n", waiting_types, fc->waiting_types); ++ ++ if (waiting_types & DRM_FENCE_TYPE_EXE) { ++ uint32_t sequence = NV_READ(chan->ref_cnt); ++ ++ DRM_DEBUG("got 0x%08x\n", sequence); ++ drm_fence_handler(dev, class, sequence, waiting_types, 0); ++ } ++} ++ ++void ++nouveau_fence_handler(struct drm_device *dev, int channel) ++{ ++ struct drm_fence_manager *fm = &dev->fm; ++ struct drm_fence_class_manager *fc = &fm->fence_class[channel]; ++ ++ DRM_DEBUG("class=%d\n", channel); ++ ++ write_lock(&fm->lock); ++ nouveau_fence_poll(dev, channel, fc->waiting_types); ++ write_unlock(&fm->lock); ++} ++ ++struct drm_fence_driver nouveau_fence_driver = { ++ .num_classes = 8, ++ .wrap_diff = (1 << 30), ++ .flush_diff = (1 << 29), ++ .sequence_mask = 0xffffffffU, ++ .has_irq = nouveau_fence_has_irq, ++ .emit = nouveau_fence_emit, ++ .flush = NULL, ++ .poll = nouveau_fence_poll, ++ .needed_flush = NULL, ++ .wait = NULL ++}; +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_fifo.c git-nokia/drivers/gpu/drm-tungsten/nouveau_fifo.c +--- git/drivers/gpu/drm-tungsten/nouveau_fifo.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_fifo.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,601 @@ ++/* ++ * Copyright 2005-2006 Stephane Marchesin ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++ ++/* returns the size of fifo context */ ++int nouveau_fifo_ctx_size(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ ++ if (dev_priv->card_type >= NV_40) ++ return 128; ++ else if (dev_priv->card_type >= NV_17) ++ return 64; ++ else ++ return 32; ++} ++ ++/*********************************** ++ * functions doing the actual work ++ ***********************************/ ++ ++static int nouveau_fifo_instmem_configure(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV03_PFIFO_RAMHT, ++ (0x03 << 24) /* search 128 */ | ++ ((dev_priv->ramht_bits - 9) << 16) | ++ (dev_priv->ramht_offset >> 8) ++ ); ++ ++ NV_WRITE(NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); ++ ++ switch(dev_priv->card_type) ++ { ++ case NV_40: ++ switch (dev_priv->chipset) { ++ case 0x47: ++ case 0x49: ++ case 0x4b: ++ NV_WRITE(0x2230, 1); ++ break; ++ default: ++ break; ++ } ++ NV_WRITE(NV40_PFIFO_RAMFC, 0x30002); ++ break; ++ case NV_44: ++ NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) | ++ (2 << 16)); ++ break; ++ case NV_30: ++ case NV_20: ++ case NV_17: ++ NV_WRITE(NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) | ++ (1 << 16) /* 64 Bytes entry*/); ++ /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */ ++ break; ++ case NV_11: ++ case NV_10: ++ case NV_04: ++ NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8); ++ break; ++ } ++ ++ return 0; ++} ++ ++int nouveau_fifo_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PFIFO); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PFIFO); ++ ++ /* Enable PFIFO error reporting */ ++ NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF); ++ NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); ++ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ ++ ret = nouveau_fifo_instmem_configure(dev); ++ if (ret) { ++ DRM_ERROR("Failed to configure instance memory\n"); ++ return ret; ++ } ++ ++ /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */ ++ ++ DRM_DEBUG("Setting defaults for remaining PFIFO regs\n"); ++ ++ /* All channels into PIO mode */ ++ NV_WRITE(NV04_PFIFO_MODE, 
0x00000000); ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ /* Channel 0 active, PIO mode */ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000000); ++ /* PUT and GET to 0 */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0x00000000); ++ /* No cmdbuf object */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, 0x00000000); ++ NV_WRITE(NV03_PFIFO_CACHE0_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_SIZE, 0x0000FFFF); ++ NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF); ++ NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 | ++#ifdef __BIG_ENDIAN ++ NV_PFIFO_CACHE1_BIG_ENDIAN | ++#endif ++ 0x00000000); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001); ++ ++ /* FIXME on NV04 */ ++ if (dev_priv->card_type >= NV_10) { ++ NV_WRITE(NV10_PGRAPH_CTX_USER, 0x0); ++ NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ ); ++ if (dev_priv->card_type >= NV_40) ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x00002001); ++ else ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10110000); ++ } else { ++ NV_WRITE(NV04_PGRAPH_CTX_USER, 0x0); ++ NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ ); ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10110000); ++ } ++ ++ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x001fffff); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); ++ return 0; ++} ++ ++static int ++nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct mem_block *pb = chan->pushbuf_mem; ++ struct nouveau_gpuobj *pushbuf = NULL; ++ int ret; ++ ++ if (pb->flags & NOUVEAU_MEM_AGP) { ++ ret = nouveau_gpuobj_gart_dma_new(chan, pb->start, pb->size, ++ NV_DMA_ACCESS_RO, ++ &pushbuf, ++ &chan->pushbuf_base); ++ } else ++ if (pb->flags & NOUVEAU_MEM_PCI) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ pb->start, pb->size, ++ NV_DMA_ACCESS_RO, ++ NV_DMA_TARGET_PCI_NONLINEAR, ++ &pushbuf); ++ chan->pushbuf_base = 0; ++ } else if (dev_priv->card_type != NV_04) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ pb->start, pb->size, ++ NV_DMA_ACCESS_RO, ++ NV_DMA_TARGET_VIDMEM, &pushbuf); ++ chan->pushbuf_base = 0; ++ } else { ++ /* NV04 cmdbuf hack, from original ddx.. not sure of its ++ * exact reason for existing :) PCI access to cmdbuf in ++ * VRAM.
++ */ ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ pb->start + ++ drm_get_resource_start(dev, 1), ++ pb->size, NV_DMA_ACCESS_RO, ++ NV_DMA_TARGET_PCI, &pushbuf); ++ chan->pushbuf_base = 0; ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, ++ &chan->pushbuf))) { ++ DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret); ++ if (pushbuf != dev_priv->gart_info.sg_ctxdma) ++ nouveau_gpuobj_del(dev, &pushbuf); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static struct mem_block * ++nouveau_fifo_user_pushbuf_alloc(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_config *config = &dev_priv->config; ++ struct mem_block *pb; ++ int pb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE); ++ ++ /* Defaults for unconfigured values */ ++ if (!config->cmdbuf.location) ++ config->cmdbuf.location = NOUVEAU_MEM_FB; ++ if (!config->cmdbuf.size || config->cmdbuf.size < pb_min_size) ++ config->cmdbuf.size = pb_min_size; ++ ++ pb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size, ++ config->cmdbuf.location | NOUVEAU_MEM_MAPPED, ++ (struct drm_file *)-2); ++ if (!pb) ++ DRM_ERROR("Couldn't allocate DMA push buffer.\n"); ++ ++ return pb; ++} ++ ++/* allocates and initializes a fifo for user space consumption */ ++int ++nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, ++ struct drm_file *file_priv, struct mem_block *pushbuf, ++ uint32_t vram_handle, uint32_t tt_handle) ++{ ++ int ret; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ struct nouveau_channel *chan; ++ int channel; ++ ++ /* ++ * Alright, here is the full story ++ * Nvidia cards have multiple hw fifo contexts (praise them for that, ++ * no complicated crash-prone context switches) ++ * We allocate a new context for each app and let it write to it directly ++ * (woo, full userspace command submission !) ++ * When there are no more contexts, you lost ++ */ ++ for (channel = 0; channel < engine->fifo.channels; channel++) { ++ if (dev_priv->fifos[channel] == NULL) ++ break; ++ } ++ ++ /* no more fifos. you lost. 
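
nouveau_fifo_alloc() scans dev_priv->fifos[] for the first NULL slot and gives up with -EINVAL once every hardware channel is taken. A minimal stand-alone sketch of that slot-scan pattern; the array size and element type here are placeholders, not the driver's real ones:

    #include <stddef.h>
    #include <errno.h>

    #define MAX_CHANNELS 32              /* placeholder for engine->fifo.channels */

    static void *channels[MAX_CHANNELS]; /* NULL marks a free slot */

    /* First free channel index, or -EINVAL when every slot is taken. */
    static int find_free_channel(void)
    {
        int i;

        for (i = 0; i < MAX_CHANNELS; i++)
            if (channels[i] == NULL)
                return i;
        return -EINVAL;
    }
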
 */ ++ if (channel == engine->fifo.channels) ++ return -EINVAL; ++ ++ dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel), ++ DRM_MEM_DRIVER); ++ if (!dev_priv->fifos[channel]) ++ return -ENOMEM; ++ dev_priv->fifo_alloc_count++; ++ chan = dev_priv->fifos[channel]; ++ chan->dev = dev; ++ chan->id = channel; ++ chan->file_priv = file_priv; ++ chan->pushbuf_mem = pushbuf; ++ ++ DRM_INFO("Allocating FIFO number %d\n", channel); ++ ++ /* Locate channel's user control regs */ ++ if (dev_priv->card_type < NV_40) { ++ chan->user = NV03_USER(channel); ++ chan->user_size = NV03_USER_SIZE; ++ chan->put = NV03_USER_DMA_PUT(channel); ++ chan->get = NV03_USER_DMA_GET(channel); ++ chan->ref_cnt = NV03_USER_REF_CNT(channel); ++ } else ++ if (dev_priv->card_type < NV_50) { ++ chan->user = NV40_USER(channel); ++ chan->user_size = NV40_USER_SIZE; ++ chan->put = NV40_USER_DMA_PUT(channel); ++ chan->get = NV40_USER_DMA_GET(channel); ++ chan->ref_cnt = NV40_USER_REF_CNT(channel); ++ } else { ++ chan->user = NV50_USER(channel); ++ chan->user_size = NV50_USER_SIZE; ++ chan->put = NV50_USER_DMA_PUT(channel); ++ chan->get = NV50_USER_DMA_GET(channel); ++ chan->ref_cnt = NV50_USER_REF_CNT(channel); ++ } ++ ++ /* Allocate space for per-channel fixed notifier memory */ ++ ret = nouveau_notifier_init_channel(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ /* Set up channel's default objects */ ++ ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ /* Create a dma object for the push buffer */ ++ ret = nouveau_fifo_pushbuf_ctxdma_init(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ nouveau_wait_for_idle(dev); ++ ++ /* disable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1)); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ ++ /* Create a graphics context for new channel */ ++ ret = engine->graph.create_context(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ /* Construct initial RAMFC for new channel */ ++ ret = engine->fifo.create_context(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ /* set up channel's default get/put values ++ * XXX: quite possibly extremely pointless.. ++ */ ++ NV_WRITE(chan->get, chan->pushbuf_base); ++ NV_WRITE(chan->put, chan->pushbuf_base); ++ ++ /* If this is the first channel, set up PFIFO ourselves. For any ++ * other case, the GPU will handle this when it switches contexts.
++ */ ++ if (dev_priv->fifo_alloc_count == 1) { ++ ret = engine->fifo.load_context(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ ++ ret = engine->graph.load_context(chan); ++ if (ret) { ++ nouveau_fifo_free(chan); ++ return ret; ++ } ++ } ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001); ++ ++ /* reenable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 1); ++ ++ DRM_INFO("%s: initialised FIFO %d\n", __func__, channel); ++ *chan_ret = chan; ++ return 0; ++} ++ ++int ++nouveau_channel_idle(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ uint32_t caches; ++ int idle; ++ ++ caches = NV_READ(NV03_PFIFO_CACHES); ++ NV_WRITE(NV03_PFIFO_CACHES, caches & ~1); ++ ++ if (engine->fifo.channel_id(dev) != chan->id) { ++ struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj; ++ ++ if (INSTANCE_RD(ramfc, 0) != INSTANCE_RD(ramfc, 1)) ++ idle = 0; ++ else ++ idle = 1; ++ } else { ++ idle = (NV_READ(NV04_PFIFO_CACHE1_DMA_GET) == ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); ++ } ++ ++ NV_WRITE(NV03_PFIFO_CACHES, caches); ++ return idle; ++} ++ ++/* stops a fifo */ ++void nouveau_fifo_free(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ uint64_t t_start; ++ ++ DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id); ++ ++ /* Give the channel a chance to idle, wait 2s (hopefully) */ ++ t_start = engine->timer.read(dev); ++ while (!nouveau_channel_idle(chan)) { ++ if (engine->timer.read(dev) - t_start > 2000000000ULL) { ++ DRM_ERROR("Failed to idle channel %d before destroy." ++ "Prepare for strangeness..\n", chan->id); ++ break; ++ } ++ } ++ ++ /*XXX: Maybe should wait for PGRAPH to finish with the stuff it fetched ++ * from CACHE1 too? 
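
nouveau_channel_idle() above boils down to comparing the channel's DMA GET and PUT pointers (or their RAMFC copies when the channel is not resident on the hardware), and nouveau_fifo_free() polls that test against the PTIMER with a roughly two-second budget. A stand-alone sketch of the same poll-with-deadline shape, with the clock and idle test passed in as callbacks rather than read from hardware:

    #include <stdbool.h>
    #include <stdint.h>

    /* Idle when the GPU's read pointer has caught up with our write pointer. */
    static bool ring_idle(uint32_t get, uint32_t put)
    {
        return get == put;
    }

    /* Poll is_idle() until it succeeds or ~2s (in ns) of budget is spent. */
    static bool wait_idle(uint64_t (*read_ns)(void), bool (*is_idle)(void))
    {
        uint64_t start = read_ns();

        while (!is_idle()) {
            if (read_ns() - start > 2000000000ULL)
                return false; /* give up; the caller logs and carries on */
        }
        return true;
    }
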
++ */ ++ ++ /* disable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1)); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ ++ // FIXME XXX needs more code ++ ++ engine->fifo.destroy_context(chan); ++ ++ /* Cleanup PGRAPH state */ ++ engine->graph.destroy_context(chan); ++ ++ /* reenable the fifo caches */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); ++ ++ /* Deallocate push buffer */ ++ nouveau_gpuobj_ref_del(dev, &chan->pushbuf); ++ if (chan->pushbuf_mem) { ++ nouveau_mem_free(dev, chan->pushbuf_mem); ++ chan->pushbuf_mem = NULL; ++ } ++ ++ /* Destroy objects belonging to the channel */ ++ nouveau_gpuobj_channel_takedown(chan); ++ ++ nouveau_notifier_takedown_channel(chan); ++ ++ dev_priv->fifos[chan->id] = NULL; ++ dev_priv->fifo_alloc_count--; ++ drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER); ++} ++ ++/* cleans up all the fifos from file_priv */ ++void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int i; ++ ++ DRM_DEBUG("clearing FIFO enables from file_priv\n"); ++ for(i = 0; i < engine->fifo.channels; i++) { ++ struct nouveau_channel *chan = dev_priv->fifos[i]; ++ ++ if (chan && chan->file_priv == file_priv) ++ nouveau_fifo_free(chan); ++ } ++} ++ ++int ++nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv, ++ int channel) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ ++ if (channel >= engine->fifo.channels) ++ return 0; ++ if (dev_priv->fifos[channel] == NULL) ++ return 0; ++ return (dev_priv->fifos[channel]->file_priv == file_priv); ++} ++ ++/*********************************** ++ * ioctls wrapping the functions ++ ***********************************/ ++ ++static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_channel_alloc *init = data; ++ struct drm_map_list *entry; ++ struct nouveau_channel *chan; ++ struct mem_block *pushbuf; ++ int res; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) ++ return -EINVAL; ++ ++ pushbuf = nouveau_fifo_user_pushbuf_alloc(dev); ++ if (!pushbuf) ++ return -ENOMEM; ++ ++ res = nouveau_fifo_alloc(dev, &chan, file_priv, pushbuf, ++ init->fb_ctxdma_handle, ++ init->tt_ctxdma_handle); ++ if (res) ++ return res; ++ init->channel = chan->id; ++ init->put_base = chan->pushbuf_base; ++ ++ /* make the fifo available to user space */ ++ /* first, the fifo control regs */ ++ init->ctrl = dev_priv->mmio->offset + chan->user; ++ init->ctrl_size = chan->user_size; ++ res = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS, ++ 0, &chan->regs); ++ if (res != 0) ++ return res; ++ ++ entry = drm_find_matching_map(dev, chan->regs); ++ if (!entry) ++ return -EINVAL; ++ init->ctrl = entry->user_token; ++ ++ /* pass back FIFO map info to the caller */ ++ init->cmdbuf = chan->pushbuf_mem->map_handle; ++ init->cmdbuf_size = chan->pushbuf_mem->size; ++ ++ /* and the notifier block */ ++ init->notifier =
chan->notifier_block->map_handle; ++ init->notifier_size = chan->notifier_block->size; ++ ++ return 0; ++} ++ ++static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_channel_free *cfree = data; ++ struct nouveau_channel *chan; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); ++ ++ nouveau_fifo_free(chan); ++ return 0; ++} ++ ++/*********************************** ++ * finally, the ioctl table ++ ***********************************/ ++ ++struct drm_ioctl_desc nouveau_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_TILE, nouveau_ioctl_mem_tile, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_SUSPEND, nouveau_ioctl_suspend, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_NOUVEAU_RESUME, nouveau_ioctl_resume, DRM_AUTH), ++}; ++ ++int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_ioc32.c git-nokia/drivers/gpu/drm-tungsten/nouveau_ioc32.c +--- git/drivers/gpu/drm-tungsten/nouveau_ioc32.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_ioc32.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,72 @@ ++/** ++ * \file mga_ioc32.c ++ * ++ * 32-bit ioctl compatibility routines for the MGA DRM. ++ * ++ * \author Dave Airlie with code from patches by Egbert Eich ++ * ++ * ++ * Copyright (C) Paul Mackerras 2005 ++ * Copyright (C) Egbert Eich 2003,2004 ++ * Copyright (C) Dave Airlie 2005 ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. 
++ */ ++ ++#include <linux/compat.h> ++ ++#include "drmP.h" ++#include "drm.h" ++ ++#include "nouveau_drm.h" ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/dri/card<n>. ++ * ++ * \param filp file pointer. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. ++ */ ++long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ drm_ioctl_compat_t *fn = NULL; ++ int ret; ++ ++ if (nr < DRM_COMMAND_BASE) ++ return drm_compat_ioctl(filp, cmd, arg); ++ ++#if 0 ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) ++ fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE]; ++#endif ++ lock_kernel(); /* XXX for now */ ++ if (fn != NULL) ++ ret = (*fn)(filp, cmd, arg); ++ else ++ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ unlock_kernel(); ++ ++ return ret; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_irq.c git-nokia/drivers/gpu/drm-tungsten/nouveau_irq.c +--- git/drivers/gpu/drm-tungsten/nouveau_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_irq.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,568 @@ ++/* ++ * Copyright (C) 2006 Ben Skeggs. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
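
nouveau_compat_ioctl() above splits the ioctl number space in two: core DRM numbers go straight to drm_compat_ioctl(), while driver-range numbers would be looked up in a per-driver translation table (still stubbed out under #if 0 here). A rough user-space sketch of that split; the handler type, table, and CMD_BASE value are invented for illustration:

    #include <stddef.h>

    #define CMD_BASE 0x40 /* stand-in for DRM_COMMAND_BASE */

    typedef long (*compat_fn)(unsigned int cmd, unsigned long arg);

    static long generic_compat(unsigned int cmd, unsigned long arg)
    { (void)cmd; (void)arg; return 0; } /* stands in for drm_compat_ioctl */

    static long native_path(unsigned int cmd, unsigned long arg)
    { (void)cmd; (void)arg; return 0; } /* stands in for drm_ioctl */

    static compat_fn driver_table[16];  /* hypothetical per-driver table */

    static long compat_dispatch(unsigned int nr, unsigned int cmd,
                                unsigned long arg)
    {
        compat_fn fn = NULL;

        if (nr < CMD_BASE)              /* core range: generic 32-bit handler */
            return generic_compat(cmd, arg);

        if (nr - CMD_BASE < sizeof(driver_table) / sizeof(driver_table[0]))
            fn = driver_table[nr - CMD_BASE]; /* driver range: table lookup */

        return fn ? fn(cmd, arg) : native_path(cmd, arg);
    }
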
++ * ++ */ ++ ++/* ++ * Authors: ++ * Ben Skeggs ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_reg.h" ++#include "nouveau_swmthd.h" ++ ++void ++nouveau_irq_preinstall(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Master disable */ ++ NV_WRITE(NV03_PMC_INTR_EN_0, 0); ++} ++ ++int ++nouveau_irq_postinstall(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Master enable */ ++ NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); ++ ++ return 0; ++} ++ ++void ++nouveau_irq_uninstall(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Master disable */ ++ NV_WRITE(NV03_PMC_INTR_EN_0, 0); ++} ++ ++static void ++nouveau_fifo_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ uint32_t status, reassign; ++ ++ reassign = NV_READ(NV03_PFIFO_CACHES) & 1; ++ while ((status = NV_READ(NV03_PFIFO_INTR_0))) { ++ uint32_t chid, get; ++ ++ NV_WRITE(NV03_PFIFO_CACHES, 0); ++ ++ chid = engine->fifo.channel_id(dev); ++ get = NV_READ(NV03_PFIFO_CACHE1_GET); ++ ++ if (status & NV_PFIFO_INTR_CACHE_ERROR) { ++ uint32_t mthd, data; ++ int ptr; ++ ++ ptr = get >> 2; ++ if (dev_priv->card_type < NV_40) { ++ mthd = NV_READ(NV04_PFIFO_CACHE1_METHOD(ptr)); ++ data = NV_READ(NV04_PFIFO_CACHE1_DATA(ptr)); ++ } else { ++ mthd = NV_READ(NV40_PFIFO_CACHE1_METHOD(ptr)); ++ data = NV_READ(NV40_PFIFO_CACHE1_DATA(ptr)); ++ } ++ ++ DRM_INFO("PFIFO_CACHE_ERROR - " ++ "Ch %d/%d Mthd 0x%04x Data 0x%08x\n", ++ chid, (mthd >> 13) & 7, mthd & 0x1ffc, data); ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_GET, get + 4); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 1); ++ ++ status &= ~NV_PFIFO_INTR_CACHE_ERROR; ++ NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); ++ } ++ ++ if (status & NV_PFIFO_INTR_DMA_PUSHER) { ++ DRM_INFO("PFIFO_DMA_PUSHER - Ch %d\n", chid); ++ ++ status &= ~NV_PFIFO_INTR_DMA_PUSHER; ++ NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); ++ if (NV_READ(NV04_PFIFO_CACHE1_DMA_PUT) != get) ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, get + 4); ++ } ++ ++ if (status) { ++ DRM_INFO("Unhandled PFIFO_INTR - 0x%08x\n", status); ++ NV_WRITE(NV03_PFIFO_INTR_0, status); ++ NV_WRITE(NV03_PMC_INTR_EN_0, 0); ++ } ++ ++ NV_WRITE(NV03_PFIFO_CACHES, reassign); ++ } ++ ++ NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); ++} ++ ++struct nouveau_bitfield_names { ++ uint32_t mask; ++ const char * name; ++}; ++ ++static struct nouveau_bitfield_names nouveau_nstatus_names[] = ++{ ++ { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, ++ { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, ++ { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, ++ { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } ++}; ++ ++static struct nouveau_bitfield_names nouveau_nstatus_names_nv10[] = ++{ ++ { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, ++ { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, ++ { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, ++ { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } ++}; ++ ++static struct nouveau_bitfield_names nouveau_nsource_names[] = ++{ ++ { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, ++ { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, ++ { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" 
}, ++ { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, ++ { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, ++ { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, ++ { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, ++ { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, ++ { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, ++ { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, ++ { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, ++ { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, ++ { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, ++ { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, ++ { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, ++ { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, ++ { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, ++ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, ++ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, ++}; ++ ++static void ++nouveau_print_bitfield_names(uint32_t value, ++ const struct nouveau_bitfield_names *namelist, ++ const int namelist_len) ++{ ++ int i; ++ for(i=0; i<namelist_len; ++i) { ++ uint32_t mask = namelist[i].mask; ++ if(value & mask) { ++ printk(" %s", namelist[i].name); ++ value &= ~mask; ++ } ++ } ++ if(value) ++ printk(" (unknown bits 0x%08x)", value); ++} ++ ++static int ++nouveau_graph_chid_from_grctx(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst; ++ int i; ++ ++ if (dev_priv->card_type < NV_40) ++ return dev_priv->Engine.fifo.channels; ++ else ++ if (dev_priv->card_type < NV_50) ++ inst = (NV_READ(0x40032c) & 0xfffff) << 4; ++ else ++ inst = NV_READ(0x40032c) & 0xfffff; ++ ++ for (i = 0; i < dev_priv->Engine.fifo.channels; i++) { ++ struct nouveau_channel *chan = dev_priv->fifos[i]; ++ ++ if (!chan || !chan->ramin_grctx) ++ continue; ++ ++ if (dev_priv->card_type < NV_50) { ++ if (inst == chan->ramin_grctx->instance) ++ break; ++ } else { ++ if (inst == INSTANCE_RD(chan->ramin_grctx->gpuobj, 0)) ++ break; ++ } ++ } ++ ++ return i; ++} ++ ++static int ++nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int channel; ++ ++ if (dev_priv->card_type < NV_10) ++ channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf; ++ else ++ if (dev_priv->card_type < NV_40) ++ channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; ++ else ++ channel = nouveau_graph_chid_from_grctx(dev); ++ ++ if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) { ++ DRM_ERROR("AIII, invalid/inactive channel id %d\n", channel); ++ return -EINVAL; ++ } ++ ++ *channel_ret = channel; ++ return 0; ++} ++ ++struct nouveau_pgraph_trap { ++ int channel; ++ int class; ++ int subc, mthd, size; ++ uint32_t data, data2; ++ uint32_t nsource, nstatus; ++}; ++ ++static void ++nouveau_graph_trap_info(struct drm_device *dev, ++ struct nouveau_pgraph_trap *trap) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t address; ++ ++ trap->nsource = trap->nstatus = 0; ++ if (dev_priv->card_type < NV_50) { ++ trap->nsource = NV_READ(NV03_PGRAPH_NSOURCE); ++ trap->nstatus = NV_READ(NV03_PGRAPH_NSTATUS); ++ } ++ ++ if (nouveau_graph_trapped_channel(dev, &trap->channel)) ++ trap->channel = -1; ++ address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR); ++ ++ trap->mthd = address & 0x1FFC; ++ trap->data = NV_READ(NV04_PGRAPH_TRAPPED_DATA); ++ if (dev_priv->card_type < NV_10) { ++ trap->subc = (address >> 13) & 0x7; ++ } else { ++ trap->subc = (address >> 16) & 0x7; ++ trap->data2 = NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH); ++ } ++ ++ if (dev_priv->card_type < NV_10) { ++ trap->class = NV_READ(0x400180 + trap->subc*4) & 0xFF; ++ } else if (dev_priv->card_type < NV_40) { ++
trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFF; ++ } else if (dev_priv->card_type < NV_50) { ++ trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFFF; ++ } else { ++ trap->class = NV_READ(0x400814); ++ } ++} ++ ++static void ++nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, ++ struct nouveau_pgraph_trap *trap) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t nsource = trap->nsource, nstatus = trap->nstatus; ++ ++ DRM_INFO("%s - nSource:", id); ++ nouveau_print_bitfield_names(nsource, nouveau_nsource_names, ++ ARRAY_SIZE(nouveau_nsource_names)); ++ printk(", nStatus:"); ++ if (dev_priv->card_type < NV_10) ++ nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names, ++ ARRAY_SIZE(nouveau_nstatus_names)); ++ else ++ nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names_nv10, ++ ARRAY_SIZE(nouveau_nstatus_names_nv10)); ++ printk("\n"); ++ ++ DRM_INFO("%s - Ch %d/%d Class 0x%04x Mthd 0x%04x Data 0x%08x:0x%08x\n", ++ id, trap->channel, trap->subc, trap->class, trap->mthd, ++ trap->data2, trap->data); ++} ++ ++static inline void ++nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource) ++{ ++ struct nouveau_pgraph_trap trap; ++ int unhandled = 0; ++ ++ nouveau_graph_trap_info(dev, &trap); ++ ++ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { ++ /* NV4 (nvidia TNT 1) reports software methods with ++ * PGRAPH NOTIFY ILLEGAL_MTHD ++ */ ++ DRM_DEBUG("Got NV04 software method %x for class %#x\n", ++ trap.mthd, trap.class); ++ ++ if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) { ++ DRM_ERROR("Unable to execute NV04 software method %x " ++ "for object class %x. Please report.\n", ++ trap.mthd, trap.class); ++ unhandled = 1; ++ } ++ } else { ++ unhandled = 1; ++ } ++ ++ if (unhandled) ++ nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); ++} ++ ++static inline void ++nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) ++{ ++ struct nouveau_pgraph_trap trap; ++ int unhandled = 0; ++ ++ nouveau_graph_trap_info(dev, &trap); ++ trap.nsource = nsource; ++ ++ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { ++ if (trap.channel >= 0 && trap.mthd == 0x0150) { ++ nouveau_fence_handler(dev, trap.channel); ++ } else ++ if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) { ++ unhandled = 1; ++ } ++ } else { ++ unhandled = 1; ++ } ++ ++ if (unhandled) ++ nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap); ++} ++ ++static inline void ++nouveau_pgraph_intr_context_switch(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ uint32_t chid; ++ ++ chid = engine->fifo.channel_id(dev); ++ DRM_DEBUG("PGRAPH context switch interrupt channel %x\n", chid); ++ ++ switch(dev_priv->card_type) { ++ case NV_04: ++ case NV_05: ++ nouveau_nv04_context_switch(dev); ++ break; ++ case NV_10: ++ case NV_11: ++ case NV_17: ++ nouveau_nv10_context_switch(dev); ++ break; ++ default: ++ DRM_ERROR("Context switch not implemented\n"); ++ break; ++ } ++} ++ ++static void ++nouveau_pgraph_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t status; ++ ++ while ((status = NV_READ(NV03_PGRAPH_INTR))) { ++ uint32_t nsource = NV_READ(NV03_PGRAPH_NSOURCE); ++ ++ if (status & NV_PGRAPH_INTR_NOTIFY) { ++ nouveau_pgraph_intr_notify(dev, nsource); ++ ++ status &= ~NV_PGRAPH_INTR_NOTIFY; ++ NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); ++ } ++ ++ if (status &
NV_PGRAPH_INTR_ERROR) { ++ nouveau_pgraph_intr_error(dev, nsource); ++ ++ status &= ~NV_PGRAPH_INTR_ERROR; ++ NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR); ++ } ++ ++ if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { ++ nouveau_pgraph_intr_context_switch(dev); ++ ++ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; ++ NV_WRITE(NV03_PGRAPH_INTR, ++ NV_PGRAPH_INTR_CONTEXT_SWITCH); ++ } ++ ++ if (status) { ++ DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status); ++ NV_WRITE(NV03_PGRAPH_INTR, status); ++ } ++ ++ if ((NV_READ(NV04_PGRAPH_FIFO) & (1 << 0)) == 0) ++ NV_WRITE(NV04_PGRAPH_FIFO, 1); ++ } ++ ++ NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); ++} ++ ++static void ++nv50_pgraph_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t status; ++ ++ status = NV_READ(NV03_PGRAPH_INTR); ++ ++ if (status & 0x00000020) { ++ nouveau_pgraph_intr_error(dev, ++ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); ++ ++ status &= ~0x00000020; ++ NV_WRITE(NV03_PGRAPH_INTR, 0x00000020); ++ } ++ ++ if (status & 0x00100000) { ++ nouveau_pgraph_intr_error(dev, ++ NV03_PGRAPH_NSOURCE_DATA_ERROR); ++ ++ status &= ~0x00100000; ++ NV_WRITE(NV03_PGRAPH_INTR, 0x00100000); ++ } ++ ++ if (status & 0x00200000) { ++ nouveau_pgraph_intr_error(dev, ++ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); ++ ++ status &= ~0x00200000; ++ NV_WRITE(NV03_PGRAPH_INTR, 0x00200000); ++ } ++ ++ if (status) { ++ DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status); ++ NV_WRITE(NV03_PGRAPH_INTR, status); ++ } ++ ++ { ++ const int isb = (1 << 16) | (1 << 0); ++ ++ if ((NV_READ(0x400500) & isb) != isb) ++ NV_WRITE(0x400500, NV_READ(0x400500) | isb); ++ } ++ ++ NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); ++} ++ ++static void ++nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (crtc&1) { ++ NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); ++ } ++ ++ if (crtc&2) { ++ NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); ++ } ++} ++ ++static void ++nouveau_nv50_display_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t val = NV_READ(NV50_DISPLAY_SUPERVISOR); ++ ++ DRM_INFO("NV50_DISPLAY_INTR - 0x%08X\n", val); ++ ++ NV_WRITE(NV50_DISPLAY_SUPERVISOR, val); ++} ++ ++static void ++nouveau_nv50_i2c_irq_handler(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_INFO("NV50_I2C_INTR - 0x%08X\n", NV_READ(NV50_I2C_CONTROLLER)); ++ ++ /* This seems to be the way to acknowledge an interrupt. 
*/ ++ NV_WRITE(NV50_I2C_CONTROLLER, 0x7FFF7FFF); ++} ++ ++irqreturn_t ++nouveau_irq_handler(DRM_IRQ_ARGS) ++{ ++ struct drm_device *dev = (struct drm_device*)arg; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t status; ++ ++ status = NV_READ(NV03_PMC_INTR_0); ++ if (!status) ++ return IRQ_NONE; ++ ++ if (status & NV_PMC_INTR_0_PFIFO_PENDING) { ++ nouveau_fifo_irq_handler(dev); ++ status &= ~NV_PMC_INTR_0_PFIFO_PENDING; ++ } ++ ++ if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { ++ if (dev_priv->card_type >= NV_50) ++ nv50_pgraph_irq_handler(dev); ++ else ++ nouveau_pgraph_irq_handler(dev); ++ ++ status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; ++ } ++ ++ if (status & NV_PMC_INTR_0_CRTCn_PENDING) { ++ nouveau_crtc_irq_handler(dev, (status>>24)&3); ++ status &= ~NV_PMC_INTR_0_CRTCn_PENDING; ++ } ++ ++ if (status & NV_PMC_INTR_0_NV50_DISPLAY_PENDING) { ++ nouveau_nv50_display_irq_handler(dev); ++ status &= ~NV_PMC_INTR_0_NV50_DISPLAY_PENDING; ++ } ++ ++ if (status & NV_PMC_INTR_0_NV50_I2C_PENDING) { ++ nouveau_nv50_i2c_irq_handler(dev); ++ status &= ~NV_PMC_INTR_0_NV50_I2C_PENDING; ++ } ++ ++ if (status) ++ DRM_ERROR("Unhandled PMC INTR status bits 0x%08x\n", status); ++ ++ return IRQ_HANDLED; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_mem.c git-nokia/drivers/gpu/drm-tungsten/nouveau_mem.c +--- git/drivers/gpu/drm-tungsten/nouveau_mem.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_mem.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,872 @@ ++/* ++ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. ++ * Copyright 2005 Stephane Marchesin ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
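
nouveau_irq_handler() just above follows a fixed discipline: read PMC_INTR_0 once, run the engine-specific helper for each pending source, clear that source's bit in the local copy, and warn if anything is still set at the end. The same discipline in a table-driven, self-contained sketch; the mask values and handler names are placeholders, not real PMC bits:

    #include <stdint.h>
    #include <stdio.h>

    struct irq_source {
        uint32_t mask;
        void (*handler)(void);
    };

    static void fifo_irq(void)   { /* would drain PFIFO_INTR_0 here */ }
    static void pgraph_irq(void) { /* would drain PGRAPH_INTR here */ }

    static const struct irq_source sources[] = {
        { 1u << 8,  fifo_irq },   /* placeholder "PFIFO pending" bit */
        { 1u << 12, pgraph_irq }, /* placeholder "PGRAPH pending" bit */
    };

    /* Returns 0 for IRQ_NONE, 1 for IRQ_HANDLED. */
    static int handle_irq(uint32_t status)
    {
        unsigned i;

        if (!status)
            return 0;                       /* not our interrupt */

        for (i = 0; i < sizeof(sources) / sizeof(sources[0]); i++) {
            if (status & sources[i].mask) {
                sources[i].handler();
                status &= ~sources[i].mask; /* mark this source consumed */
            }
        }

        if (status)
            fprintf(stderr, "Unhandled INTR bits 0x%08x\n", status);
        return 1;
    }
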
++ * ++ * Authors: ++ * Keith Whitwell ++ */ ++ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_sarea.h" ++#include "nouveau_drv.h" ++ ++static struct mem_block * ++split_block(struct mem_block *p, uint64_t start, uint64_t size, ++ struct drm_file *file_priv) ++{ ++ /* Maybe cut off the start of an existing block */ ++ if (start > p->start) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); ++ if (!newblock) ++ goto out; ++ newblock->start = start; ++ newblock->size = p->size - (start - p->start); ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size -= newblock->size; ++ p = newblock; ++ } ++ ++ /* Maybe cut off the end of an existing block */ ++ if (size < p->size) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); ++ if (!newblock) ++ goto out; ++ newblock->start = start + size; ++ newblock->size = p->size - size; ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size = size; ++ } ++ ++out: ++ /* Our block is in the middle */ ++ p->file_priv = file_priv; ++ return p; ++} ++ ++struct mem_block * ++nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size, ++ int align2, struct drm_file *file_priv, int tail) ++{ ++ struct mem_block *p; ++ uint64_t mask = (1 << align2) - 1; ++ ++ if (!heap) ++ return NULL; ++ ++ if (tail) { ++ list_for_each_prev(p, heap) { ++ uint64_t start = ((p->start + p->size) - size) & ~mask; ++ ++ if (p->file_priv == 0 && start >= p->start && ++ start + size <= p->start + p->size) ++ return split_block(p, start, size, file_priv); ++ } ++ } else { ++ list_for_each(p, heap) { ++ uint64_t start = (p->start + mask) & ~mask; ++ ++ if (p->file_priv == 0 && ++ start + size <= p->start + p->size) ++ return split_block(p, start, size, file_priv); ++ } ++ } ++ ++ return NULL; ++} ++ ++static struct mem_block *find_block(struct mem_block *heap, uint64_t start) ++{ ++ struct mem_block *p; ++ ++ list_for_each(p, heap) ++ if (p->start == start) ++ return p; ++ ++ return NULL; ++} ++ ++void nouveau_mem_free_block(struct mem_block *p) ++{ ++ p->file_priv = NULL; ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. ++ */ ++ if (p->next->file_priv == 0) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_BUFS); ++ } ++ ++ if (p->prev->file_priv == 0) { ++ struct mem_block *q = p->prev; ++ q->size += p->size; ++ q->next = p->next; ++ q->next->prev = q; ++ drm_free(p, sizeof(*q), DRM_MEM_BUFS); ++ } ++} ++ ++/* Initialize. How to check for an uninitialized heap? 
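
Each heap in nouveau_mem.c is a circular doubly-linked list of blocks: split_block() carves an aligned region out of a free block, and nouveau_mem_free_block() merges a freed block with any free neighbours. A condensed sketch of just the merge step, with the file_priv owner pointer reduced to a used flag:

    #include <stdlib.h>

    struct block {
        unsigned long start, size;
        int used;                  /* stands in for the file_priv owner field */
        struct block *prev, *next; /* circular list; the sentinel stays used */
    };

    static void free_and_coalesce(struct block *p)
    {
        p->used = 0;

        if (!p->next->used) {      /* absorb the free block after us */
            struct block *q = p->next;
            p->size += q->size;
            p->next = q->next;
            p->next->prev = p;
            free(q);
        }

        if (!p->prev->used) {      /* let the free block before us absorb us */
            struct block *q = p->prev;
            q->size += p->size;
            q->next = p->next;
            q->next->prev = q;
            free(p);
        }
    }

The sentinel trick is why nouveau_mem_init_heap() below marks the list head with a fake owner of (struct drm_file *)-1: coalescing can never swallow the head, so the list always has at least one node.
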
++ */ ++int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start, ++ uint64_t size) ++{ ++ struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); ++ ++ if (!blocks) ++ return -ENOMEM; ++ ++ *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); ++ if (!*heap) { ++ drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ ++ blocks->start = start; ++ blocks->size = size; ++ blocks->file_priv = NULL; ++ blocks->next = blocks->prev = *heap; ++ ++ memset(*heap, 0, sizeof(**heap)); ++ (*heap)->file_priv = (struct drm_file *) - 1; ++ (*heap)->next = (*heap)->prev = blocks; ++ return 0; ++} ++ ++/* ++ * Free all blocks associated with the releasing file_priv ++ */ ++void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap) ++{ ++ struct mem_block *p; ++ ++ if (!heap || !heap->next) ++ return; ++ ++ list_for_each(p, heap) { ++ if (p->file_priv == file_priv) ++ p->file_priv = NULL; ++ } ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. ++ */ ++ list_for_each(p, heap) { ++ while ((p->file_priv == 0) && (p->next->file_priv == 0) && ++ (p->next!=heap)) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_DRIVER); ++ } ++ } ++} ++ ++/* ++ * Cleanup everything ++ */ ++void nouveau_mem_takedown(struct mem_block **heap) ++{ ++ struct mem_block *p; ++ ++ if (!*heap) ++ return; ++ ++ for (p = (*heap)->next; p != *heap;) { ++ struct mem_block *q = p; ++ p = p->next; ++ drm_free(q, sizeof(*q), DRM_MEM_DRIVER); ++ } ++ ++ drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER); ++ *heap = NULL; ++} ++ ++void nouveau_mem_close(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ nouveau_mem_takedown(&dev_priv->agp_heap); ++ nouveau_mem_takedown(&dev_priv->fb_heap); ++ if (dev_priv->pci_heap) ++ nouveau_mem_takedown(&dev_priv->pci_heap); ++} ++ ++/*XXX won't work on BSD because of pci_read_config_dword */ ++static uint32_t ++nouveau_mem_fb_amount_igp(struct drm_device *dev) ++{ ++#if defined(__linux__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct pci_dev *bridge; ++ uint32_t mem; ++ ++ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1)); ++ if (!bridge) { ++ DRM_ERROR("no bridge device\n"); ++ return 0; ++ } ++ ++ if (dev_priv->flags&NV_NFORCE) { ++ pci_read_config_dword(bridge, 0x7C, &mem); ++ return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; ++ } else ++ if(dev_priv->flags&NV_NFORCE2) { ++ pci_read_config_dword(bridge, 0x84, &mem); ++ return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; ++ } ++ ++ DRM_ERROR("impossible!\n"); ++#else ++ DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n"); ++#endif ++ ++ return 0; ++} ++ ++/* returns the amount of FB ram in bytes */ ++uint64_t nouveau_mem_fb_amount(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ switch(dev_priv->card_type) ++ { ++ case NV_04: ++ case NV_05: ++ if (NV_READ(NV03_BOOT_0) & 0x00000100) { ++ return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024; ++ } else ++ switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT) ++ { ++ case NV04_BOOT_0_RAM_AMOUNT_32MB: ++ return 32*1024*1024; ++ case NV04_BOOT_0_RAM_AMOUNT_16MB: ++ return 16*1024*1024; ++ case NV04_BOOT_0_RAM_AMOUNT_8MB: ++ return 8*1024*1024; ++ case NV04_BOOT_0_RAM_AMOUNT_4MB: ++ return 4*1024*1024; ++ } ++ break; ++ case 
NV_10: ++ case NV_11: ++ case NV_17: ++ case NV_20: ++ case NV_30: ++ case NV_40: ++ case NV_44: ++ case NV_50: ++ default: ++ if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { ++ return nouveau_mem_fb_amount_igp(dev); ++ } else { ++ uint64_t mem; ++ ++ mem = (NV_READ(NV04_FIFO_DATA) & ++ NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> ++ NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; ++ return mem*1024*1024; ++ } ++ break; ++ } ++ ++ DRM_ERROR("Unable to detect video ram size. Please report your setup to " DRIVER_EMAIL "\n"); ++ return 0; ++} ++ ++static void nouveau_mem_reset_agp(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable; ++ ++ saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1); ++ saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19); ++ ++ /* clear busmaster bit */ ++ NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4); ++ /* clear SBA and AGP bits */ ++ NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff); ++ ++ /* power cycle pgraph, if enabled */ ++ pmc_enable = NV_READ(NV03_PMC_ENABLE); ++ if (pmc_enable & NV_PMC_ENABLE_PGRAPH) { ++ NV_WRITE(NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ } ++ ++ /* and restore (gives effect of resetting AGP) */ ++ NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19); ++ NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1); ++} ++ ++static int ++nouveau_mem_init_agp(struct drm_device *dev, int ttm) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_agp_info info; ++ struct drm_agp_mode mode; ++ int ret; ++ ++ nouveau_mem_reset_agp(dev); ++ ++ ret = drm_agp_acquire(dev); ++ if (ret) { ++ DRM_ERROR("Unable to acquire AGP: %d\n", ret); ++ return ret; ++ } ++ ++ ret = drm_agp_info(dev, &info); ++ if (ret) { ++ DRM_ERROR("Unable to get AGP info: %d\n", ret); ++ return ret; ++ } ++ ++ /* see agp.h for the AGPSTAT_* modes available */ ++ mode.mode = info.mode; ++ ret = drm_agp_enable(dev, mode); ++ if (ret) { ++ DRM_ERROR("Unable to enable AGP: %d\n", ret); ++ return ret; ++ } ++ ++ if (!ttm) { ++ struct drm_agp_buffer agp_req; ++ struct drm_agp_binding bind_req; ++ ++ agp_req.size = info.aperture_size; ++ agp_req.type = 0; ++ ret = drm_agp_alloc(dev, &agp_req); ++ if (ret) { ++ DRM_ERROR("Unable to alloc AGP: %d\n", ret); ++ return ret; ++ } ++ ++ bind_req.handle = agp_req.handle; ++ bind_req.offset = 0; ++ ret = drm_agp_bind(dev, &bind_req); ++ if (ret) { ++ DRM_ERROR("Unable to bind AGP: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ dev_priv->gart_info.type = NOUVEAU_GART_AGP; ++ dev_priv->gart_info.aper_base = info.aperture_base; ++ dev_priv->gart_info.aper_size = info.aperture_size; ++ return 0; ++} ++ ++#define HACK_OLD_MM ++int ++nouveau_mem_init_ttm(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t vram_size, bar1_size; ++ int ret; ++ ++ dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; ++ dev_priv->fb_phys = drm_get_resource_start(dev,1); ++ dev_priv->gart_info.type = NOUVEAU_GART_NONE; ++ ++ drm_bo_driver_init(dev); ++ ++ /* non-mappable vram */ ++ dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); ++ dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; ++ vram_size = dev_priv->fb_available_size >> PAGE_SHIFT; ++ bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT; ++ if (bar1_size < vram_size) { ++ if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0, ++ bar1_size, vram_size - bar1_size, 1))) { 
++ DRM_ERROR("Failed PRIV0 mm init: %d\n", ret); ++ return ret; ++ } ++ vram_size = bar1_size; ++ } ++ ++ /* mappable vram */ ++#ifdef HACK_OLD_MM ++ vram_size /= 4; ++#endif ++ if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size, 1))) { ++ DRM_ERROR("Failed VRAM mm init: %d\n", ret); ++ return ret; ++ } ++ ++ /* GART */ ++#if !defined(__powerpc__) && !defined(__ia64__) ++ if (drm_device_is_agp(dev) && dev->agp) { ++ if ((ret = nouveau_mem_init_agp(dev, 1))) ++ DRM_ERROR("Error initialising AGP: %d\n", ret); ++ } ++#endif ++ ++ if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) { ++ if ((ret = nouveau_sgdma_init(dev))) ++ DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret); ++ } ++ ++ if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, ++ dev_priv->gart_info.aper_size >> ++ PAGE_SHIFT, 1))) { ++ DRM_ERROR("Failed TT mm init: %d\n", ret); ++ return ret; ++ } ++ ++#ifdef HACK_OLD_MM ++ vram_size <<= PAGE_SHIFT; ++ DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10); ++ if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3)) ++ return -ENOMEM; ++#endif ++ ++ return 0; ++} ++ ++int nouveau_mem_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t fb_size; ++ int ret = 0; ++ ++ dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; ++ dev_priv->fb_phys = 0; ++ dev_priv->gart_info.type = NOUVEAU_GART_NONE; ++ ++ /* setup a mtrr over the FB */ ++ dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), ++ nouveau_mem_fb_amount(dev), ++ DRM_MTRR_WC); ++ ++ /* Init FB */ ++ dev_priv->fb_phys=drm_get_resource_start(dev,1); ++ fb_size = nouveau_mem_fb_amount(dev); ++ /* On G80, limit VRAM to 512MiB temporarily due to limits in how ++ * we handle VRAM page tables. ++ */ ++ if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024)) ++ fb_size = (512 * 1024 * 1024); ++ /* On at least NV40, RAMIN is actually at the end of vram. ++ * We don't want to allocate this... */ ++ if (dev_priv->card_type >= NV_40) ++ fb_size -= dev_priv->ramin_rsvd_vram; ++ dev_priv->fb_available_size = fb_size; ++ DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10); ++ ++ if (fb_size>256*1024*1024) { ++ /* On cards with > 256Mb, you can't map everything. 
++ * So we create a second FB heap for that type of memory */ ++ if (nouveau_mem_init_heap(&dev_priv->fb_heap, ++ 0, 256*1024*1024)) ++ return -ENOMEM; ++ if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap, ++ 256*1024*1024, fb_size-256*1024*1024)) ++ return -ENOMEM; ++ } else { ++ if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size)) ++ return -ENOMEM; ++ dev_priv->fb_nomap_heap=NULL; ++ } ++ ++#if !defined(__powerpc__) && !defined(__ia64__) ++ /* Init AGP / NV50 PCIEGART */ ++ if (drm_device_is_agp(dev) && dev->agp) { ++ if ((ret = nouveau_mem_init_agp(dev, 0))) ++ DRM_ERROR("Error initialising AGP: %d\n", ret); ++ } ++#endif ++ ++ /*Note: this is *not* just NV50 code, but only used on NV50 for now */ ++ if (dev_priv->gart_info.type == NOUVEAU_GART_NONE && ++ dev_priv->card_type >= NV_50) { ++ ret = nouveau_sgdma_init(dev); ++ if (!ret) { ++ ret = nouveau_sgdma_nottm_hack_init(dev); ++ if (ret) ++ nouveau_sgdma_takedown(dev); ++ } ++ ++ if (ret) ++ DRM_ERROR("Error initialising SG DMA: %d\n", ret); ++ } ++ ++ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { ++ if (nouveau_mem_init_heap(&dev_priv->agp_heap, ++ 0, dev_priv->gart_info.aper_size)) { ++ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { ++ nouveau_sgdma_nottm_hack_takedown(dev); ++ nouveau_sgdma_takedown(dev); ++ } ++ } ++ } ++ ++ /* NV04-NV40 PCIEGART */ ++ if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) { ++ struct drm_scatter_gather sgreq; ++ ++ DRM_DEBUG("Allocating sg memory for PCI DMA\n"); ++ sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone ++ ++ if (drm_sg_alloc(dev, &sgreq)) { ++ DRM_ERROR("Unable to allocate %ldMB of scatter-gather" ++ " pages for PCI DMA!",sgreq.size>>20); ++ } else { ++ if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0, ++ dev->sg->pages * PAGE_SIZE)) { ++ DRM_ERROR("Unable to initialize pci_heap!"); ++ } ++ } ++ } ++ ++ /* G8x: Allocate shared page table to map real VRAM pages into */ ++ if (dev_priv->card_type >= NV_50) { ++ unsigned size = ((512 * 1024 * 1024) / 65536) * 8; ++ ++ ret = nouveau_gpuobj_new(dev, NULL, size, 0, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ALLOW_NO_REFS, ++ &dev_priv->vm_vram_pt); ++ if (ret) { ++ DRM_ERROR("Error creating VRAM page table: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ ++ return 0; ++} ++ ++struct mem_block * ++nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, ++ int flags, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct mem_block *block; ++ int type, tail = !(flags & NOUVEAU_MEM_USER); ++ ++ /* ++ * Make things easier on ourselves: all allocations are page-aligned. ++ * We need that to map allocated regions into the user space ++ */ ++ if (alignment < PAGE_SHIFT) ++ alignment = PAGE_SHIFT; ++ ++ /* Align allocation sizes to 64KiB blocks on G8x. We use a 64KiB ++ * page size in the GPU VM. ++ */ ++ if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50) { ++ size = (size + 65535) & ~65535; ++ if (alignment < 16) ++ alignment = 16; ++ } ++ ++ /* ++ * Warn about 0 sized allocations, but let it go through. 
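
nouveau_mem_alloc() normalises every request before searching a heap: alignment is raised to page order, G8x VRAM sizes are rounded up to the GPU's 64 KiB page, and (just below) the byte size is padded to whole CPU pages to keep drm_addmap() happy. The rounding arithmetic in isolation; the 4 KiB CPU_PAGE here is illustrative, the kernel's PAGE_SIZE varies by architecture:

    #include <stdint.h>

    #define CPU_PAGE 4096u   /* illustrative stand-in for PAGE_SIZE */
    #define GPU_PAGE 65536u  /* fixed 64 KiB page used by the G8x VM */

    static uint64_t round_to_gpu_page(uint64_t size)
    {
        /* same as: size = (size + 65535) & ~65535; */
        return (size + GPU_PAGE - 1) & ~(uint64_t)(GPU_PAGE - 1);
    }

    static uint64_t round_to_cpu_page(uint64_t size)
    {
        if (size % CPU_PAGE)  /* same test as size & ~PAGE_MASK */
            size = (size / CPU_PAGE + 1) * CPU_PAGE;
        return size;
    }
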
It'll return 1 page ++ */ ++ if (size == 0) ++ DRM_INFO("warning : 0 byte allocation\n"); ++ ++ /* ++ * Keep alloc size a multiple of the page size to keep drm_addmap() happy ++ */ ++ if (size & (~PAGE_MASK)) ++ size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE; ++ ++ ++#define NOUVEAU_MEM_ALLOC_AGP {\ ++ type=NOUVEAU_MEM_AGP;\ ++ block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\ ++ alignment, file_priv, tail); \ ++ if (block) goto alloc_ok;\ ++ } ++ ++#define NOUVEAU_MEM_ALLOC_PCI {\ ++ type = NOUVEAU_MEM_PCI;\ ++ block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \ ++ alignment, file_priv, tail); \ ++ if ( block ) goto alloc_ok;\ ++ } ++ ++#define NOUVEAU_MEM_ALLOC_FB {\ ++ type=NOUVEAU_MEM_FB;\ ++ if (!(flags&NOUVEAU_MEM_MAPPED)) {\ ++ block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\ ++ size, alignment, \ ++ file_priv, tail); \ ++ if (block) goto alloc_ok;\ ++ }\ ++ block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\ ++ alignment, file_priv, tail);\ ++ if (block) goto alloc_ok;\ ++ } ++ ++ ++ if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB ++ if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP ++ if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI ++ if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB ++ if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP ++ if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI ++ ++ ++ return NULL; ++ ++alloc_ok: ++ block->flags=type; ++ ++ /* On G8x, map memory into VM */ ++ if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 && ++ !(flags & NOUVEAU_MEM_NOVM)) { ++ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; ++ unsigned offset = block->start; ++ unsigned count = block->size / 65536; ++ unsigned tile = 0; ++ ++ if (!pt) { ++ DRM_ERROR("vm alloc without vm pt\n"); ++ nouveau_mem_free_block(block); ++ return NULL; ++ } ++ ++ /* The tiling stuff is *not* what NVIDIA does - but both the ++ * 2D and 3D engines seem happy with this simpler method. ++ * Should look into why NVIDIA do what they do at some point. 
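
The mapping loop just below writes one two-word PTE per 64 KiB page: the low word carries the VRAM offset with bit 0 as the valid bit, the high word the tiling mode chosen from the flags (0x00002800 for zeta, 0x00007000 for colour). The same loop over a plain array, instead of INSTANCE_WR into the shared page table:

    #include <stdint.h>

    #define GPU_PAGE 65536u

    static void map_vram_range(uint32_t pt[][2], unsigned offset,
                               unsigned count, uint32_t tile)
    {
        while (count--) {
            unsigned pte = offset / GPU_PAGE;

            pt[pte][0] = offset | 1; /* page offset plus valid bit */
            pt[pte][1] = tile;       /* tiling mode, 0 when untiled */
            offset += GPU_PAGE;
        }
    }
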
++ */ ++ if (flags & NOUVEAU_MEM_TILE) { ++ if (flags & NOUVEAU_MEM_TILE_ZETA) ++ tile = 0x00002800; ++ else ++ tile = 0x00007000; ++ } ++ ++ while (count--) { ++ unsigned pte = offset / 65536; ++ ++ INSTANCE_WR(pt, (pte * 2) + 0, offset | 1); ++ INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile); ++ offset += 65536; ++ } ++ } else { ++ block->flags |= NOUVEAU_MEM_NOVM; ++ } ++ ++ if (flags&NOUVEAU_MEM_MAPPED) ++ { ++ struct drm_map_list *entry; ++ int ret = 0; ++ block->flags|=NOUVEAU_MEM_MAPPED; ++ ++ if (type == NOUVEAU_MEM_AGP) { ++ if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA) ++ ret = drm_addmap(dev, block->start, block->size, ++ _DRM_AGP, 0, &block->map); ++ else ++ ret = drm_addmap(dev, block->start, block->size, ++ _DRM_SCATTER_GATHER, 0, &block->map); ++ } ++ else if (type == NOUVEAU_MEM_FB) ++ ret = drm_addmap(dev, block->start + dev_priv->fb_phys, ++ block->size, _DRM_FRAME_BUFFER, ++ 0, &block->map); ++ else if (type == NOUVEAU_MEM_PCI) ++ ret = drm_addmap(dev, block->start, block->size, ++ _DRM_SCATTER_GATHER, 0, &block->map); ++ ++ if (ret) { ++ nouveau_mem_free_block(block); ++ return NULL; ++ } ++ ++ entry = drm_find_matching_map(dev, block->map); ++ if (!entry) { ++ nouveau_mem_free_block(block); ++ return NULL; ++ } ++ block->map_handle = entry->user_token; ++ } ++ ++ DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n", block->size, block->start, block->flags); ++ return block; ++} ++ ++void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags); ++ ++ if (block->flags&NOUVEAU_MEM_MAPPED) ++ drm_rmmap(dev, block->map); ++ ++ /* G8x: Remove pages from vm */ ++ if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 && ++ !(block->flags & NOUVEAU_MEM_NOVM)) { ++ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; ++ unsigned offset = block->start; ++ unsigned count = block->size / 65536; ++ ++ if (!pt) { ++ DRM_ERROR("vm free without vm pt\n"); ++ goto out_free; ++ } ++ ++ while (count--) { ++ unsigned pte = offset / 65536; ++ INSTANCE_WR(pt, (pte * 2) + 0, 0); ++ INSTANCE_WR(pt, (pte * 2) + 1, 0); ++ offset += 65536; ++ } ++ } ++ ++out_free: ++ nouveau_mem_free_block(block); ++} ++ ++/* ++ * Ioctls ++ */ ++ ++int ++nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_mem_alloc *alloc = data; ++ struct mem_block *block; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ if (alloc->flags & NOUVEAU_MEM_INTERNAL) ++ return -EINVAL; ++ ++ block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size, ++ alloc->flags | NOUVEAU_MEM_USER, file_priv); ++ if (!block) ++ return -ENOMEM; ++ alloc->map_handle=block->map_handle; ++ alloc->offset=block->start; ++ alloc->flags=block->flags; ++ ++ if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB) ++ alloc->offset += 512*1024*1024; ++ ++ return 0; ++} ++ ++int ++nouveau_ioctl_mem_free(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_mem_free *memfree = data; ++ struct mem_block *block; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB) ++ memfree->offset -= 512*1024*1024; ++ ++ block=NULL; ++ if (memfree->flags & NOUVEAU_MEM_FB) ++ block = 
find_block(dev_priv->fb_heap, memfree->offset); ++ else if (memfree->flags & NOUVEAU_MEM_AGP) ++ block = find_block(dev_priv->agp_heap, memfree->offset); ++ else if (memfree->flags & NOUVEAU_MEM_PCI) ++ block = find_block(dev_priv->pci_heap, memfree->offset); ++ if (!block) ++ return -EFAULT; ++ if (block->file_priv != file_priv) ++ return -EPERM; ++ ++ nouveau_mem_free(dev, block); ++ return 0; ++} ++ ++int ++nouveau_ioctl_mem_tile(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_mem_tile *memtile = data; ++ struct mem_block *block = NULL; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ if (dev_priv->card_type < NV_50) ++ return -EINVAL; ++ ++ if (memtile->flags & NOUVEAU_MEM_FB) { ++ memtile->offset -= 512*1024*1024; ++ block = find_block(dev_priv->fb_heap, memtile->offset); ++ } ++ ++ if (!block) ++ return -EINVAL; ++ ++ if (block->file_priv != file_priv) ++ return -EPERM; ++ ++ { ++ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; ++ unsigned offset = block->start + memtile->delta; ++ unsigned count = memtile->size / 65536; ++ unsigned tile = 0; ++ ++ if (memtile->flags & NOUVEAU_MEM_TILE) { ++ if (memtile->flags & NOUVEAU_MEM_TILE_ZETA) ++ tile = 0x00002800; ++ else ++ tile = 0x00007000; ++ } ++ ++ while (count--) { ++ unsigned pte = offset / 65536; ++ ++ INSTANCE_WR(pt, (pte * 2) + 0, offset | 1); ++ INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile); ++ offset += 65536; ++ } ++ } ++ ++ return 0; ++} ++ +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_notifier.c git-nokia/drivers/gpu/drm-tungsten/nouveau_notifier.c +--- git/drivers/gpu/drm-tungsten/nouveau_notifier.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_notifier.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,165 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++int ++nouveau_notifier_init_channel(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ int flags, ret; ++ ++ flags = (NOUVEAU_MEM_PCI | NOUVEAU_MEM_MAPPED | ++ NOUVEAU_MEM_FB_ACCEPTABLE); ++ ++ chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, ++ (struct drm_file *)-2); ++ if (!chan->notifier_block) ++ return -ENOMEM; ++ DRM_DEBUG("Allocated notifier block in 0x%08x\n", ++ chan->notifier_block->flags); ++ ++ ret = nouveau_mem_init_heap(&chan->notifier_heap, ++ 0, chan->notifier_block->size); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++void ++nouveau_notifier_takedown_channel(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ ++ if (chan->notifier_block) { ++ nouveau_mem_free(dev, chan->notifier_block); ++ chan->notifier_block = NULL; ++ } ++ ++ nouveau_mem_takedown(&chan->notifier_heap); ++} ++ ++static void ++nouveau_notifier_gpuobj_dtor(struct drm_device *dev, ++ struct nouveau_gpuobj *gpuobj) ++{ ++ DRM_DEBUG("\n"); ++ ++ if (gpuobj->priv) ++ nouveau_mem_free_block(gpuobj->priv); ++} ++ ++int ++nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, ++ int count, uint32_t *b_offset) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *nobj = NULL; ++ struct mem_block *mem; ++ uint32_t offset; ++ int target, ret; ++ ++ if (!chan->notifier_heap) { ++ DRM_ERROR("Channel %d doesn't have a notifier heap!\n", ++ chan->id); ++ return -EINVAL; ++ } ++ ++ mem = nouveau_mem_alloc_block(chan->notifier_heap, count*32, 0, ++ (struct drm_file *)-2, 0); ++ if (!mem) { ++ DRM_ERROR("Channel %d notifier block full\n", chan->id); ++ return -ENOMEM; ++ } ++ mem->flags = NOUVEAU_MEM_NOTIFIER; ++ ++ offset = chan->notifier_block->start; ++ if (chan->notifier_block->flags & NOUVEAU_MEM_FB) { ++ target = NV_DMA_TARGET_VIDMEM; ++ } else ++ if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) { ++ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA && ++ dev_priv->card_type < NV_50) { ++ ret = nouveau_sgdma_get_page(dev, offset, &offset); ++ if (ret) ++ return ret; ++ target = NV_DMA_TARGET_PCI; ++ } else { ++ target = NV_DMA_TARGET_AGP; ++ } ++ } else ++ if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) { ++ target = NV_DMA_TARGET_PCI_NONLINEAR; ++ } else { ++ DRM_ERROR("Bad DMA target, flags 0x%08x!\n", ++ chan->notifier_block->flags); ++ return -EINVAL; ++ } ++ offset += mem->start; ++ ++ if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ offset, mem->size, ++ NV_DMA_ACCESS_RW, target, &nobj))) { ++ nouveau_mem_free_block(mem); ++ DRM_ERROR("Error creating notifier ctxdma: %d\n", ret); ++ return ret; ++ } ++ nobj->dtor = nouveau_notifier_gpuobj_dtor; ++ nobj->priv = mem; ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL))) { ++ nouveau_gpuobj_del(dev, &nobj); ++ nouveau_mem_free_block(mem); ++ DRM_ERROR("Error referencing notifier ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ *b_offset = mem->start; ++ return 0; ++} ++ ++int ++nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct drm_nouveau_notifierobj_alloc *na = data; ++ struct nouveau_channel *chan; ++ int ret; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); ++ ++ ret = nouveau_notifier_alloc(chan, na->handle, na->count, &na->offset); ++ if (ret) ++ 
return ret; ++ ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_object.c git-nokia/drivers/gpu/drm-tungsten/nouveau_object.c +--- git/drivers/gpu/drm-tungsten/nouveau_object.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_object.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1178 @@ ++/* ++ * Copyright (C) 2006 Ben Skeggs. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++/* ++ * Authors: ++ * Ben Skeggs ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++/* NVidia uses context objects to drive drawing operations. ++ ++ Context objects can be selected into 8 subchannels in the FIFO, ++ and then used via DMA command buffers. ++ ++ A context object is referenced by a user defined handle (CARD32). The HW ++ looks up graphics objects in a hash table in the instance RAM. ++ ++ An entry in the hash table consists of 2 CARD32. The first CARD32 contains ++ the handle, the second one a bitfield, that contains the address of the ++ object in instance RAM. ++ ++ The format of the second CARD32 seems to be: ++ ++ NV4 to NV30: ++ ++ 15: 0 instance_addr >> 4 ++ 17:16 engine (here uses 1 = graphics) ++ 28:24 channel id (here uses 0) ++ 31 valid (use 1) ++ ++ NV40: ++ ++ 15: 0 instance_addr >> 4 (maybe 19-0) ++ 21:20 engine (here uses 1 = graphics) ++ I'm unsure about the other bits, but using 0 seems to work. 
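++
++ As a worked example (illustrative, not taken from any hardware
++ documentation): on NV4-NV30, a graphics object at instance address
++ 0x1200 in channel 0 would get a second CARD32 of
++ (1<<31) | (1<<16) | (0x1200>>4), i.e. 0x80010120.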
++ ++ The key into the hash table depends on the object handle and channel id and ++ is given as: ++*/ ++static uint32_t ++nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ uint32_t hash = 0; ++ int i; ++ ++ DRM_DEBUG("ch%d handle=0x%08x\n", channel, handle); ++ ++ for (i=32;i>0;i-=dev_priv->ramht_bits) { ++ hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); ++ handle >>= dev_priv->ramht_bits; ++ } ++ if (dev_priv->card_type < NV_50) ++ hash ^= channel << (dev_priv->ramht_bits - 4); ++ hash <<= 3; ++ ++ DRM_DEBUG("hash=0x%08x\n", hash); ++ return hash; ++} ++ ++static int ++nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, ++ uint32_t offset) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ uint32_t ctx = INSTANCE_RD(ramht, (offset + 4)/4); ++ ++ if (dev_priv->card_type < NV_40) ++ return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); ++ return (ctx != 0); ++} ++ ++static int ++nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ struct nouveau_channel *chan = dev_priv->fifos[ref->channel]; ++ struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; ++ struct nouveau_gpuobj *gpuobj = ref->gpuobj; ++ uint32_t ctx, co, ho; ++ ++ if (!ramht) { ++ DRM_ERROR("No hash table!\n"); ++ return -EINVAL; ++ } ++ ++ if (dev_priv->card_type < NV_40) { ++ ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | ++ (ref->channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | ++ (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); ++ } else ++ if (dev_priv->card_type < NV_50) { ++ ctx = (ref->instance >> 4) | ++ (ref->channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | ++ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); ++ } else { ++ ctx = (ref->instance >> 4) | ++ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); ++ } ++ ++ co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle); ++ do { ++ if (!nouveau_ramht_entry_valid(dev, ramht, co)) { ++ DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", ++ ref->channel, co, ref->handle, ctx); ++ INSTANCE_WR(ramht, (co + 0)/4, ref->handle); ++ INSTANCE_WR(ramht, (co + 4)/4, ctx); ++ ++ list_add_tail(&ref->list, &chan->ramht_refs); ++ return 0; ++ } ++ DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n", ++ ref->channel, co, INSTANCE_RD(ramht, co/4)); ++ ++ co += 8; ++ if (co >= dev_priv->ramht_size) { ++ DRM_INFO("no space left after collision\n"); ++ co = 0; ++ /* exit as it seems to cause crash with nouveau_demo and ++ * 0xdead0001 object */ ++ break; ++ } ++ } while (co != ho); ++ ++ DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel); ++ return -ENOMEM; ++} ++ ++static void ++nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan = dev_priv->fifos[ref->channel]; ++ struct nouveau_gpuobj *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL;
++ uint32_t co, ho;
++
++ if (!ramht) {
++ DRM_ERROR("No hash table!\n");
++ return;
++ }
++
++ co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
++ do {
++ if (nouveau_ramht_entry_valid(dev, ramht, co) &&
++ (ref->handle == INSTANCE_RD(ramht, (co/4)))) {
++ DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
++ ref->channel, co, ref->handle,
++ INSTANCE_RD(ramht, (co + 4)));
++ INSTANCE_WR(ramht, (co + 0)/4, 0x00000000);
++ INSTANCE_WR(ramht, (co + 4)/4, 0x00000000);
++
++ list_del(&ref->list);
++ return;
++ }
++
++ co += 8;
++ if (co >= dev_priv->ramht_size)
++ co = 0;
++ } while (co != ho);
++
++ DRM_ERROR("RAMHT entry not found. ch=%d, handle=0x%08x\n",
++ ref->channel, ref->handle);
++}
++
++int
++nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
++ int size, int align, uint32_t flags,
++ struct nouveau_gpuobj **gpuobj_ret)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ struct nouveau_engine *engine = &dev_priv->Engine;
++ struct nouveau_gpuobj *gpuobj;
++ struct mem_block *pramin = NULL;
++ int ret;
++
++ DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n",
++ chan ? chan->id : -1, size, align, flags);
++
++ if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
++ return -EINVAL;
++
++ gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
++ if (!gpuobj)
++ return -ENOMEM;
++ DRM_DEBUG("gpuobj %p\n", gpuobj);
++ gpuobj->flags = flags;
++ gpuobj->im_channel = chan ? chan->id : -1;
++
++ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
++
++ /* Choose between global instmem heap, and per-channel private
++ * instmem heap. On <NV50 allow requests for private instmem
++ * to be satisfied from the global heap if no per-channel area
++ * is available.
++ */
++ if (chan) {
++ if (chan->ramin_heap) {
++ DRM_DEBUG("private heap\n");
++ pramin = chan->ramin_heap;
++ } else
++ if (dev_priv->card_type < NV_50) {
++ DRM_DEBUG("global heap fallback\n");
++ pramin = dev_priv->ramin_heap;
++ }
++ } else {
++ DRM_DEBUG("global heap\n");
++ pramin = dev_priv->ramin_heap;
++ }
++
++ if (!pramin) {
++ DRM_ERROR("No PRAMIN heap!\n");
++ return -EINVAL;
++ }
++
++ if (!chan && (ret = engine->instmem.populate(dev, gpuobj, &size))) {
++ nouveau_gpuobj_del(dev, &gpuobj);
++ return ret;
++ }
++
++ /* Allocate a chunk of the PRAMIN aperture */
++ gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
++ drm_order(align),
++ (struct drm_file *)-2, 0);
++ if (!gpuobj->im_pramin) {
++ nouveau_gpuobj_del(dev, &gpuobj);
++ return -ENOMEM;
++ }
++ gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE;
++
++ if (!chan && (ret = engine->instmem.bind(dev, gpuobj))) {
++ nouveau_gpuobj_del(dev, &gpuobj);
++ return ret;
++ }
++
++ if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
++ int i;
++
++ for (i = 0; i < gpuobj->im_pramin->size; i += 4)
++ INSTANCE_WR(gpuobj, i/4, 0);
++ }
++
++ *gpuobj_ret = gpuobj;
++ return 0;
++}
++
++int
++nouveau_gpuobj_early_init(struct drm_device *dev)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++ DRM_DEBUG("\n");
++
++ INIT_LIST_HEAD(&dev_priv->gpuobj_list);
++
++ return 0;
++}
++
++int
++nouveau_gpuobj_init(struct drm_device *dev)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ int ret;
++
++ DRM_DEBUG("\n");
++
++ if (dev_priv->card_type < NV_50) {
++ if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset,
++ ~0, dev_priv->ramht_size,
++ NVOBJ_FLAG_ZERO_ALLOC |
++ NVOBJ_FLAG_ALLOW_NO_REFS,
++ &dev_priv->ramht, NULL)))
++ return ret;
++ }
++
++ return 0;
++}
++
++void
++nouveau_gpuobj_takedown(struct drm_device *dev)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++ DRM_DEBUG("\n");
++
++ nouveau_gpuobj_del(dev, &dev_priv->ramht);
++}
++
++void
++nouveau_gpuobj_late_takedown(struct drm_device *dev)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ struct nouveau_gpuobj *gpuobj = NULL;
++ struct list_head *entry, *tmp;
++
++ DRM_DEBUG("\n");
++
++ list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
++ gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
++
++ DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n",
++ gpuobj, gpuobj->refcount);
++ gpuobj->refcount = 0;
++ nouveau_gpuobj_del(dev, &gpuobj);
++ }
++}
++
++int
++nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ struct nouveau_engine *engine = &dev_priv->Engine;
++ struct nouveau_gpuobj *gpuobj;
++
++ DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
++
++ if (!dev_priv || !pgpuobj || !(*pgpuobj))
++ return -EINVAL;
++ gpuobj = *pgpuobj;
++
++ if (gpuobj->refcount != 0) {
++ DRM_ERROR("gpuobj refcount is %d\n", gpuobj->refcount);
++ return -EINVAL;
++ }
++
++ if (gpuobj->dtor)
++ gpuobj->dtor(dev, gpuobj);
++
++ if (gpuobj->im_backing) {
++ if (gpuobj->flags & NVOBJ_FLAG_FAKE)
++ drm_free(gpuobj->im_backing,
++ sizeof(*gpuobj->im_backing), DRM_MEM_DRIVER);
++ else
++ engine->instmem.clear(dev, gpuobj);
++ }
++
++ if (gpuobj->im_pramin) {
++ if (gpuobj->flags & NVOBJ_FLAG_FAKE)
++ drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin),
++ DRM_MEM_DRIVER);
++ else
++ nouveau_mem_free_block(gpuobj->im_pramin);
++ }
++
++ list_del(&gpuobj->list);
++
++ *pgpuobj = NULL;
++ drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER);
++ return 0;
++}
++
++static int
++nouveau_gpuobj_instance_get(struct drm_device *dev,
++ struct nouveau_channel *chan,
++ struct nouveau_gpuobj *gpuobj, uint32_t *inst)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ struct nouveau_gpuobj *cpramin;
++
++ /* <NV50 use PRAMIN address everywhere */
++ if (dev_priv->card_type < NV_50) {
++ *inst = gpuobj->im_pramin->start;
++ return 0;
++ }
++
++ if (chan && gpuobj->im_channel != chan->id) {
++ DRM_ERROR("Channel mismatch: obj %d, ref %d\n",
++ gpuobj->im_channel, chan->id);
++ return -EINVAL;
++ }
++
++ /* NV50 channel-local instance */
++ if (chan > 0) {
++ cpramin = chan->ramin->gpuobj;
++ *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
++ return 0;
++ }
++
++ /* NV50 global (VRAM) instance */
++ if (gpuobj->im_channel < 0) {
++ /* ...from global heap */
++ if (!gpuobj->im_backing) {
++ DRM_ERROR("AII, no VRAM backing gpuobj\n");
++ return -EINVAL;
++ }
++ *inst = gpuobj->im_backing->start;
++ return 0;
++ } else {
++ /* ...from local heap */
++ cpramin = dev_priv->fifos[gpuobj->im_channel]->ramin->gpuobj;
++ *inst = cpramin->im_backing->start +
++ (gpuobj->im_pramin->start - cpramin->im_pramin->start);
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++int
++nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
++ uint32_t handle, struct nouveau_gpuobj *gpuobj,
++ struct nouveau_gpuobj_ref **ref_ret)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ struct nouveau_gpuobj_ref *ref;
++ uint32_t instance;
++ int ret;
++
++ DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n",
++ chan ? chan->id : -1, handle, gpuobj);
++
++ if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
++ return -EINVAL;
++
++ if (!chan && !ref_ret)
++ return -EINVAL;
++
++ ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
++ if (ret)
++ return ret;
++
++ ref = drm_calloc(1, sizeof(*ref), DRM_MEM_DRIVER);
++ if (!ref)
++ return -ENOMEM;
++ ref->gpuobj = gpuobj;
++ ref->channel = chan ? chan->id : -1;
++ ref->instance = instance;
++
++ if (!ref_ret) {
++ ref->handle = handle;
++
++ ret = nouveau_ramht_insert(dev, ref);
++ if (ret) {
++ drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER);
++ return ret;
++ }
++ } else {
++ ref->handle = ~0;
++ *ref_ret = ref;
++ }
++
++ ref->gpuobj->refcount++;
++ return 0;
++}
++
++int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
++{
++ struct nouveau_gpuobj_ref *ref;
++
++ DRM_DEBUG("ref %p\n", pref ? *pref : NULL);
++
++ if (!dev || !pref || *pref == NULL)
++ return -EINVAL;
++ ref = *pref;
++
++ if (ref->handle != ~0)
++ nouveau_ramht_remove(dev, ref);
++
++ if (ref->gpuobj) {
++ ref->gpuobj->refcount--;
++
++ if (ref->gpuobj->refcount == 0) {
++ if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
++ nouveau_gpuobj_del(dev, &ref->gpuobj);
++ }
++ }
++
++ *pref = NULL;
++ drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER);
++ return 0;
++}
++
++int
++nouveau_gpuobj_new_ref(struct drm_device *dev,
++ struct nouveau_channel *oc, struct nouveau_channel *rc,
++ uint32_t handle, int size, int align, uint32_t flags,
++ struct nouveau_gpuobj_ref **ref)
++{
++ struct nouveau_gpuobj *gpuobj = NULL;
++ int ret;
++
++ if ((ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj)))
++ return ret;
++
++ if ((ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref))) {
++ nouveau_gpuobj_del(dev, &gpuobj);
++ return ret;
++ }
++
++ return 0;
++}
++
++int
++nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
++ struct nouveau_gpuobj_ref **ref_ret)
++{
++ struct nouveau_gpuobj_ref *ref;
++ struct list_head *entry, *tmp;
++
++ list_for_each_safe(entry, tmp, &chan->ramht_refs) {
++ ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
++
++ if (ref->handle == handle) {
++ if (ref_ret)
++ *ref_ret = ref;
++ return 0;
++ }
++ }
++
++ return -EINVAL;
++}
++
++int
++nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
++ uint32_t b_offset, uint32_t size,
++ uint32_t flags, struct nouveau_gpuobj **pgpuobj,
++ struct nouveau_gpuobj_ref **pref)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ struct nouveau_gpuobj *gpuobj = NULL;
++ int i;
++
++ DRM_DEBUG("p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
++ p_offset, b_offset, size, flags);
++
++ gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
++ if (!gpuobj)
++ return -ENOMEM;
++ DRM_DEBUG("gpuobj %p\n", gpuobj);
++ gpuobj->im_channel = -1;
++ gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
++
++ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
++
++ if (p_offset != ~0) {
++ gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block),
++ DRM_MEM_DRIVER);
++ if (!gpuobj->im_pramin) {
++ nouveau_gpuobj_del(dev, &gpuobj);
++ return -ENOMEM;
++ }
++ gpuobj->im_pramin->start = p_offset;
++ gpuobj->im_pramin->size = size;
++ }
++
++ if (b_offset != ~0) {
++ gpuobj->im_backing = drm_calloc(1, sizeof(struct mem_block),
++ DRM_MEM_DRIVER);
++ if (!gpuobj->im_backing) {
++ nouveau_gpuobj_del(dev, &gpuobj);
++ return -ENOMEM;
++ }
++ gpuobj->im_backing->start = b_offset;
++ gpuobj->im_backing->size = size;
++ }
++
++ if
(gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { ++ for (i = 0; i < gpuobj->im_pramin->size; i += 4) ++ INSTANCE_WR(gpuobj, i/4, 0); ++ } ++ ++ if (pref) { ++ if ((i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref))) { ++ nouveau_gpuobj_del(dev, &gpuobj); ++ return i; ++ } ++ } ++ ++ if (pgpuobj) ++ *pgpuobj = gpuobj; ++ return 0; ++} ++ ++ ++static int ++nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /*XXX: dodgy hack for now */ ++ if (dev_priv->card_type >= NV_50) ++ return 24; ++ if (dev_priv->card_type >= NV_40) ++ return 32; ++ return 16; ++} ++ ++/* ++ DMA objects are used to reference a piece of memory in the ++ framebuffer, PCI or AGP address space. Each object is 16 bytes big ++ and looks as follows: ++ ++ entry[0] ++ 11:0 class (seems like I can always use 0 here) ++ 12 page table present? ++ 13 page entry linear? ++ 15:14 access: 0 rw, 1 ro, 2 wo ++ 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP ++ 31:20 dma adjust (bits 0-11 of the address) ++ entry[1] ++ dma limit (size of transfer) ++ entry[X] ++ 1 0 readonly, 1 readwrite ++ 31:12 dma frame address of the page (bits 12-31 of the address) ++ entry[N] ++ page table terminator, same value as the first pte, as does nvidia ++ rivatv uses 0xffffffff ++ ++ Non linear page tables need a list of frame addresses afterwards, ++ the rivatv project has some info on this. ++ ++ The method below creates a DMA object in instance RAM and returns a handle ++ to it that can be used to set up context objects. ++*/ ++int ++nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, ++ uint64_t offset, uint64_t size, int access, ++ int target, struct nouveau_gpuobj **gpuobj) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ uint32_t is_scatter_gather = 0; ++ ++ /* Total number of pages covered by the request. ++ */ ++ const unsigned int page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE; ++ ++ ++ DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n", ++ chan->id, class, offset, size); ++ DRM_DEBUG("access=%d target=%d\n", access, target); ++ ++ switch (target) { ++ case NV_DMA_TARGET_AGP: ++ offset += dev_priv->gart_info.aper_base; ++ break; ++ case NV_DMA_TARGET_PCI_NONLINEAR: ++ /*assume the "offset" is a virtual memory address*/ ++ is_scatter_gather = 1; ++ /*put back the right value*/ ++ target = NV_DMA_TARGET_PCI; ++ break; ++ default: ++ break; ++ } ++ ++ ret = nouveau_gpuobj_new(dev, chan, ++ is_scatter_gather ? ((page_count << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class), ++ 16, ++ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, ++ gpuobj); ++ if (ret) { ++ DRM_ERROR("Error creating gpuobj: %d\n", ret); ++ return ret; ++ } ++ ++ if (dev_priv->card_type < NV_50) { ++ uint32_t frame, adjust, pte_flags = 0; ++ adjust = offset & 0x00000fff; ++ if (access != NV_DMA_ACCESS_RO) ++ pte_flags |= (1<<1); ++ ++ if ( ! 
is_scatter_gather )
++ {
++ frame = offset & ~0x00000fff;
++
++ INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) |
++ (adjust << 20) |
++ (access << 14) |
++ (target << 16) |
++ class));
++ INSTANCE_WR(*gpuobj, 1, size - 1);
++ INSTANCE_WR(*gpuobj, 2, frame | pte_flags);
++ INSTANCE_WR(*gpuobj, 3, frame | pte_flags);
++ }
++ else
++ {
++ /* Initial page entry in the scatter-gather area that
++ * corresponds to the base offset
++ */
++ unsigned int idx = offset / PAGE_SIZE;
++
++ uint32_t instance_offset;
++ unsigned int i;
++
++ if ((idx + page_count) > dev->sg->pages) {
++ DRM_ERROR("Requested page range exceeds "
++ "allocated scatter-gather range!");
++ return -E2BIG;
++ }
++
++ DRM_DEBUG("Creating PCI DMA object using virtual zone starting at %#llx, size %d\n", offset, (uint32_t)size);
++ INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) |
++ (adjust << 20) |
++ (access << 14) |
++ (target << 16) |
++ class));
++ INSTANCE_WR(*gpuobj, 1, (uint32_t) size-1);
++
++
++ /*write starting at the third dword*/
++ instance_offset = 2;
++
++ /*for each PAGE, get its bus address, fill in the page table entry, and advance*/
++ for (i = 0; i < page_count; i++) {
++ if (dev->sg->busaddr[idx] == 0) {
++ dev->sg->busaddr[idx] =
++ pci_map_page(dev->pdev,
++ dev->sg->pagelist[idx],
++ 0,
++ PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++ /* Not 100% sure this is the right kdev in all cases. */
++ if (dma_mapping_error(&dev->primary->kdev, dev->sg->busaddr[idx])) {
++#else
++ if (dma_mapping_error(dev->sg->busaddr[idx])) {
++#endif
++ return -ENOMEM;
++ }
++ }
++
++ frame = (uint32_t) dev->sg->busaddr[idx];
++ INSTANCE_WR(*gpuobj, instance_offset,
++ frame | pte_flags);
++
++ idx++;
++ instance_offset ++;
++ }
++ }
++ } else {
++ uint32_t flags0, flags5;
++
++ if (target == NV_DMA_TARGET_VIDMEM) {
++ flags0 = 0x00190000;
++ flags5 = 0x00010000;
++ } else {
++ flags0 = 0x7fc00000;
++ flags5 = 0x00080000;
++ }
++
++ INSTANCE_WR(*gpuobj, 0, flags0 | class);
++ INSTANCE_WR(*gpuobj, 1, offset + size - 1);
++ INSTANCE_WR(*gpuobj, 2, offset);
++ INSTANCE_WR(*gpuobj, 5, flags5);
++ }
++
++ (*gpuobj)->engine = NVOBJ_ENGINE_SW;
++ (*gpuobj)->class = class;
++ return 0;
++}
++
++int
++nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
++ uint64_t offset, uint64_t size, int access,
++ struct nouveau_gpuobj **gpuobj,
++ uint32_t *o_ret)
++{
++ struct drm_device *dev = chan->dev;
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ int ret;
++
++ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
++ (dev_priv->card_type >= NV_50 &&
++ dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++ offset, size, access,
++ NV_DMA_TARGET_AGP, gpuobj);
++ if (o_ret)
++ *o_ret = 0;
++ } else
++ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
++ *gpuobj = dev_priv->gart_info.sg_ctxdma;
++ if (offset & ~0xffffffffULL) {
++ DRM_ERROR("obj offset exceeds 32-bits\n");
++ return -EINVAL;
++ }
++ if (o_ret)
++ *o_ret = (uint32_t)offset;
++ ret = (*gpuobj != NULL) ? 0 : -EINVAL;
++ } else {
++ DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
++ return -EINVAL;
++ }
++
++ return ret;
++}
++
++/* Context objects in the instance RAM have the following structure.
++ * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.
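++ * (This matches nouveau_gpuobj_class_instmem_size() above, which
++ * returns 16 below NV40, 32 on NV40, and 24 on NV50, the last one
++ * flagged there as a dodgy hack.)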
++ ++ NV4 - NV30: ++ ++ entry[0] ++ 11:0 class ++ 12 chroma key enable ++ 13 user clip enable ++ 14 swizzle enable ++ 17:15 patch config: ++ scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre ++ 18 synchronize enable ++ 19 endian: 1 big, 0 little ++ 21:20 dither mode ++ 23 single step enable ++ 24 patch status: 0 invalid, 1 valid ++ 25 context_surface 0: 1 valid ++ 26 context surface 1: 1 valid ++ 27 context pattern: 1 valid ++ 28 context rop: 1 valid ++ 29,30 context beta, beta4 ++ entry[1] ++ 7:0 mono format ++ 15:8 color format ++ 31:16 notify instance address ++ entry[2] ++ 15:0 dma 0 instance address ++ 31:16 dma 1 instance address ++ entry[3] ++ dma method traps ++ ++ NV40: ++ No idea what the exact format is. Here's what can be deducted: ++ ++ entry[0]: ++ 11:0 class (maybe uses more bits here?) ++ 17 user clip enable ++ 21:19 patch config ++ 25 patch status valid ? ++ entry[1]: ++ 15:0 DMA notifier (maybe 20:0) ++ entry[2]: ++ 15:0 DMA 0 instance (maybe 20:0) ++ 24 big endian ++ entry[3]: ++ 15:0 DMA 1 instance (maybe 20:0) ++ entry[4]: ++ entry[5]: ++ set to 0? ++*/ ++int ++nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, ++ struct nouveau_gpuobj **gpuobj) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ DRM_DEBUG("ch%d class=0x%04x\n", chan->id, class); ++ ++ ret = nouveau_gpuobj_new(dev, chan, ++ nouveau_gpuobj_class_instmem_size(dev, class), ++ 16, ++ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, ++ gpuobj); ++ if (ret) { ++ DRM_ERROR("Error creating gpuobj: %d\n", ret); ++ return ret; ++ } ++ ++ if (dev_priv->card_type >= NV_50) { ++ INSTANCE_WR(*gpuobj, 0, class); ++ INSTANCE_WR(*gpuobj, 5, 0x00010000); ++ } else { ++ switch (class) { ++ case NV_CLASS_NULL: ++ INSTANCE_WR(*gpuobj, 0, 0x00001030); ++ INSTANCE_WR(*gpuobj, 1, 0xFFFFFFFF); ++ break; ++ default: ++ if (dev_priv->card_type >= NV_40) { ++ INSTANCE_WR(*gpuobj, 0, class); ++#ifdef __BIG_ENDIAN ++ INSTANCE_WR(*gpuobj, 2, 0x01000000); ++#endif ++ } else { ++#ifdef __BIG_ENDIAN ++ INSTANCE_WR(*gpuobj, 0, class | 0x00080000); ++#else ++ INSTANCE_WR(*gpuobj, 0, class); ++#endif ++ } ++ } ++ } ++ ++ (*gpuobj)->engine = NVOBJ_ENGINE_GR; ++ (*gpuobj)->class = class; ++ return 0; ++} ++ ++static int ++nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *pramin = NULL; ++ int size, base, ret; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ /* Base amount for object storage (4KiB enough?) 
*/
++ size = 0x1000;
++ base = 0;
++
++ /* PGRAPH context */
++
++ if (dev_priv->card_type == NV_50) {
++ /* Various fixed table thingos */
++ size += 0x1400; /* mostly unknown stuff */
++ size += 0x4000; /* vm pd */
++ base = 0x6000;
++ /* RAMHT, not sure about setting size yet, 32KiB to be safe */
++ size += 0x8000;
++ /* RAMFC */
++ size += 0x1000;
++ /* PGRAPH context */
++ size += 0x70000;
++ }
++
++ DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
++ chan->id, size, base);
++ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
++ &chan->ramin);
++ if (ret) {
++ DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret);
++ return ret;
++ }
++ pramin = chan->ramin->gpuobj;
++
++ ret = nouveau_mem_init_heap(&chan->ramin_heap,
++ pramin->im_pramin->start + base, size);
++ if (ret) {
++ DRM_ERROR("Error creating PRAMIN heap: %d\n", ret);
++ nouveau_gpuobj_ref_del(dev, &chan->ramin);
++ return ret;
++ }
++
++ return 0;
++}
++
++int
++nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
++ uint32_t vram_h, uint32_t tt_h)
++{
++ struct drm_device *dev = chan->dev;
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ struct nouveau_gpuobj *vram = NULL, *tt = NULL;
++ int ret, i;
++
++ INIT_LIST_HEAD(&chan->ramht_refs);
++
++ DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
++
++ /* Reserve a block of PRAMIN for the channel
++ *XXX: maybe on <NV50 too at some point
++ */
++ if (dev_priv->card_type == NV_50) {
++ ret = nouveau_gpuobj_channel_init_pramin(chan);
++ if (ret)
++ return ret;
++ }
++
++ /* NV50 VM
++ * - Allocate per-channel page-directory
++ * - Point offset 0-512MiB at shared PCIEGART table
++ * - Point offset 512-1024MiB at shared VRAM table
++ */
++ if (dev_priv->card_type >= NV_50) {
++ uint32_t vm_offset;
++
++ vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ?
0x1400 : 0x200; ++ vm_offset += chan->ramin->gpuobj->im_pramin->start; ++ if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, ++ 0, &chan->vm_pd, NULL))) ++ return ret; ++ for (i=0; i<0x4000; i+=8) { ++ INSTANCE_WR(chan->vm_pd, (i+0)/4, 0x00000000); ++ INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe); ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0, ++ dev_priv->gart_info.sg_ctxdma, ++ &chan->vm_gart_pt))) ++ return ret; ++ INSTANCE_WR(chan->vm_pd, (0+0)/4, ++ chan->vm_gart_pt->instance | 0x03); ++ INSTANCE_WR(chan->vm_pd, (0+4)/4, 0x00000000); ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0, ++ dev_priv->vm_vram_pt, ++ &chan->vm_vram_pt))) ++ return ret; ++ INSTANCE_WR(chan->vm_pd, (8+0)/4, ++ chan->vm_vram_pt->instance | 0x61); ++ INSTANCE_WR(chan->vm_pd, (8+4)/4, 0x00000000); ++ } ++ ++ /* RAMHT */ ++ if (dev_priv->card_type < NV_50) { ++ ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht, ++ &chan->ramht); ++ if (ret) ++ return ret; ++ } else { ++ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, ++ 0x8000, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &chan->ramht); ++ if (ret) ++ return ret; ++ } ++ ++ /* VRAM ctxdma */ ++ if (dev_priv->card_type >= NV_50) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ 0, 0x100000000ULL, ++ NV_DMA_ACCESS_RW, ++ NV_DMA_TARGET_AGP, &vram); ++ if (ret) { ++ DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret); ++ return ret; ++ } ++ } else ++ if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ 0, dev_priv->fb_available_size, ++ NV_DMA_ACCESS_RW, ++ NV_DMA_TARGET_VIDMEM, &vram))) { ++ DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ if ((ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL))) { ++ DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ /* TT memory ctxdma */ ++ if (dev_priv->card_type >= NV_50) { ++ tt = vram; ++ } else ++ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { ++ ret = nouveau_gpuobj_gart_dma_new(chan, 0, ++ dev_priv->gart_info.aper_size, ++ NV_DMA_ACCESS_RW, &tt, NULL); ++ } else ++ if (dev_priv->pci_heap) { ++ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, ++ 0, dev->sg->pages * PAGE_SIZE, ++ NV_DMA_ACCESS_RW, ++ NV_DMA_TARGET_PCI_NONLINEAR, &tt); ++ } else { ++ DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type); ++ ret = -EINVAL; ++ } ++ ++ if (ret) { ++ DRM_ERROR("Error creating TT ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL); ++ if (ret) { ++ DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++void ++nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct list_head *entry, *tmp; ++ struct nouveau_gpuobj_ref *ref; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ list_for_each_safe(entry, tmp, &chan->ramht_refs) { ++ ref = list_entry(entry, struct nouveau_gpuobj_ref, list); ++ ++ nouveau_gpuobj_ref_del(dev, &ref); ++ } ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramht); ++ ++ nouveau_gpuobj_del(dev, &chan->vm_pd); ++ nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); ++ nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt); ++ ++ if (chan->ramin_heap) ++ nouveau_mem_takedown(&chan->ramin_heap); ++ if (chan->ramin) ++ nouveau_gpuobj_ref_del(dev, &chan->ramin); ++ ++} ++ ++int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ struct nouveau_channel *chan; ++ struct drm_nouveau_grobj_alloc *init = 
data;
++ struct nouveau_gpuobj *gr = NULL;
++ int ret;
++
++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
++
++ //FIXME: check args, only allow trusted objects to be created
++
++ if (init->handle == ~0)
++ return -EINVAL;
++
++ if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
++ return -EEXIST;
++
++ ret = nouveau_gpuobj_gr_new(chan, init->class, &gr);
++ if (ret) {
++ DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n",
++ ret, init->channel, init->handle);
++ return ret;
++ }
++
++ if ((ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL))) {
++ DRM_ERROR("Error referencing gr object: %d (%d/0x%08x)\n",
++ ret, init->channel, init->handle);
++ nouveau_gpuobj_del(dev, &gr);
++ return ret;
++ }
++
++ return 0;
++}
++
++int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_nouveau_gpuobj_free *objfree = data;
++ struct nouveau_gpuobj_ref *ref;
++ struct nouveau_channel *chan;
++ int ret;
++
++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
++
++ if ((ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref)))
++ return ret;
++ nouveau_gpuobj_ref_del(dev, &ref);
++
++ return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_reg.h git-nokia/drivers/gpu/drm-tungsten/nouveau_reg.h
+--- git/drivers/gpu/drm-tungsten/nouveau_reg.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_reg.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,593 @@
++
++
++#define NV03_BOOT_0 0x00100000
++# define NV03_BOOT_0_RAM_AMOUNT 0x00000003
++# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000
++# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001
++# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002
++# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003
++# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000
++# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001
++# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002
++# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003
++
++#define NV04_FIFO_DATA 0x0010020c
++# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
++# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
++
++#define NV_RAMIN 0x00700000
++
++#define NV_RAMHT_HANDLE_OFFSET 0
++#define NV_RAMHT_CONTEXT_OFFSET 4
++# define NV_RAMHT_CONTEXT_VALID (1<<31)
++# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24
++# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16
++# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0
++# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1
++# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0
++# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23
++# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
++# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
++
++/* DMA object defines */
++#define NV_DMA_ACCESS_RW 0
++#define NV_DMA_ACCESS_RO 1
++#define NV_DMA_ACCESS_WO 2
++#define NV_DMA_TARGET_VIDMEM 0
++#define NV_DMA_TARGET_PCI 2
++#define NV_DMA_TARGET_AGP 3
++/*The following is not a real value used by nvidia cards, it's changed by nouveau_object_dma_create*/
++#define NV_DMA_TARGET_PCI_NONLINEAR 8
++
++/* Some object classes we care about in the drm */
++#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
++#define NV_CLASS_DMA_TO_MEMORY 0x00000003
++#define NV_CLASS_NULL 0x00000030
++#define NV_CLASS_DMA_IN_MEMORY 0x0000003D
++
++#define NV03_USER(i) (0x00800000+(i*NV03_USER_SIZE))
++#define NV03_USER__SIZE 16
++#define NV10_USER__SIZE 32
++#define NV03_USER_SIZE 0x00010000
++#define NV03_USER_DMA_PUT(i) (0x00800040+(i*NV03_USER_SIZE))
++#define NV03_USER_DMA_PUT__SIZE 16 ++#define NV10_USER_DMA_PUT__SIZE 32 ++#define NV03_USER_DMA_GET(i) (0x00800044+(i*NV03_USER_SIZE)) ++#define NV03_USER_DMA_GET__SIZE 16 ++#define NV10_USER_DMA_GET__SIZE 32 ++#define NV03_USER_REF_CNT(i) (0x00800048+(i*NV03_USER_SIZE)) ++#define NV03_USER_REF_CNT__SIZE 16 ++#define NV10_USER_REF_CNT__SIZE 32 ++ ++#define NV40_USER(i) (0x00c00000+(i*NV40_USER_SIZE)) ++#define NV40_USER_SIZE 0x00001000 ++#define NV40_USER_DMA_PUT(i) (0x00c00040+(i*NV40_USER_SIZE)) ++#define NV40_USER_DMA_PUT__SIZE 32 ++#define NV40_USER_DMA_GET(i) (0x00c00044+(i*NV40_USER_SIZE)) ++#define NV40_USER_DMA_GET__SIZE 32 ++#define NV40_USER_REF_CNT(i) (0x00c00048+(i*NV40_USER_SIZE)) ++#define NV40_USER_REF_CNT__SIZE 32 ++ ++#define NV50_USER(i) (0x00c00000+(i*NV50_USER_SIZE)) ++#define NV50_USER_SIZE 0x00002000 ++#define NV50_USER_DMA_PUT(i) (0x00c00040+(i*NV50_USER_SIZE)) ++#define NV50_USER_DMA_PUT__SIZE 128 ++#define NV50_USER_DMA_GET(i) (0x00c00044+(i*NV50_USER_SIZE)) ++#define NV50_USER_DMA_GET__SIZE 128 ++/*XXX: I don't think this actually exists.. */ ++#define NV50_USER_REF_CNT(i) (0x00c00048+(i*NV50_USER_SIZE)) ++#define NV50_USER_REF_CNT__SIZE 128 ++ ++#define NV03_FIFO_SIZE 0x8000UL ++ ++#define NV03_PMC_BOOT_0 0x00000000 ++#define NV03_PMC_BOOT_1 0x00000004 ++#define NV03_PMC_INTR_0 0x00000100 ++# define NV_PMC_INTR_0_PFIFO_PENDING (1<< 8) ++# define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12) ++# define NV_PMC_INTR_0_NV50_I2C_PENDING (1<<21) ++# define NV_PMC_INTR_0_CRTC0_PENDING (1<<24) ++# define NV_PMC_INTR_0_CRTC1_PENDING (1<<25) ++# define NV_PMC_INTR_0_NV50_DISPLAY_PENDING (1<<26) ++# define NV_PMC_INTR_0_CRTCn_PENDING (3<<24) ++#define NV03_PMC_INTR_EN_0 0x00000140 ++# define NV_PMC_INTR_EN_0_MASTER_ENABLE (1<< 0) ++#define NV03_PMC_ENABLE 0x00000200 ++# define NV_PMC_ENABLE_PFIFO (1<< 8) ++# define NV_PMC_ENABLE_PGRAPH (1<<12) ++/* Disabling the below bit breaks newer (G7X only?) mobile chipsets, ++ * the card will hang early on in the X init process. ++ */ ++# define NV_PMC_ENABLE_UNK13 (1<<13) ++#define NV40_PMC_1700 0x00001700 ++#define NV40_PMC_1704 0x00001704 ++#define NV40_PMC_1708 0x00001708 ++#define NV40_PMC_170C 0x0000170C ++ ++/* probably PMC ? 
*/ ++#define NV50_PUNK_BAR0_PRAMIN 0x00001700 ++#define NV50_PUNK_BAR_CFG_BASE 0x00001704 ++#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30) ++#define NV50_PUNK_BAR1_CTXDMA 0x00001708 ++#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31) ++#define NV50_PUNK_BAR3_CTXDMA 0x0000170C ++#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31) ++#define NV50_PUNK_UNK1710 0x00001710 ++ ++#define NV04_PBUS_PCI_NV_1 0x00001804 ++#define NV04_PBUS_PCI_NV_19 0x0000184C ++ ++#define NV04_PTIMER_INTR_0 0x00009100 ++#define NV04_PTIMER_INTR_EN_0 0x00009140 ++#define NV04_PTIMER_NUMERATOR 0x00009200 ++#define NV04_PTIMER_DENOMINATOR 0x00009210 ++#define NV04_PTIMER_TIME_0 0x00009400 ++#define NV04_PTIMER_TIME_1 0x00009410 ++#define NV04_PTIMER_ALARM_0 0x00009420 ++ ++#define NV50_I2C_CONTROLLER 0x0000E054 ++ ++#define NV04_PFB_CFG0 0x00100200 ++#define NV04_PFB_CFG1 0x00100204 ++#define NV40_PFB_020C 0x0010020C ++#define NV10_PFB_TILE(i) (0x00100240 + (i*16)) ++#define NV10_PFB_TILE__SIZE 8 ++#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16)) ++#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16)) ++#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16)) ++#define NV10_PFB_CLOSE_PAGE2 0x0010033C ++#define NV40_PFB_TILE(i) (0x00100600 + (i*16)) ++#define NV40_PFB_TILE__SIZE_0 12 ++#define NV40_PFB_TILE__SIZE_1 15 ++#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16)) ++#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16)) ++#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16)) ++#define NV40_PFB_UNK_800 0x00100800 ++ ++#define NV04_PGRAPH_DEBUG_0 0x00400080 ++#define NV04_PGRAPH_DEBUG_1 0x00400084 ++#define NV04_PGRAPH_DEBUG_2 0x00400088 ++#define NV04_PGRAPH_DEBUG_3 0x0040008c ++#define NV10_PGRAPH_DEBUG_4 0x00400090 ++#define NV03_PGRAPH_INTR 0x00400100 ++#define NV03_PGRAPH_NSTATUS 0x00400104 ++# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11) ++# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12) ++# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13) ++# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14) ++# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23) ++# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24) ++# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25) ++# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26) ++#define NV03_PGRAPH_NSOURCE 0x00400108 ++# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<< 0) ++# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<< 1) ++# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<< 2) ++# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<< 3) ++# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<< 4) ++# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<< 5) ++# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<< 6) ++# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<< 7) ++# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<< 8) ++# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<< 9) ++# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10) ++# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11) ++# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12) ++# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13) ++# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14) ++# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15) ++# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16) ++# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17) ++# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18) ++#define NV03_PGRAPH_INTR_EN 0x00400140 ++#define NV40_PGRAPH_INTR_EN 0x0040013C ++# define NV_PGRAPH_INTR_NOTIFY (1<< 0) ++# define NV_PGRAPH_INTR_MISSING_HW (1<< 4) ++# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12) ++# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16) ++# define NV_PGRAPH_INTR_ERROR (1<<20) ++#define 
NV10_PGRAPH_CTX_CONTROL 0x00400144 ++#define NV10_PGRAPH_CTX_USER 0x00400148 ++#define NV10_PGRAPH_CTX_SWITCH1 0x0040014C ++#define NV10_PGRAPH_CTX_SWITCH2 0x00400150 ++#define NV10_PGRAPH_CTX_SWITCH3 0x00400154 ++#define NV10_PGRAPH_CTX_SWITCH4 0x00400158 ++#define NV10_PGRAPH_CTX_SWITCH5 0x0040015C ++#define NV04_PGRAPH_CTX_SWITCH1 0x00400160 ++#define NV10_PGRAPH_CTX_CACHE1 0x00400160 ++#define NV04_PGRAPH_CTX_SWITCH2 0x00400164 ++#define NV04_PGRAPH_CTX_SWITCH3 0x00400168 ++#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C ++#define NV04_PGRAPH_CTX_CONTROL 0x00400170 ++#define NV04_PGRAPH_CTX_USER 0x00400174 ++#define NV04_PGRAPH_CTX_CACHE1 0x00400180 ++#define NV10_PGRAPH_CTX_CACHE2 0x00400180 ++#define NV03_PGRAPH_CTX_CONTROL 0x00400190 ++#define NV03_PGRAPH_CTX_USER 0x00400194 ++#define NV04_PGRAPH_CTX_CACHE2 0x004001A0 ++#define NV10_PGRAPH_CTX_CACHE3 0x004001A0 ++#define NV04_PGRAPH_CTX_CACHE3 0x004001C0 ++#define NV10_PGRAPH_CTX_CACHE4 0x004001C0 ++#define NV04_PGRAPH_CTX_CACHE4 0x004001E0 ++#define NV10_PGRAPH_CTX_CACHE5 0x004001E0 ++#define NV40_PGRAPH_CTXCTL_0304 0x00400304 ++#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001 ++#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308 ++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000 ++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24 ++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff ++#define NV40_PGRAPH_CTXCTL_0310 0x00400310 ++#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020 ++#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040 ++#define NV40_PGRAPH_CTXCTL_030C 0x0040030c ++#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324 ++#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328 ++#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c ++#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000 ++#define NV40_PGRAPH_CTXCTL_CUR_INST_MASK 0x000FFFFF ++#define NV03_PGRAPH_ABS_X_RAM 0x00400400 ++#define NV03_PGRAPH_ABS_Y_RAM 0x00400480 ++#define NV03_PGRAPH_X_MISC 0x00400500 ++#define NV03_PGRAPH_Y_MISC 0x00400504 ++#define NV04_PGRAPH_VALID1 0x00400508 ++#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C ++#define NV04_PGRAPH_MISC24_0 0x00400510 ++#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514 ++#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518 ++#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C ++#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520 ++#define NV03_PGRAPH_CLIPX_0 0x00400524 ++#define NV03_PGRAPH_CLIPX_1 0x00400528 ++#define NV03_PGRAPH_CLIPY_0 0x0040052C ++#define NV03_PGRAPH_CLIPY_1 0x00400530 ++#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534 ++#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538 ++#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C ++#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540 ++#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544 ++#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548 ++#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560 ++#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564 ++#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568 ++#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C ++#define NV04_PGRAPH_MISC24_1 0x00400570 ++#define NV04_PGRAPH_MISC24_2 0x00400574 ++#define NV04_PGRAPH_VALID2 0x00400578 ++#define NV04_PGRAPH_PASSTHRU_0 0x0040057C ++#define NV04_PGRAPH_PASSTHRU_1 0x00400580 ++#define NV04_PGRAPH_PASSTHRU_2 0x00400584 ++#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588 ++#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C ++#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590 ++#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594 ++#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598 ++#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C ++#define NV04_PGRAPH_FORMAT_0 0x004005A8 ++#define 
NV04_PGRAPH_FORMAT_1 0x004005AC ++#define NV04_PGRAPH_FILTER_0 0x004005B0 ++#define NV04_PGRAPH_FILTER_1 0x004005B4 ++#define NV03_PGRAPH_MONO_COLOR0 0x00400600 ++#define NV04_PGRAPH_ROP3 0x00400604 ++#define NV04_PGRAPH_BETA_AND 0x00400608 ++#define NV04_PGRAPH_BETA_PREMULT 0x0040060C ++#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610 ++#define NV04_PGRAPH_FORMATS 0x00400618 ++#define NV10_PGRAPH_DEBUG_2 0x00400620 ++#define NV04_PGRAPH_BOFFSET0 0x00400640 ++#define NV04_PGRAPH_BOFFSET1 0x00400644 ++#define NV04_PGRAPH_BOFFSET2 0x00400648 ++#define NV04_PGRAPH_BOFFSET3 0x0040064C ++#define NV04_PGRAPH_BOFFSET4 0x00400650 ++#define NV04_PGRAPH_BOFFSET5 0x00400654 ++#define NV04_PGRAPH_BBASE0 0x00400658 ++#define NV04_PGRAPH_BBASE1 0x0040065C ++#define NV04_PGRAPH_BBASE2 0x00400660 ++#define NV04_PGRAPH_BBASE3 0x00400664 ++#define NV04_PGRAPH_BBASE4 0x00400668 ++#define NV04_PGRAPH_BBASE5 0x0040066C ++#define NV04_PGRAPH_BPITCH0 0x00400670 ++#define NV04_PGRAPH_BPITCH1 0x00400674 ++#define NV04_PGRAPH_BPITCH2 0x00400678 ++#define NV04_PGRAPH_BPITCH3 0x0040067C ++#define NV04_PGRAPH_BPITCH4 0x00400680 ++#define NV04_PGRAPH_BLIMIT0 0x00400684 ++#define NV04_PGRAPH_BLIMIT1 0x00400688 ++#define NV04_PGRAPH_BLIMIT2 0x0040068C ++#define NV04_PGRAPH_BLIMIT3 0x00400690 ++#define NV04_PGRAPH_BLIMIT4 0x00400694 ++#define NV04_PGRAPH_BLIMIT5 0x00400698 ++#define NV04_PGRAPH_BSWIZZLE2 0x0040069C ++#define NV04_PGRAPH_BSWIZZLE5 0x004006A0 ++#define NV03_PGRAPH_STATUS 0x004006B0 ++#define NV04_PGRAPH_STATUS 0x00400700 ++#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 ++#define NV04_PGRAPH_TRAPPED_DATA 0x00400708 ++#define NV04_PGRAPH_SURFACE 0x0040070C ++#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C ++#define NV04_PGRAPH_STATE 0x00400710 ++#define NV10_PGRAPH_SURFACE 0x00400710 ++#define NV04_PGRAPH_NOTIFY 0x00400714 ++#define NV10_PGRAPH_STATE 0x00400714 ++#define NV10_PGRAPH_NOTIFY 0x00400718 ++ ++#define NV04_PGRAPH_FIFO 0x00400720 ++ ++#define NV04_PGRAPH_BPIXEL 0x00400724 ++#define NV10_PGRAPH_RDI_INDEX 0x00400750 ++#define NV04_PGRAPH_FFINTFC_ST2 0x00400754 ++#define NV10_PGRAPH_RDI_DATA 0x00400754 ++#define NV04_PGRAPH_DMA_PITCH 0x00400760 ++#define NV10_PGRAPH_FFINTFC_ST2 0x00400764 ++#define NV04_PGRAPH_DVD_COLORFMT 0x00400764 ++#define NV04_PGRAPH_SCALED_FORMAT 0x00400768 ++#define NV10_PGRAPH_DMA_PITCH 0x00400770 ++#define NV10_PGRAPH_DVD_COLORFMT 0x00400774 ++#define NV10_PGRAPH_SCALED_FORMAT 0x00400778 ++#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780 ++#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784 ++#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788 ++#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001 ++#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002 ++#define NV04_PGRAPH_PATT_COLOR0 0x00400800 ++#define NV04_PGRAPH_PATT_COLOR1 0x00400804 ++#define NV04_PGRAPH_PATTERN 0x00400808 ++#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810 ++#define NV04_PGRAPH_CHROMA 0x00400814 ++#define NV04_PGRAPH_CONTROL0 0x00400818 ++#define NV04_PGRAPH_CONTROL1 0x0040081C ++#define NV04_PGRAPH_CONTROL2 0x00400820 ++#define NV04_PGRAPH_BLEND 0x00400824 ++#define NV04_PGRAPH_STORED_FMT 0x00400830 ++#define NV04_PGRAPH_PATT_COLORRAM 0x00400900 ++#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16)) ++#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16)) ++#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16)) ++#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16)) ++#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) ++#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) ++#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) 
++#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16)) ++#define NV04_PGRAPH_U_RAM 0x00400D00 ++#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16)) ++#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16)) ++#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16)) ++#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16)) ++#define NV04_PGRAPH_V_RAM 0x00400D40 ++#define NV04_PGRAPH_W_RAM 0x00400D80 ++#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 ++#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44 ++#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48 ++#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C ++#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50 ++#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54 ++#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58 ++#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C ++#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60 ++#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64 ++#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68 ++#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C ++#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00 ++#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20 ++#define NV10_PGRAPH_XFMODE0 0x00400F40 ++#define NV10_PGRAPH_XFMODE1 0x00400F44 ++#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48 ++#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C ++#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50 ++#define NV10_PGRAPH_PIPE_DATA 0x00400F54 ++#define NV04_PGRAPH_DMA_START_0 0x00401000 ++#define NV04_PGRAPH_DMA_START_1 0x00401004 ++#define NV04_PGRAPH_DMA_LENGTH 0x00401008 ++#define NV04_PGRAPH_DMA_MISC 0x0040100C ++#define NV04_PGRAPH_DMA_DATA_0 0x00401020 ++#define NV04_PGRAPH_DMA_DATA_1 0x00401024 ++#define NV04_PGRAPH_DMA_RM 0x00401030 ++#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040 ++#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044 ++#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048 ++#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C ++#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050 ++#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054 ++#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058 ++#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C ++#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060 ++#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080 ++#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084 ++#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088 ++#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C ++#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090 ++#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094 ++#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098 ++#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C ++#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0 ++#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16)) ++#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16)) ++#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16)) ++#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16)) ++ ++ ++/* It's a guess that this works on NV03. 
Confirmed on NV04, though */ ++#define NV04_PFIFO_DELAY_0 0x00002040 ++#define NV04_PFIFO_DMA_TIMESLICE 0x00002044 ++#define NV04_PFIFO_NEXT_CHANNEL 0x00002050 ++#define NV03_PFIFO_INTR_0 0x00002100 ++#define NV03_PFIFO_INTR_EN_0 0x00002140 ++# define NV_PFIFO_INTR_CACHE_ERROR (1<< 0) ++# define NV_PFIFO_INTR_RUNOUT (1<< 4) ++# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<< 8) ++# define NV_PFIFO_INTR_DMA_PUSHER (1<<12) ++# define NV_PFIFO_INTR_DMA_PT (1<<16) ++# define NV_PFIFO_INTR_SEMAPHORE (1<<20) ++# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24) ++#define NV03_PFIFO_RAMHT 0x00002210 ++#define NV03_PFIFO_RAMFC 0x00002214 ++#define NV03_PFIFO_RAMRO 0x00002218 ++#define NV40_PFIFO_RAMFC 0x00002220 ++#define NV03_PFIFO_CACHES 0x00002500 ++#define NV04_PFIFO_MODE 0x00002504 ++#define NV04_PFIFO_DMA 0x00002508 ++#define NV04_PFIFO_SIZE 0x0000250c ++#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4) ++#define NV50_PFIFO_CTX_TABLE__SIZE 128 ++#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31) ++#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30) ++#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF ++#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF ++#define NV03_PFIFO_CACHE0_PUSH0 0x00003000 ++#define NV03_PFIFO_CACHE0_PULL0 0x00003040 ++#define NV04_PFIFO_CACHE0_PULL0 0x00003050 ++#define NV04_PFIFO_CACHE0_PULL1 0x00003054 ++#define NV03_PFIFO_CACHE1_PUSH0 0x00003200 ++#define NV03_PFIFO_CACHE1_PUSH1 0x00003204 ++#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8) ++#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16) ++#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f ++#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f ++#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f ++#define NV03_PFIFO_CACHE1_PUT 0x00003210 ++#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220 ++#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0 ++# define 
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0 ++# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000 ++# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000 ++# define NV_PFIFO_CACHE1_ENDIAN 0x80000000 ++# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF ++# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000 ++#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228 ++#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c ++#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230 ++#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240 ++#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244 ++#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248 ++#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C ++#define NV03_PFIFO_CACHE1_PULL0 0x00003240 ++#define NV04_PFIFO_CACHE1_PULL0 0x00003250 ++#define NV03_PFIFO_CACHE1_PULL1 0x00003250 ++#define NV04_PFIFO_CACHE1_PULL1 0x00003254 ++#define NV04_PFIFO_CACHE1_HASH 0x00003258 ++#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260 ++#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264 ++#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268 ++#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C ++#define NV03_PFIFO_CACHE1_GET 0x00003270 ++#define NV04_PFIFO_CACHE1_ENGINE 0x00003280 ++#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0 ++#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0 ++#define NV40_PFIFO_UNK32E4 0x000032E4 ++#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8)) ++#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8)) ++#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8)) ++#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8)) ++ ++#define NV_CRTC0_INTSTAT 0x00600100 ++#define NV_CRTC0_INTEN 0x00600140 ++#define NV_CRTC1_INTSTAT 0x00602100 ++#define NV_CRTC1_INTEN 0x00602140 ++# define NV_CRTC_INTR_VBLANK (1<<0) ++ ++/* This name is a partial guess. */ ++#define NV50_DISPLAY_SUPERVISOR 0x00610024 ++ ++/* Fifo commands. 
These are not registers, nor masks. */ ++#define NV03_FIFO_CMD_JUMP 0x20000000 ++#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc ++#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK)) ++ ++/* RAMFC offsets */ ++#define NV04_RAMFC_DMA_PUT 0x00 ++#define NV04_RAMFC_DMA_GET 0x04 ++#define NV04_RAMFC_DMA_INSTANCE 0x08 ++#define NV04_RAMFC_DMA_STATE 0x0C ++#define NV04_RAMFC_DMA_FETCH 0x10 ++#define NV04_RAMFC_ENGINE 0x14 ++#define NV04_RAMFC_PULL1_ENGINE 0x18 ++ ++#define NV10_RAMFC_DMA_PUT 0x00 ++#define NV10_RAMFC_DMA_GET 0x04 ++#define NV10_RAMFC_REF_CNT 0x08 ++#define NV10_RAMFC_DMA_INSTANCE 0x0C ++#define NV10_RAMFC_DMA_STATE 0x10 ++#define NV10_RAMFC_DMA_FETCH 0x14 ++#define NV10_RAMFC_ENGINE 0x18 ++#define NV10_RAMFC_PULL1_ENGINE 0x1C ++#define NV10_RAMFC_ACQUIRE_VALUE 0x20 ++#define NV10_RAMFC_ACQUIRE_TIMESTAMP 0x24 ++#define NV10_RAMFC_ACQUIRE_TIMEOUT 0x28 ++#define NV10_RAMFC_SEMAPHORE 0x2C ++#define NV10_RAMFC_DMA_SUBROUTINE 0x30 ++ ++#define NV40_RAMFC_DMA_PUT 0x00 ++#define NV40_RAMFC_DMA_GET 0x04 ++#define NV40_RAMFC_REF_CNT 0x08 ++#define NV40_RAMFC_DMA_INSTANCE 0x0C ++#define NV40_RAMFC_DMA_DCOUNT /* ? */ 0x10 ++#define NV40_RAMFC_DMA_STATE 0x14 ++#define NV40_RAMFC_DMA_FETCH 0x18 ++#define NV40_RAMFC_ENGINE 0x1C ++#define NV40_RAMFC_PULL1_ENGINE 0x20 ++#define NV40_RAMFC_ACQUIRE_VALUE 0x24 ++#define NV40_RAMFC_ACQUIRE_TIMESTAMP 0x28 ++#define NV40_RAMFC_ACQUIRE_TIMEOUT 0x2C ++#define NV40_RAMFC_SEMAPHORE 0x30 ++#define NV40_RAMFC_DMA_SUBROUTINE 0x34 ++#define NV40_RAMFC_GRCTX_INSTANCE /* guess */ 0x38 ++#define NV40_RAMFC_DMA_TIMESLICE 0x3C ++#define NV40_RAMFC_UNK_40 0x40 ++#define NV40_RAMFC_UNK_44 0x44 ++#define NV40_RAMFC_UNK_48 0x48 ++#define NV40_RAMFC_UNK_4C 0x4C ++#define NV40_RAMFC_UNK_50 0x50 +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_sgdma.c git-nokia/drivers/gpu/drm-tungsten/nouveau_sgdma.c +--- git/drivers/gpu/drm-tungsten/nouveau_sgdma.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_sgdma.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,349 @@ ++#include "drmP.h" ++#include "nouveau_drv.h" ++ ++#define NV_CTXDMA_PAGE_SHIFT 12 ++#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) ++#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) ++ ++struct nouveau_sgdma_be { ++ struct drm_ttm_backend backend; ++ struct drm_device *dev; ++ ++ int pages; ++ int pages_populated; ++ dma_addr_t *pagelist; ++ int is_bound; ++ ++ unsigned int pte_start; ++}; ++ ++static int ++nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be) ++{ ++ return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ?
0 : 1); ++} ++ ++static int ++nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, ++ struct page **pages, struct page *dummy_read_page) ++{ ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ int p, d, o; ++ ++ DRM_DEBUG("num_pages = %ld\n", num_pages); ++ ++ if (nvbe->pagelist) ++ return -EINVAL; ++ nvbe->pages = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT; ++ nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t), ++ DRM_MEM_PAGES); ++ if (!nvbe->pagelist) ++ return -ENOMEM; ++ ++ nvbe->pages_populated = d = 0; ++ for (p = 0; p < num_pages; p++) { ++ for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) { ++ struct page *page = pages[p]; ++ if (!page) ++ page = dummy_read_page; ++ nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev, ++ page, o, ++ NV_CTXDMA_PAGE_SIZE, ++ PCI_DMA_BIDIRECTIONAL); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) ++ if (pci_dma_mapping_error(nvbe->dev->pdev, nvbe->pagelist[d])) { ++#else ++ if (pci_dma_mapping_error(nvbe->pagelist[d])) { ++#endif ++ be->func->clear(be); ++ DRM_ERROR("pci_map_page failed\n"); ++ return -EINVAL; ++ } ++ nvbe->pages_populated = ++d; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++nouveau_sgdma_clear(struct drm_ttm_backend *be) ++{ ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ int d; ++ ++ DRM_DEBUG("\n"); ++ ++ if (nvbe && nvbe->pagelist) { ++ if (nvbe->is_bound) ++ be->func->unbind(be); ++ ++ for (d = 0; d < nvbe->pages_populated; d++) { ++ pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d], ++ NV_CTXDMA_PAGE_SIZE, ++ PCI_DMA_BIDIRECTIONAL); ++ } ++ drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t), ++ DRM_MEM_PAGES); ++ } ++} ++ ++static int ++nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem) ++{ ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; ++ uint64_t offset = (mem->mm_node->start << PAGE_SHIFT); ++ uint32_t i; ++ ++ DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start, ++ offset, (mem->flags & DRM_BO_FLAG_CACHED) == 1); ++ ++ if (offset & NV_CTXDMA_PAGE_MASK) ++ return -EINVAL; ++ nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT); ++ if (dev_priv->card_type < NV_50) ++ nvbe->pte_start += 2; /* skip ctxdma header */ ++ ++ for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) { ++ uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start]; ++ ++ if (pteval & NV_CTXDMA_PAGE_MASK) { ++ DRM_ERROR("Bad pteval 0x%llx\n", pteval); ++ return -EINVAL; ++ } ++ ++ if (dev_priv->card_type < NV_50) { ++ INSTANCE_WR(gpuobj, i, pteval | 3); ++ } else { ++ INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21); ++ INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000); ++ } ++ } ++ ++ nvbe->is_bound = 1; ++ return 0; ++} ++ ++static int ++nouveau_sgdma_unbind(struct drm_ttm_backend *be) ++{ ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ if (nvbe->is_bound) { ++ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; ++ unsigned int pte; ++ ++ pte = nvbe->pte_start; ++ while (pte < (nvbe->pte_start + nvbe->pages)) { ++ uint64_t pteval = dev_priv->gart_info.sg_dummy_bus; ++ ++ if (dev_priv->card_type < NV_50) { ++ INSTANCE_WR(gpuobj, pte, pteval | 3); ++ } else { ++ INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21); ++ INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000); ++ } ++ ++ pte++; ++ } ++ ++ nvbe->is_bound = 0; ++ } ++
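++ /* every PTE in the previously bound range now points back at the ++ * scratch (dummy) page; the backend itself stays populated and can be re-bound. */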
++ return 0; ++} ++ ++static void ++nouveau_sgdma_destroy(struct drm_ttm_backend *be) ++{ ++ DRM_DEBUG("\n"); ++ if (be) { ++ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; ++ if (nvbe) { ++ if (nvbe->pagelist) ++ be->func->clear(be); ++ drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM); ++ } ++ } ++} ++ ++static struct drm_ttm_backend_func nouveau_sgdma_backend = { ++ .needs_ub_cache_adjust = nouveau_sgdma_needs_ub_cache_adjust, ++ .populate = nouveau_sgdma_populate, ++ .clear = nouveau_sgdma_clear, ++ .bind = nouveau_sgdma_bind, ++ .unbind = nouveau_sgdma_unbind, ++ .destroy = nouveau_sgdma_destroy ++}; ++ ++struct drm_ttm_backend * ++nouveau_sgdma_init_ttm(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_sgdma_be *nvbe; ++ ++ if (!dev_priv->gart_info.sg_ctxdma) ++ return NULL; ++ ++ nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM); ++ if (!nvbe) ++ return NULL; ++ ++ nvbe->dev = dev; ++ ++ nvbe->backend.func = &nouveau_sgdma_backend; ++ ++ return &nvbe->backend; ++} ++ ++int ++nouveau_sgdma_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = NULL; ++ uint32_t aper_size, obj_size; ++ int i, ret; ++ ++ if (dev_priv->card_type < NV_50) { ++ aper_size = (64 * 1024 * 1024); ++ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; ++ obj_size += 8; /* ctxdma header */ ++ } else { ++ /* 1 entire VM page table */ ++ aper_size = (512 * 1024 * 1024); ++ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; ++ } ++ ++ if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, ++ NVOBJ_FLAG_ALLOW_NO_REFS | ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, &gpuobj))) { ++ DRM_ERROR("Error creating sgdma object: %d\n", ret); ++ return ret; ++ } ++ ++ dev_priv->gart_info.sg_dummy_page = ++ alloc_page(GFP_KERNEL|__GFP_DMA32); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) ++ set_page_locked(dev_priv->gart_info.sg_dummy_page); ++#else ++ SetPageLocked(dev_priv->gart_info.sg_dummy_page); ++#endif ++ dev_priv->gart_info.sg_dummy_bus = ++ pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, ++ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); ++ ++ if (dev_priv->card_type < NV_50) { ++ /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and ++ * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE ++ * on those cards? 
*/ ++ INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | ++ (1 << 12) /* PT present */ | ++ (0 << 13) /* PT *not* linear */ | ++ (NV_DMA_ACCESS_RW << 14) | ++ (NV_DMA_TARGET_PCI << 16)); ++ INSTANCE_WR(gpuobj, 1, aper_size - 1); ++ for (i=2; i<2+(aper_size>>12); i++) { ++ INSTANCE_WR(gpuobj, i, ++ dev_priv->gart_info.sg_dummy_bus | 3); ++ } ++ } else { ++ /* NV50: 8-byte PTEs, all pointed at the dummy page until bound */ ++ for (i=0; i<obj_size; i+=8) { ++ INSTANCE_WR(gpuobj, (i+0)/4, ++ dev_priv->gart_info.sg_dummy_bus | 0x21); ++ INSTANCE_WR(gpuobj, (i+4)/4, 0); ++ } ++ } ++ ++ dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; ++ dev_priv->gart_info.aper_base = 0; ++ dev_priv->gart_info.aper_size = aper_size; ++ dev_priv->gart_info.sg_ctxdma = gpuobj; ++ return 0; ++} ++ ++void ++nouveau_sgdma_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (dev_priv->gart_info.sg_dummy_page) { ++ pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus, ++ NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); ++ unlock_page(dev_priv->gart_info.sg_dummy_page); ++ __free_page(dev_priv->gart_info.sg_dummy_page); ++ dev_priv->gart_info.sg_dummy_page = NULL; ++ dev_priv->gart_info.sg_dummy_bus = 0; ++ } ++ ++ nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma); ++} ++ ++int ++nouveau_sgdma_nottm_hack_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_ttm_backend *be; ++ struct drm_scatter_gather sgreq; ++ struct drm_mm_node mm_node; ++ struct drm_bo_mem_reg mem; ++ int ret; ++ ++ dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev); ++ if (!dev_priv->gart_info.sg_be) ++ return -ENOMEM; ++ be = dev_priv->gart_info.sg_be; ++ ++ /* Hack the aperture size down to the amount of system memory ++ * we're going to bind into it. ++ */ ++ if (dev_priv->gart_info.aper_size > 32*1024*1024) ++ dev_priv->gart_info.aper_size = 32*1024*1024; ++ ++ sgreq.size = dev_priv->gart_info.aper_size; ++ if ((ret = drm_sg_alloc(dev, &sgreq))) { ++ DRM_ERROR("drm_sg_alloc failed: %d\n", ret); ++ return ret; ++ } ++ dev_priv->gart_info.sg_handle = sgreq.handle; ++ ++ if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist, dev->bm.dummy_read_page))) { ++ DRM_ERROR("failed populate: %d\n", ret); ++ return ret; ++ } ++ ++ mm_node.start = 0; ++ mem.mm_node = &mm_node; ++ ++ if ((ret = be->func->bind(be, &mem))) { ++ DRM_ERROR("failed bind: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++void ++nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev) ++{ ++} ++ ++int ++nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; ++ int pte; ++ ++ pte = (offset >> NV_CTXDMA_PAGE_SHIFT); ++ if (dev_priv->card_type < NV_50) { ++ *page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; ++ return 0; ++ } ++ ++ DRM_ERROR("Unimplemented on NV50\n"); ++ return -EINVAL; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_state.c git-nokia/drivers/gpu/drm-tungsten/nouveau_state.c +--- git/drivers/gpu/drm-tungsten/nouveau_state.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_state.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,871 @@ ++/* ++ * Copyright 2005 Stephane Marchesin ++ * Copyright 2008 Stuart Bennett ++ * All Rights Reserved.
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_sarea.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++static int nouveau_init_card_mappings(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ /* resource 0 is mmio regs */ ++ /* resource 1 is linear FB */ ++ /* resource 2 is RAMIN (mmio regs + 0x1000000) */ ++ /* resource 6 is bios */ ++ ++ /* map the mmio regs */ ++ ret = drm_addmap(dev, drm_get_resource_start(dev, 0), ++ drm_get_resource_len(dev, 0), ++ _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); ++ if (ret) { ++ DRM_ERROR("Unable to initialize the mmio mapping (%d). 
" ++ "Please report your setup to " DRIVER_EMAIL "\n", ++ ret); ++ return -EINVAL; ++ } ++ DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset); ++ ++ /* map larger RAMIN aperture on NV40 cards */ ++ dev_priv->ramin = NULL; ++ if (dev_priv->card_type >= NV_40) { ++ int ramin_resource = 2; ++ if (drm_get_resource_len(dev, ramin_resource) == 0) ++ ramin_resource = 3; ++ ++ ret = drm_addmap(dev, ++ drm_get_resource_start(dev, ramin_resource), ++ drm_get_resource_len(dev, ramin_resource), ++ _DRM_REGISTERS, _DRM_READ_ONLY, ++ &dev_priv->ramin); ++ if (ret) { ++ DRM_ERROR("Failed to init RAMIN mapping, " ++ "limited instance memory available\n"); ++ dev_priv->ramin = NULL; ++ } ++ } ++ ++ /* On older cards (or if the above failed), create a map covering ++ * the BAR0 PRAMIN aperture */ ++ if (!dev_priv->ramin) { ++ ret = drm_addmap(dev, ++ drm_get_resource_start(dev, 0) + NV_RAMIN, ++ (1*1024*1024), ++ _DRM_REGISTERS, _DRM_READ_ONLY, ++ &dev_priv->ramin); ++ if (ret) { ++ DRM_ERROR("Failed to map BAR0 PRAMIN: %d\n", ret); ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++static int nouveau_stub_init(struct drm_device *dev) { return 0; } ++static void nouveau_stub_takedown(struct drm_device *dev) {} ++ ++static int nouveau_init_engine_ptrs(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ ++ switch (dev_priv->chipset & 0xf0) { ++ case 0x00: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv04_mc_init; ++ engine->mc.takedown = nv04_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv04_fb_init; ++ engine->fb.takedown = nv04_fb_takedown; ++ engine->graph.init = nv04_graph_init; ++ engine->graph.takedown = nv04_graph_takedown; ++ engine->graph.create_context = nv04_graph_create_context; ++ engine->graph.destroy_context = nv04_graph_destroy_context; ++ engine->graph.load_context = nv04_graph_load_context; ++ engine->graph.save_context = nv04_graph_save_context; ++ engine->fifo.channels = 16; ++ engine->fifo.init = nouveau_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv04_fifo_channel_id; ++ engine->fifo.create_context = nv04_fifo_create_context; ++ engine->fifo.destroy_context = nv04_fifo_destroy_context; ++ engine->fifo.load_context = nv04_fifo_load_context; ++ engine->fifo.save_context = nv04_fifo_save_context; ++ break; ++ case 0x10: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv04_mc_init; ++ engine->mc.takedown = nv04_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv10_fb_init; ++ engine->fb.takedown = nv10_fb_takedown; ++ engine->graph.init = nv10_graph_init; ++ engine->graph.takedown = nv10_graph_takedown; ++ engine->graph.create_context = nv10_graph_create_context; ++ 
engine->graph.destroy_context = nv10_graph_destroy_context; ++ engine->graph.load_context = nv10_graph_load_context; ++ engine->graph.save_context = nv10_graph_save_context; ++ engine->fifo.channels = 32; ++ engine->fifo.init = nouveau_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv10_fifo_channel_id; ++ engine->fifo.create_context = nv10_fifo_create_context; ++ engine->fifo.destroy_context = nv10_fifo_destroy_context; ++ engine->fifo.load_context = nv10_fifo_load_context; ++ engine->fifo.save_context = nv10_fifo_save_context; ++ break; ++ case 0x20: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv04_mc_init; ++ engine->mc.takedown = nv04_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv10_fb_init; ++ engine->fb.takedown = nv10_fb_takedown; ++ engine->graph.init = nv20_graph_init; ++ engine->graph.takedown = nv20_graph_takedown; ++ engine->graph.create_context = nv20_graph_create_context; ++ engine->graph.destroy_context = nv20_graph_destroy_context; ++ engine->graph.load_context = nv20_graph_load_context; ++ engine->graph.save_context = nv20_graph_save_context; ++ engine->fifo.channels = 32; ++ engine->fifo.init = nouveau_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv10_fifo_channel_id; ++ engine->fifo.create_context = nv10_fifo_create_context; ++ engine->fifo.destroy_context = nv10_fifo_destroy_context; ++ engine->fifo.load_context = nv10_fifo_load_context; ++ engine->fifo.save_context = nv10_fifo_save_context; ++ break; ++ case 0x30: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = nv04_instmem_unbind; ++ engine->mc.init = nv04_mc_init; ++ engine->mc.takedown = nv04_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv10_fb_init; ++ engine->fb.takedown = nv10_fb_takedown; ++ engine->graph.init = nv30_graph_init; ++ engine->graph.takedown = nv20_graph_takedown; ++ engine->graph.create_context = nv20_graph_create_context; ++ engine->graph.destroy_context = nv20_graph_destroy_context; ++ engine->graph.load_context = nv20_graph_load_context; ++ engine->graph.save_context = nv20_graph_save_context; ++ engine->fifo.channels = 32; ++ engine->fifo.init = nouveau_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv10_fifo_channel_id; ++ engine->fifo.create_context = nv10_fifo_create_context; ++ engine->fifo.destroy_context = nv10_fifo_destroy_context; ++ engine->fifo.load_context = nv10_fifo_load_context; ++ engine->fifo.save_context = nv10_fifo_save_context; ++ break; ++ case 0x40: ++ case 0x60: ++ engine->instmem.init = nv04_instmem_init; ++ engine->instmem.takedown= nv04_instmem_takedown; ++ engine->instmem.populate = nv04_instmem_populate; ++ engine->instmem.clear = nv04_instmem_clear; ++ engine->instmem.bind = nv04_instmem_bind; ++ engine->instmem.unbind = 
nv04_instmem_unbind; ++ engine->mc.init = nv40_mc_init; ++ engine->mc.takedown = nv40_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nv40_fb_init; ++ engine->fb.takedown = nv40_fb_takedown; ++ engine->graph.init = nv40_graph_init; ++ engine->graph.takedown = nv40_graph_takedown; ++ engine->graph.create_context = nv40_graph_create_context; ++ engine->graph.destroy_context = nv40_graph_destroy_context; ++ engine->graph.load_context = nv40_graph_load_context; ++ engine->graph.save_context = nv40_graph_save_context; ++ engine->fifo.channels = 32; ++ engine->fifo.init = nv40_fifo_init; ++ engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.channel_id = nv10_fifo_channel_id; ++ engine->fifo.create_context = nv40_fifo_create_context; ++ engine->fifo.destroy_context = nv40_fifo_destroy_context; ++ engine->fifo.load_context = nv40_fifo_load_context; ++ engine->fifo.save_context = nv40_fifo_save_context; ++ break; ++ case 0x50: ++ case 0x80: /* gotta love NVIDIA's consistency.. */ ++ case 0x90: ++ case 0xA0: ++ engine->instmem.init = nv50_instmem_init; ++ engine->instmem.takedown= nv50_instmem_takedown; ++ engine->instmem.populate = nv50_instmem_populate; ++ engine->instmem.clear = nv50_instmem_clear; ++ engine->instmem.bind = nv50_instmem_bind; ++ engine->instmem.unbind = nv50_instmem_unbind; ++ engine->mc.init = nv50_mc_init; ++ engine->mc.takedown = nv50_mc_takedown; ++ engine->timer.init = nv04_timer_init; ++ engine->timer.read = nv04_timer_read; ++ engine->timer.takedown = nv04_timer_takedown; ++ engine->fb.init = nouveau_stub_init; ++ engine->fb.takedown = nouveau_stub_takedown; ++ engine->graph.init = nv50_graph_init; ++ engine->graph.takedown = nv50_graph_takedown; ++ engine->graph.create_context = nv50_graph_create_context; ++ engine->graph.destroy_context = nv50_graph_destroy_context; ++ engine->graph.load_context = nv50_graph_load_context; ++ engine->graph.save_context = nv50_graph_save_context; ++ engine->fifo.channels = 128; ++ engine->fifo.init = nv50_fifo_init; ++ engine->fifo.takedown = nv50_fifo_takedown; ++ engine->fifo.channel_id = nv50_fifo_channel_id; ++ engine->fifo.create_context = nv50_fifo_create_context; ++ engine->fifo.destroy_context = nv50_fifo_destroy_context; ++ engine->fifo.load_context = nv50_fifo_load_context; ++ engine->fifo.save_context = nv50_fifo_save_context; ++ break; ++ default: ++ DRM_ERROR("NV%02x unsupported\n", dev_priv->chipset); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++int ++nouveau_card_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine; ++ int ret; ++ ++ DRM_DEBUG("prev state = %d\n", dev_priv->init_state); ++ ++ if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE) ++ return 0; ++ dev_priv->ttm = 0; ++ ++ /* Determine exact chipset we're running on */ ++ if (dev_priv->card_type < NV_10) ++ dev_priv->chipset = dev_priv->card_type; ++ else ++ dev_priv->chipset = ++ (NV_READ(NV03_PMC_BOOT_0) & 0x0ff00000) >> 20; ++ ++ /* Initialise internal driver API hooks */ ++ ret = nouveau_init_engine_ptrs(dev); ++ if (ret) return ret; ++ engine = &dev_priv->Engine; ++ dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; ++ ++ ret = nouveau_gpuobj_early_init(dev); ++ if (ret) return ret; ++ ++ /* Initialise instance memory, must happen before mem_init so we ++ * know exactly how much VRAM we're able to use for "normal" ++ * purposes. 
++ */ ++ ret = engine->instmem.init(dev); ++ if (ret) return ret; ++ ++ /* Setup the memory manager */ ++ if (dev_priv->ttm) { ++ ret = nouveau_mem_init_ttm(dev); ++ if (ret) return ret; ++ } else { ++ ret = nouveau_mem_init(dev); ++ if (ret) return ret; ++ } ++ ++ ret = nouveau_gpuobj_init(dev); ++ if (ret) return ret; ++ ++ /* Parse BIOS tables / Run init tables? */ ++ ++ /* PMC */ ++ ret = engine->mc.init(dev); ++ if (ret) return ret; ++ ++ /* PTIMER */ ++ ret = engine->timer.init(dev); ++ if (ret) return ret; ++ ++ /* PFB */ ++ ret = engine->fb.init(dev); ++ if (ret) return ret; ++ ++ /* PGRAPH */ ++ ret = engine->graph.init(dev); ++ if (ret) return ret; ++ ++ /* PFIFO */ ++ ret = engine->fifo.init(dev); ++ if (ret) return ret; ++ ++ /* this calls irq_preinstall, registers the irq handler and ++ * calls irq_postinstall ++ */ ++ ret = drm_irq_install(dev); ++ if (ret) return ret; ++ ++ /* what about PVIDEO/PCRTC/PRAMDAC etc? */ ++ ++ ret = nouveau_dma_channel_init(dev); ++ if (ret) return ret; ++ ++ dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; ++ return 0; ++} ++ ++static void nouveau_card_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ ++ DRM_DEBUG("prev state = %d\n", dev_priv->init_state); ++ ++ if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { ++ nouveau_dma_channel_takedown(dev); ++ ++ engine->fifo.takedown(dev); ++ engine->graph.takedown(dev); ++ engine->fb.takedown(dev); ++ engine->timer.takedown(dev); ++ engine->mc.takedown(dev); ++ ++ nouveau_sgdma_nottm_hack_takedown(dev); ++ nouveau_sgdma_takedown(dev); ++ ++ nouveau_gpuobj_takedown(dev); ++ nouveau_gpuobj_del(dev, &dev_priv->vm_vram_pt); ++ ++ nouveau_mem_close(dev); ++ engine->instmem.takedown(dev); ++ ++ drm_irq_uninstall(dev); ++ ++ nouveau_gpuobj_late_takedown(dev); ++ ++ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; ++ } ++} ++ ++/* called when a client dies: release whatever was allocated for its ++ * file_priv */ ++void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ nouveau_fifo_cleanup(dev, file_priv); ++ nouveau_mem_release(file_priv,dev_priv->fb_heap); ++ nouveau_mem_release(file_priv,dev_priv->agp_heap); ++ nouveau_mem_release(file_priv,dev_priv->pci_heap); ++} ++ ++/* first module load, setup the mmio/fb mapping */ ++int nouveau_firstopen(struct drm_device *dev) ++{ ++#if defined(__powerpc__) ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct device_node *dn; ++#endif ++ int ret; ++ /* Map any PCI resources we need on the card */ ++ ret = nouveau_init_card_mappings(dev); ++ if (ret) return ret; ++ ++#if defined(__powerpc__) ++ /* Put the card in BE mode if it's not */ ++ if (NV_READ(NV03_PMC_BOOT_1)) ++ NV_WRITE(NV03_PMC_BOOT_1,0x00000001); ++ ++ DRM_MEMORYBARRIER(); ++#endif ++ ++#if defined(__linux__) && defined(__powerpc__) ++ /* if we have an OF card, copy vbios to RAMIN */ ++ dn = pci_device_to_OF_node(dev->pdev); ++ if (dn) ++ { ++ int size; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) ++ const uint32_t *bios = of_get_property(dn, "NVDA,BMP", &size); ++#else ++ const uint32_t *bios = get_property(dn, "NVDA,BMP", &size); ++#endif ++ if (bios) ++ { ++ int i; ++ for(i=0;i<size;i+=4) ++ NV_WI32(i, bios[i/4]); ++ DRM_INFO("OF bios successfully copied (%d bytes)\n",size); ++ } ++ } ++#endif ++ return 0; ++} ++ ++#define NV40_CHIPSET_MASK 0x00000baf ++#define NV44_CHIPSET_MASK 0x00005450 ++ ++int nouveau_load(struct drm_device *dev, unsigned long flags) ++{ ++ struct drm_nouveau_private *dev_priv; ++ void __iomem *regs; ++ uint32_t reg0,reg1; ++ uint8_t architecture = 0; ++ ++ dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER); ++ if (!dev_priv) ++ return -ENOMEM; ++ ++ dev_priv->flags = flags & NOUVEAU_FLAGS; ++ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; ++ ++ DRM_DEBUG("vendor: 0x%X device: 0x%X class: 0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class); ++ ++ /* Time to determine the
card architecture */ ++ regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x8); ++ if (!regs) { ++ DRM_ERROR("Could not ioremap to determine register\n"); ++ return -ENOMEM; ++ } ++ ++ reg0 = readl(regs+NV03_PMC_BOOT_0); ++ reg1 = readl(regs+NV03_PMC_BOOT_1); ++#if defined(__powerpc__) ++ if (reg1) ++ reg0=___swab32(reg0); ++#endif ++ ++ /* We're dealing with >=NV10 */ ++ if ((reg0 & 0x0f000000) > 0 ) { ++ /* Bit 27-20 contain the architecture in hex */ ++ architecture = (reg0 & 0xff00000) >> 20; ++ /* NV04 or NV05 */ ++ } else if ((reg0 & 0xff00fff0) == 0x20004000) { ++ architecture = 0x04; ++ } ++ ++ iounmap(regs); ++ ++ if (architecture >= 0x80) { ++ dev_priv->card_type = NV_50; ++ } else if (architecture >= 0x60) { ++ /* FIXME we need to figure out who's who for NV6x */ ++ dev_priv->card_type = NV_44; ++ } else if (architecture >= 0x50) { ++ dev_priv->card_type = NV_50; ++ } else if (architecture >= 0x40) { ++ uint8_t subarch = architecture & 0xf; ++ /* Selection criteria borrowed from NV40EXA */ ++ if (NV40_CHIPSET_MASK & (1 << subarch)) { ++ dev_priv->card_type = NV_40; ++ } else if (NV44_CHIPSET_MASK & (1 << subarch)) { ++ dev_priv->card_type = NV_44; ++ } else { ++ dev_priv->card_type = NV_UNKNOWN; ++ } ++ } else if (architecture >= 0x30) { ++ dev_priv->card_type = NV_30; ++ } else if (architecture >= 0x20) { ++ dev_priv->card_type = NV_20; ++ } else if (architecture >= 0x17) { ++ dev_priv->card_type = NV_17; ++ } else if (architecture >= 0x11) { ++ dev_priv->card_type = NV_11; ++ } else if (architecture >= 0x10) { ++ dev_priv->card_type = NV_10; ++ } else if (architecture >= 0x04) { ++ dev_priv->card_type = NV_04; ++ } else { ++ dev_priv->card_type = NV_UNKNOWN; ++ } ++ ++ DRM_INFO("Detected an NV%d generation card (0x%08x)\n", dev_priv->card_type,reg0); ++ ++ if (dev_priv->card_type == NV_UNKNOWN) { ++ return -EINVAL; ++ } ++ ++ /* Special flags */ ++ if (dev->pci_device == 0x01a0) { ++ dev_priv->flags |= NV_NFORCE; ++ } else if (dev->pci_device == 0x01f0) { ++ dev_priv->flags |= NV_NFORCE2; ++ } ++ ++ dev->dev_private = (void *)dev_priv; ++ ++ return 0; ++} ++ ++void nouveau_lastclose(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* In the case of an error dev_priv may not be be allocated yet */ ++ if (dev_priv && dev_priv->card_type) { ++ nouveau_card_takedown(dev); ++ ++ if(dev_priv->fb_mtrr>0) ++ { ++ drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC); ++ dev_priv->fb_mtrr=0; ++ } ++ } ++} ++ ++int nouveau_unload(struct drm_device *dev) ++{ ++ drm_free(dev->dev_private, sizeof(*dev->dev_private), DRM_MEM_DRIVER); ++ dev->dev_private = NULL; ++ return 0; ++} ++ ++int ++nouveau_ioctl_card_init(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ return nouveau_card_init(dev); ++} ++ ++int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_getparam *getparam = data; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ switch (getparam->param) { ++ case NOUVEAU_GETPARAM_CHIPSET_ID: ++ getparam->value = dev_priv->chipset; ++ break; ++ case NOUVEAU_GETPARAM_PCI_VENDOR: ++ getparam->value=dev->pci_vendor; ++ break; ++ case NOUVEAU_GETPARAM_PCI_DEVICE: ++ getparam->value=dev->pci_device; ++ break; ++ case NOUVEAU_GETPARAM_BUS_TYPE: ++ if (drm_device_is_agp(dev)) ++ getparam->value=NV_AGP; ++ else if (drm_device_is_pcie(dev)) ++ 
getparam->value=NV_PCIE; ++ else ++ getparam->value=NV_PCI; ++ break; ++ case NOUVEAU_GETPARAM_FB_PHYSICAL: ++ getparam->value=dev_priv->fb_phys; ++ break; ++ case NOUVEAU_GETPARAM_AGP_PHYSICAL: ++ getparam->value=dev_priv->gart_info.aper_base; ++ break; ++ case NOUVEAU_GETPARAM_PCI_PHYSICAL: ++ if ( dev -> sg ) ++ getparam->value=(unsigned long)dev->sg->virtual; ++ else ++ { ++ DRM_ERROR("Requested PCIGART address, while no PCIGART was created\n"); ++ return -EINVAL; ++ } ++ break; ++ case NOUVEAU_GETPARAM_FB_SIZE: ++ getparam->value=dev_priv->fb_available_size; ++ break; ++ case NOUVEAU_GETPARAM_AGP_SIZE: ++ getparam->value=dev_priv->gart_info.aper_size; ++ break; ++ default: ++ DRM_ERROR("unknown parameter %lld\n", getparam->param); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_setparam *setparam = data; ++ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ switch (setparam->param) { ++ case NOUVEAU_SETPARAM_CMDBUF_LOCATION: ++ switch (setparam->value) { ++ case NOUVEAU_MEM_AGP: ++ case NOUVEAU_MEM_FB: ++ case NOUVEAU_MEM_PCI: ++ case NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI_ACCEPTABLE: ++ break; ++ default: ++ DRM_ERROR("invalid CMDBUF_LOCATION value=%lld\n", ++ setparam->value); ++ return -EINVAL; ++ } ++ dev_priv->config.cmdbuf.location = setparam->value; ++ break; ++ case NOUVEAU_SETPARAM_CMDBUF_SIZE: ++ dev_priv->config.cmdbuf.size = setparam->value; ++ break; ++ default: ++ DRM_ERROR("unknown parameter %lld\n", setparam->param); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* waits for idle */ ++void nouveau_wait_for_idle(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv=dev->dev_private; ++ switch(dev_priv->card_type) { ++ case NV_50: ++ break; ++ default: { ++ /* This stuff is more or less a copy of what is seen ++ * in nv28 kmmio dump. ++ */ ++ uint64_t started = dev_priv->Engine.timer.read(dev); ++ uint64_t stopped = started; ++ uint32_t status; ++ do { ++ uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE); ++ (void)pmc_e; ++ status = NV_READ(NV04_PGRAPH_STATUS); ++ if (!status) ++ break; ++ stopped = dev_priv->Engine.timer.read(dev); ++ /* It'll never wrap anyway... 
*/ ++ } while (stopped - started < 1000000000ULL); ++ if (status) ++ DRM_ERROR("timed out with status 0x%08x\n", ++ status); ++ } ++ } ++} ++ ++static int nouveau_suspend(struct drm_device *dev) ++{ ++ struct mem_block *p; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_suspend_resume *susres = &dev_priv->susres; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int i; ++ ++ drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER); ++ susres->ramin_size = 0; ++ list_for_each(p, dev_priv->ramin_heap) ++ if (p->file_priv && (p->start + p->size) > susres->ramin_size) ++ susres->ramin_size = p->start + p->size; ++ if (!(susres->ramin_copy = drm_alloc(susres->ramin_size, DRM_MEM_DRIVER))) { ++ DRM_ERROR("Couldn't alloc RAMIN backing for suspend\n"); ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < engine->fifo.channels; i++) { ++ uint64_t t_start = engine->timer.read(dev); ++ ++ if (dev_priv->fifos[i] == NULL) ++ continue; ++ ++ /* Give the channel a chance to idle, wait 2s (hopefully) */ ++ while (!nouveau_channel_idle(dev_priv->fifos[i])) ++ if (engine->timer.read(dev) - t_start > 2000000000ULL) { ++ DRM_ERROR("Failed to idle channel %d before " ++ "suspend.", dev_priv->fifos[i]->id); ++ return -EBUSY; ++ } ++ } ++ nouveau_wait_for_idle(dev); ++ ++ NV_WRITE(NV04_PGRAPH_FIFO, 0); ++ /* disable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ ++ susres->fifo_mode = NV_READ(NV04_PFIFO_MODE); ++ ++ if (dev_priv->card_type >= NV_10) { ++ susres->graph_state = NV_READ(NV10_PGRAPH_STATE); ++ susres->graph_ctx_control = NV_READ(NV10_PGRAPH_CTX_CONTROL); ++ } else { ++ susres->graph_state = NV_READ(NV04_PGRAPH_STATE); ++ susres->graph_ctx_control = NV_READ(NV04_PGRAPH_CTX_CONTROL); ++ } ++ ++ engine->fifo.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]); ++ engine->graph.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]); ++ nouveau_wait_for_idle(dev); ++ ++ for (i = 0; i < susres->ramin_size / 4; i++) ++ susres->ramin_copy[i] = NV_RI32(i << 2); ++ ++ /* reenable the fifo caches */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); ++ NV_WRITE(NV04_PGRAPH_FIFO, 1); ++ ++ return 0; ++} ++ ++static int nouveau_resume(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_suspend_resume *susres = &dev_priv->susres; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int i; ++ ++ if (!susres->ramin_copy) ++ return -EINVAL; ++ ++ DRM_DEBUG("Doing resume\n"); ++ ++ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { ++ struct drm_agp_info info; ++ struct drm_agp_mode mode; ++ ++ /* agp bridge drivers don't re-enable agp on resume. lame.
*/ ++ if ((i = drm_agp_info(dev, &info))) { ++ DRM_ERROR("Unable to get AGP info: %d\n", i); ++ return i; ++ } ++ mode.mode = info.mode; ++ if ((i = drm_agp_enable(dev, mode))) { ++ DRM_ERROR("Unable to enable AGP: %d\n", i); ++ return i; ++ } ++ } ++ ++ for (i = 0; i < susres->ramin_size / 4; i++) ++ NV_WI32(i << 2, susres->ramin_copy[i]); ++ ++ engine->mc.init(dev); ++ engine->timer.init(dev); ++ engine->fb.init(dev); ++ engine->graph.init(dev); ++ engine->fifo.init(dev); ++ ++ NV_WRITE(NV04_PGRAPH_FIFO, 0); ++ /* disable the fifo caches */ ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); ++ ++ /* PMC power cycling PFIFO in init clobbers some of the stuff stored in ++ * PRAMIN (such as NV04_PFIFO_CACHE1_DMA_INSTANCE). this is unhelpful ++ */ ++ for (i = 0; i < susres->ramin_size / 4; i++) ++ NV_WI32(i << 2, susres->ramin_copy[i]); ++ ++ engine->fifo.load_context(dev_priv->fifos[0]); ++ NV_WRITE(NV04_PFIFO_MODE, susres->fifo_mode); ++ ++ engine->graph.load_context(dev_priv->fifos[0]); ++ nouveau_wait_for_idle(dev); ++ ++ if (dev_priv->card_type >= NV_10) { ++ NV_WRITE(NV10_PGRAPH_STATE, susres->graph_state); ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, susres->graph_ctx_control); ++ } else { ++ NV_WRITE(NV04_PGRAPH_STATE, susres->graph_state); ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, susres->graph_ctx_control); ++ } ++ ++ /* reenable the fifo caches */ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); ++ NV_WRITE(NV04_PGRAPH_FIFO, 0x1); ++ ++ if (dev->irq_enabled) ++ nouveau_irq_postinstall(dev); ++ ++ drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER); ++ susres->ramin_copy = NULL; ++ susres->ramin_size = 0; ++ ++ return 0; ++} ++ ++int nouveau_ioctl_suspend(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ return nouveau_suspend(dev); ++} ++ ++int nouveau_ioctl_resume(struct drm_device *dev, void *data, ++ struct drm_file *file_priv) ++{ ++ NOUVEAU_CHECK_INITIALISED_WITH_RETURN; ++ ++ return nouveau_resume(dev); ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_swmthd.c git-nokia/drivers/gpu/drm-tungsten/nouveau_swmthd.c +--- git/drivers/gpu/drm-tungsten/nouveau_swmthd.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_swmthd.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,191 @@ ++/* ++ * Copyright (C) 2007 Arthur Huillet. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++/* ++ * Authors: ++ * Arthur Huillet ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_reg.h" ++ ++/*TODO: add a "card_type" attribute*/ ++typedef struct{ ++ uint32_t oclass; /* object class for this software method */ ++ uint32_t mthd; /* method number */ ++ void (*method_code)(struct drm_device *dev, uint32_t oclass, uint32_t mthd); /* pointer to the function that does the work */ ++ } nouveau_software_method_t; ++ ++ ++ /* This function handles the NV04 setcontext software methods. ++One function for all because they are very similar.*/ ++static void nouveau_NV04_setcontext_sw_method(struct drm_device *dev, uint32_t oclass, uint32_t mthd) { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst_loc = NV_READ(NV04_PGRAPH_CTX_SWITCH4) & 0xFFFF; ++ uint32_t value_to_set = 0, bit_to_set = 0; ++ ++ switch ( oclass ) { ++ case 0x4a: ++ switch ( mthd ) { ++ case 0x188 : ++ case 0x18c : ++ bit_to_set = 0; ++ break; ++ case 0x198 : ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x2fc : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ ++ break; ++ default : ; ++ }; ++ break; ++ case 0x5c: ++ switch ( mthd ) { ++ case 0x184: ++ bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/ ++ break; ++ case 0x188: ++ case 0x18c: ++ bit_to_set = 0; ++ break; ++ case 0x198: ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x2fc : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ ++ break; ++ }; ++ break; ++ case 0x5f: ++ switch ( mthd ) { ++ case 0x184 : ++ bit_to_set = 1 << 12; /*CHROMA_KEY_ENABLE*/ ++ break; ++ case 0x188 : ++ bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/ ++ break; ++ case 0x18c : ++ case 0x190 : ++ bit_to_set = 0; ++ break; ++ case 0x19c : ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x2fc : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ ++ break; ++ }; ++ break; ++ case 0x61: ++ switch ( mthd ) { ++ case 0x188 : ++ bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/ ++ break; ++ case 0x18c : ++ case 0x190 : ++ bit_to_set = 0; ++ break; ++ case 0x19c : ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x2fc : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/ ++ break; ++ }; ++ break; ++ case 0x77: ++ switch ( mthd ) { ++ case 0x198 : ++ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/ ++ break; ++ case 0x304 : ++ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; //PATCH_CONFIG ++ break; ++ }; ++ break; ++ default :; ++ }; ++ ++ value_to_set = (NV_READ(0x00700000 | inst_loc << 4))| bit_to_set; ++ ++ /*RAMIN*/ ++ nouveau_wait_for_idle(dev); ++ NV_WRITE(0x00700000 | inst_loc << 4, value_to_set); ++ ++ /*DRM_DEBUG("CTX_SWITCH1 value is %#x\n", NV_READ(NV04_PGRAPH_CTX_SWITCH1));*/ ++ NV_WRITE(NV04_PGRAPH_CTX_SWITCH1, 
value_to_set); ++ ++ /*DRM_DEBUG("CTX_CACHE1 + xxx value is %#x\n", NV_READ(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2)));*/ ++ NV_WRITE(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2), value_to_set); ++} ++ ++ nouveau_software_method_t nouveau_sw_methods[] = { ++ /*NV04 context software methods*/ ++ { 0x4a, 0x188, nouveau_NV04_setcontext_sw_method }, ++ { 0x4a, 0x18c, nouveau_NV04_setcontext_sw_method }, ++ { 0x4a, 0x198, nouveau_NV04_setcontext_sw_method }, ++ { 0x4a, 0x2fc, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x184, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x188, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x18c, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x198, nouveau_NV04_setcontext_sw_method }, ++ { 0x5c, 0x2fc, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x184, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x188, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x18c, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x190, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x19c, nouveau_NV04_setcontext_sw_method }, ++ { 0x5f, 0x2fc, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x188, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x18c, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x190, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x19c, nouveau_NV04_setcontext_sw_method }, ++ { 0x61, 0x2fc, nouveau_NV04_setcontext_sw_method }, ++ { 0x77, 0x198, nouveau_NV04_setcontext_sw_method }, ++ { 0x77, 0x304, nouveau_NV04_setcontext_sw_method }, ++ /*terminator*/ ++ { 0x0, 0x0, NULL, }, ++ }; ++ ++ int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method) { ++ int i = 0; ++ while ( nouveau_sw_methods[ i ] . method_code != NULL ) ++ { ++ if ( nouveau_sw_methods[ i ] . oclass == oclass && nouveau_sw_methods[ i ] . mthd == method ) ++ { ++ nouveau_sw_methods[ i ] . method_code(dev, oclass, method); ++ return 0; ++ } ++ i ++; ++ } ++ ++ return 1; ++ } +diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_swmthd.h git-nokia/drivers/gpu/drm-tungsten/nouveau_swmthd.h +--- git/drivers/gpu/drm-tungsten/nouveau_swmthd.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_swmthd.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,33 @@ ++/* ++ * Copyright (C) 2007 Arthur Huillet. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++/* ++ * Authors: ++ * Arthur Huillet ++ */ ++ ++int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method); /* execute the given software method, returns 0 on success */ +diff -Nurd git/drivers/gpu/drm-tungsten/nv04_fb.c git-nokia/drivers/gpu/drm-tungsten/nv04_fb.c +--- git/drivers/gpu/drm-tungsten/nv04_fb.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv04_fb.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,23 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv04_fb_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows ++ * nvidia reading PFB_CFG_0, then writing back its original value. ++ * (which was 0x701114 in this case) ++ */ ++ NV_WRITE(NV04_PFB_CFG0, 0x1114); ++ ++ return 0; ++} ++ ++void ++nv04_fb_takedown(struct drm_device *dev) ++{ ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv04_fifo.c git-nokia/drivers/gpu/drm-tungsten/nv04_fifo.c +--- git/drivers/gpu/drm-tungsten/nv04_fifo.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv04_fifo.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,138 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \ ++ NV04_RAMFC_##offset/4, (val)) ++#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \ ++ NV04_RAMFC_##offset/4) ++#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE)) ++#define NV04_RAMFC__SIZE 32 ++ ++int ++nv04_fifo_channel_id(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) & ++ NV03_PFIFO_CACHE1_PUSH1_CHID_MASK); ++} ++ ++int ++nv04_fifo_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, ++ NV04_RAMFC__SIZE, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, ++ NULL, &chan->ramfc))) ++ return ret; ++ ++ /* Setup initial state */ ++ RAMFC_WR(DMA_PUT, chan->pushbuf_base); ++ RAMFC_WR(DMA_GET, chan->pushbuf_base); ++ RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4); ++ RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | ++#ifdef __BIG_ENDIAN ++ NV_PFIFO_CACHE1_BIG_ENDIAN | ++#endif ++ 0)); ++ ++ /* enable the fifo dma operation */ ++ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE) | (1<<chan->id)); ++ return 0; ++} ++ ++void ++nv04_fifo_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id)); ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++} ++ ++int ++nv04_fifo_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, ++ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT)); ++ ++ tmp = RAMFC_RD(DMA_INSTANCE); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, RAMFC_RD(DMA_FETCH)); ++ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1, RAMFC_RD(PULL1_ENGINE)); ++ ++ /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); ++ ++ return 0; ++} ++ ++int ++nv04_fifo_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ RAMFC_WR(DMA_PUT, NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); ++ RAMFC_WR(DMA_GET, NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16; ++ tmp |= NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE); ++ RAMFC_WR(DMA_INSTANCE, tmp); ++ ++ RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); ++ RAMFC_WR(DMA_FETCH, NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); ++ RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE)); ++ RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1)); ++ ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv04_graph.c git-nokia/drivers/gpu/drm-tungsten/nv04_graph.c +---
git/drivers/gpu/drm-tungsten/nv04_graph.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv04_graph.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,516 @@ ++/* ++ * Copyright 2007 Stephane Marchesin ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++ ++static uint32_t nv04_graph_ctx_regs [] = { ++ NV04_PGRAPH_CTX_SWITCH1, ++ NV04_PGRAPH_CTX_SWITCH2, ++ NV04_PGRAPH_CTX_SWITCH3, ++ NV04_PGRAPH_CTX_SWITCH4, ++ NV04_PGRAPH_CTX_CACHE1, ++ NV04_PGRAPH_CTX_CACHE2, ++ NV04_PGRAPH_CTX_CACHE3, ++ NV04_PGRAPH_CTX_CACHE4, ++ 0x00400184, ++ 0x004001a4, ++ 0x004001c4, ++ 0x004001e4, ++ 0x00400188, ++ 0x004001a8, ++ 0x004001c8, ++ 0x004001e8, ++ 0x0040018c, ++ 0x004001ac, ++ 0x004001cc, ++ 0x004001ec, ++ 0x00400190, ++ 0x004001b0, ++ 0x004001d0, ++ 0x004001f0, ++ 0x00400194, ++ 0x004001b4, ++ 0x004001d4, ++ 0x004001f4, ++ 0x00400198, ++ 0x004001b8, ++ 0x004001d8, ++ 0x004001f8, ++ 0x0040019c, ++ 0x004001bc, ++ 0x004001dc, ++ 0x004001fc, ++ 0x00400174, ++ NV04_PGRAPH_DMA_START_0, ++ NV04_PGRAPH_DMA_START_1, ++ NV04_PGRAPH_DMA_LENGTH, ++ NV04_PGRAPH_DMA_MISC, ++ NV04_PGRAPH_DMA_PITCH, ++ NV04_PGRAPH_BOFFSET0, ++ NV04_PGRAPH_BBASE0, ++ NV04_PGRAPH_BLIMIT0, ++ NV04_PGRAPH_BOFFSET1, ++ NV04_PGRAPH_BBASE1, ++ NV04_PGRAPH_BLIMIT1, ++ NV04_PGRAPH_BOFFSET2, ++ NV04_PGRAPH_BBASE2, ++ NV04_PGRAPH_BLIMIT2, ++ NV04_PGRAPH_BOFFSET3, ++ NV04_PGRAPH_BBASE3, ++ NV04_PGRAPH_BLIMIT3, ++ NV04_PGRAPH_BOFFSET4, ++ NV04_PGRAPH_BBASE4, ++ NV04_PGRAPH_BLIMIT4, ++ NV04_PGRAPH_BOFFSET5, ++ NV04_PGRAPH_BBASE5, ++ NV04_PGRAPH_BLIMIT5, ++ NV04_PGRAPH_BPITCH0, ++ NV04_PGRAPH_BPITCH1, ++ NV04_PGRAPH_BPITCH2, ++ NV04_PGRAPH_BPITCH3, ++ NV04_PGRAPH_BPITCH4, ++ NV04_PGRAPH_SURFACE, ++ NV04_PGRAPH_STATE, ++ NV04_PGRAPH_BSWIZZLE2, ++ NV04_PGRAPH_BSWIZZLE5, ++ NV04_PGRAPH_BPIXEL, ++ NV04_PGRAPH_NOTIFY, ++ NV04_PGRAPH_PATT_COLOR0, ++ NV04_PGRAPH_PATT_COLOR1, ++ NV04_PGRAPH_PATT_COLORRAM+0x00, ++ NV04_PGRAPH_PATT_COLORRAM+0x01, ++ NV04_PGRAPH_PATT_COLORRAM+0x02, ++ NV04_PGRAPH_PATT_COLORRAM+0x03, ++ NV04_PGRAPH_PATT_COLORRAM+0x04, ++ NV04_PGRAPH_PATT_COLORRAM+0x05, ++ NV04_PGRAPH_PATT_COLORRAM+0x06, ++ NV04_PGRAPH_PATT_COLORRAM+0x07, ++ NV04_PGRAPH_PATT_COLORRAM+0x08, ++ NV04_PGRAPH_PATT_COLORRAM+0x09, ++ NV04_PGRAPH_PATT_COLORRAM+0x0A, ++ NV04_PGRAPH_PATT_COLORRAM+0x0B, ++ NV04_PGRAPH_PATT_COLORRAM+0x0C, 
++ NV04_PGRAPH_PATT_COLORRAM+0x0D, ++ NV04_PGRAPH_PATT_COLORRAM+0x0E, ++ NV04_PGRAPH_PATT_COLORRAM+0x0F, ++ NV04_PGRAPH_PATT_COLORRAM+0x10, ++ NV04_PGRAPH_PATT_COLORRAM+0x11, ++ NV04_PGRAPH_PATT_COLORRAM+0x12, ++ NV04_PGRAPH_PATT_COLORRAM+0x13, ++ NV04_PGRAPH_PATT_COLORRAM+0x14, ++ NV04_PGRAPH_PATT_COLORRAM+0x15, ++ NV04_PGRAPH_PATT_COLORRAM+0x16, ++ NV04_PGRAPH_PATT_COLORRAM+0x17, ++ NV04_PGRAPH_PATT_COLORRAM+0x18, ++ NV04_PGRAPH_PATT_COLORRAM+0x19, ++ NV04_PGRAPH_PATT_COLORRAM+0x1A, ++ NV04_PGRAPH_PATT_COLORRAM+0x1B, ++ NV04_PGRAPH_PATT_COLORRAM+0x1C, ++ NV04_PGRAPH_PATT_COLORRAM+0x1D, ++ NV04_PGRAPH_PATT_COLORRAM+0x1E, ++ NV04_PGRAPH_PATT_COLORRAM+0x1F, ++ NV04_PGRAPH_PATT_COLORRAM+0x20, ++ NV04_PGRAPH_PATT_COLORRAM+0x21, ++ NV04_PGRAPH_PATT_COLORRAM+0x22, ++ NV04_PGRAPH_PATT_COLORRAM+0x23, ++ NV04_PGRAPH_PATT_COLORRAM+0x24, ++ NV04_PGRAPH_PATT_COLORRAM+0x25, ++ NV04_PGRAPH_PATT_COLORRAM+0x26, ++ NV04_PGRAPH_PATT_COLORRAM+0x27, ++ NV04_PGRAPH_PATT_COLORRAM+0x28, ++ NV04_PGRAPH_PATT_COLORRAM+0x29, ++ NV04_PGRAPH_PATT_COLORRAM+0x2A, ++ NV04_PGRAPH_PATT_COLORRAM+0x2B, ++ NV04_PGRAPH_PATT_COLORRAM+0x2C, ++ NV04_PGRAPH_PATT_COLORRAM+0x2D, ++ NV04_PGRAPH_PATT_COLORRAM+0x2E, ++ NV04_PGRAPH_PATT_COLORRAM+0x2F, ++ NV04_PGRAPH_PATT_COLORRAM+0x30, ++ NV04_PGRAPH_PATT_COLORRAM+0x31, ++ NV04_PGRAPH_PATT_COLORRAM+0x32, ++ NV04_PGRAPH_PATT_COLORRAM+0x33, ++ NV04_PGRAPH_PATT_COLORRAM+0x34, ++ NV04_PGRAPH_PATT_COLORRAM+0x35, ++ NV04_PGRAPH_PATT_COLORRAM+0x36, ++ NV04_PGRAPH_PATT_COLORRAM+0x37, ++ NV04_PGRAPH_PATT_COLORRAM+0x38, ++ NV04_PGRAPH_PATT_COLORRAM+0x39, ++ NV04_PGRAPH_PATT_COLORRAM+0x3A, ++ NV04_PGRAPH_PATT_COLORRAM+0x3B, ++ NV04_PGRAPH_PATT_COLORRAM+0x3C, ++ NV04_PGRAPH_PATT_COLORRAM+0x3D, ++ NV04_PGRAPH_PATT_COLORRAM+0x3E, ++ NV04_PGRAPH_PATT_COLORRAM+0x3F, ++ NV04_PGRAPH_PATTERN, ++ 0x0040080c, ++ NV04_PGRAPH_PATTERN_SHAPE, ++ 0x00400600, ++ NV04_PGRAPH_ROP3, ++ NV04_PGRAPH_CHROMA, ++ NV04_PGRAPH_BETA_AND, ++ NV04_PGRAPH_BETA_PREMULT, ++ NV04_PGRAPH_CONTROL0, ++ NV04_PGRAPH_CONTROL1, ++ NV04_PGRAPH_CONTROL2, ++ NV04_PGRAPH_BLEND, ++ NV04_PGRAPH_STORED_FMT, ++ NV04_PGRAPH_SOURCE_COLOR, ++ 0x00400560, ++ 0x00400568, ++ 0x00400564, ++ 0x0040056c, ++ 0x00400400, ++ 0x00400480, ++ 0x00400404, ++ 0x00400484, ++ 0x00400408, ++ 0x00400488, ++ 0x0040040c, ++ 0x0040048c, ++ 0x00400410, ++ 0x00400490, ++ 0x00400414, ++ 0x00400494, ++ 0x00400418, ++ 0x00400498, ++ 0x0040041c, ++ 0x0040049c, ++ 0x00400420, ++ 0x004004a0, ++ 0x00400424, ++ 0x004004a4, ++ 0x00400428, ++ 0x004004a8, ++ 0x0040042c, ++ 0x004004ac, ++ 0x00400430, ++ 0x004004b0, ++ 0x00400434, ++ 0x004004b4, ++ 0x00400438, ++ 0x004004b8, ++ 0x0040043c, ++ 0x004004bc, ++ 0x00400440, ++ 0x004004c0, ++ 0x00400444, ++ 0x004004c4, ++ 0x00400448, ++ 0x004004c8, ++ 0x0040044c, ++ 0x004004cc, ++ 0x00400450, ++ 0x004004d0, ++ 0x00400454, ++ 0x004004d4, ++ 0x00400458, ++ 0x004004d8, ++ 0x0040045c, ++ 0x004004dc, ++ 0x00400460, ++ 0x004004e0, ++ 0x00400464, ++ 0x004004e4, ++ 0x00400468, ++ 0x004004e8, ++ 0x0040046c, ++ 0x004004ec, ++ 0x00400470, ++ 0x004004f0, ++ 0x00400474, ++ 0x004004f4, ++ 0x00400478, ++ 0x004004f8, ++ 0x0040047c, ++ 0x004004fc, ++ 0x0040053c, ++ 0x00400544, ++ 0x00400540, ++ 0x00400548, ++ 0x00400560, ++ 0x00400568, ++ 0x00400564, ++ 0x0040056c, ++ 0x00400534, ++ 0x00400538, ++ 0x00400514, ++ 0x00400518, ++ 0x0040051c, ++ 0x00400520, ++ 0x00400524, ++ 0x00400528, ++ 0x0040052c, ++ 0x00400530, ++ 0x00400d00, ++ 0x00400d40, ++ 0x00400d80, ++ 0x00400d04, ++ 0x00400d44, ++ 0x00400d84, ++ 0x00400d08, ++ 0x00400d48, ++ 0x00400d88, ++ 
0x00400d0c, ++ 0x00400d4c, ++ 0x00400d8c, ++ 0x00400d10, ++ 0x00400d50, ++ 0x00400d90, ++ 0x00400d14, ++ 0x00400d54, ++ 0x00400d94, ++ 0x00400d18, ++ 0x00400d58, ++ 0x00400d98, ++ 0x00400d1c, ++ 0x00400d5c, ++ 0x00400d9c, ++ 0x00400d20, ++ 0x00400d60, ++ 0x00400da0, ++ 0x00400d24, ++ 0x00400d64, ++ 0x00400da4, ++ 0x00400d28, ++ 0x00400d68, ++ 0x00400da8, ++ 0x00400d2c, ++ 0x00400d6c, ++ 0x00400dac, ++ 0x00400d30, ++ 0x00400d70, ++ 0x00400db0, ++ 0x00400d34, ++ 0x00400d74, ++ 0x00400db4, ++ 0x00400d38, ++ 0x00400d78, ++ 0x00400db8, ++ 0x00400d3c, ++ 0x00400d7c, ++ 0x00400dbc, ++ 0x00400590, ++ 0x00400594, ++ 0x00400598, ++ 0x0040059c, ++ 0x004005a8, ++ 0x004005ac, ++ 0x004005b0, ++ 0x004005b4, ++ 0x004005c0, ++ 0x004005c4, ++ 0x004005c8, ++ 0x004005cc, ++ 0x004005d0, ++ 0x004005d4, ++ 0x004005d8, ++ 0x004005dc, ++ 0x004005e0, ++ NV04_PGRAPH_PASSTHRU_0, ++ NV04_PGRAPH_PASSTHRU_1, ++ NV04_PGRAPH_PASSTHRU_2, ++ NV04_PGRAPH_DVD_COLORFMT, ++ NV04_PGRAPH_SCALED_FORMAT, ++ NV04_PGRAPH_MISC24_0, ++ NV04_PGRAPH_MISC24_1, ++ NV04_PGRAPH_MISC24_2, ++ 0x00400500, ++ 0x00400504, ++ NV04_PGRAPH_VALID1, ++ NV04_PGRAPH_VALID2 ++ ++ ++}; ++ ++struct graph_state { ++ int nv04[sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0])]; ++}; ++ ++void nouveau_nv04_context_switch(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ struct nouveau_channel *next, *last; ++ int chid; ++ ++ if (!dev) { ++ DRM_DEBUG("Invalid drm_device\n"); ++ return; ++ } ++ dev_priv = dev->dev_private; ++ if (!dev_priv) { ++ DRM_DEBUG("Invalid drm_nouveau_private\n"); ++ return; ++ } ++ if (!dev_priv->fifos) { ++ DRM_DEBUG("Invalid drm_nouveau_private->fifos\n"); ++ return; ++ } ++ ++ chid = engine->fifo.channel_id(dev); ++ next = dev_priv->fifos[chid]; ++ ++ if (!next) { ++ DRM_DEBUG("Invalid next channel\n"); ++ return; ++ } ++ ++ chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1); ++ last = dev_priv->fifos[chid]; ++ ++ if (!last) { ++ DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n", ++ next->id); ++ } else { ++ DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n", ++ last->id, next->id); ++ } ++ ++/* NV_WRITE(NV03_PFIFO_CACHES, 0x0); ++ NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0);*/ ++ NV_WRITE(NV04_PGRAPH_FIFO,0x0); ++ ++ if (last) ++ nv04_graph_save_context(last); ++ ++ nouveau_wait_for_idle(dev); ++ ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10000000); ++ NV_WRITE(NV04_PGRAPH_CTX_USER, (NV_READ(NV04_PGRAPH_CTX_USER) & 0xffffff) | (0x0f << 24)); ++ ++ nouveau_wait_for_idle(dev); ++ ++ nv04_graph_load_context(next); ++ ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100); ++ NV_WRITE(NV04_PGRAPH_CTX_USER, next->id << 24); ++ NV_WRITE(NV04_PGRAPH_FFINTFC_ST2, NV_READ(NV04_PGRAPH_FFINTFC_ST2)&0x000FFFFF); ++ ++/* NV_WRITE(NV04_PGRAPH_FIFO,0x0); ++ NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x1); ++ NV_WRITE(NV03_PFIFO_CACHES, 0x1);*/ ++ NV_WRITE(NV04_PGRAPH_FIFO,0x1); ++} ++ ++int nv04_graph_create_context(struct nouveau_channel *chan) { ++ struct graph_state* pgraph_ctx; ++ DRM_DEBUG("nv04_graph_context_create %d\n", chan->id); ++ ++ chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx), ++ DRM_MEM_DRIVER); ++ ++ if (pgraph_ctx == NULL) ++ return -ENOMEM; ++ ++ //dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; ++ pgraph_ctx->nv04[0] = 0x0001ffff; ++ /* is it really needed ??? 
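++ * (the commented-out reads below captured NV_PGRAPH_DEBUG_4 and
++ * 0x004006b0 into the context; nv10_graph_create_context does the
++ * equivalent for NV17+ via NV17_WRITE_CTX)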
*/ ++ //dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4); ++ //dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0); ++ ++ return 0; ++} ++ ++void nv04_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ ++ drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER); ++ chan->pgraph_ctx = NULL; ++} ++ ++int nv04_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int i; ++ ++ for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) ++ NV_WRITE(nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]); ++ ++ return 0; ++} ++ ++int nv04_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int i; ++ ++ for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) ++ pgraph_ctx->nv04[i] = NV_READ(nv04_graph_ctx_regs[i]); ++ ++ return 0; ++} ++ ++int nv04_graph_init(struct drm_device *dev) { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ /* Enable PGRAPH interrupts */ ++ NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF); ++ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_VALID1, 0); ++ NV_WRITE(NV04_PGRAPH_VALID2, 0); ++ /*NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000); ++ /*1231C000 blob, 001 haiku*/ ++ //*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/ ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x72111100); ++ /*0x72111100 blob , 01 haiku*/ ++ /*NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/ ++ NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f071); ++ /*haiku same*/ ++ ++ /*NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/ ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf0d4ff31); ++ /*haiku and blob 10d4*/ ++ ++ NV_WRITE(NV04_PGRAPH_STATE , 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_CTX_CONTROL , 0x10010100); ++ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); ++ ++ /* These don't belong here, they're part of a per-channel context */ ++ NV_WRITE(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_BETA_AND , 0xFFFFFFFF); ++ ++ return 0; ++} ++ ++void nv04_graph_takedown(struct drm_device *dev) ++{ ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv04_instmem.c git-nokia/drivers/gpu/drm-tungsten/nv04_instmem.c +--- git/drivers/gpu/drm-tungsten/nv04_instmem.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv04_instmem.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,159 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++static void ++nv04_instmem_determine_amount(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ /* Figure out how much instance memory we need */ ++ if (dev_priv->card_type >= NV_40) { ++ /* We'll want more instance memory than this on some NV4x cards. ++ * There's a 16MB aperture to play with that maps onto the end ++ * of vram. For now, only reserve a small piece until we know ++ * more about what each chipset requires. 
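++ * With the 1MiB reserved below, RAMIN carries the 64KiB BIOS image,
++ * the RAMHT at +0x10000, RAMRO at +0x11200 and RAMFC at +0x11400 or
++ * +0x20000 (see nv04_instmem_configure_fixed_tables), with the
++ * remainder handed to the RAMIN heap.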
++ */
++ dev_priv->ramin_rsvd_vram = (1*1024* 1024);
++ } else {
++ /*XXX: what *are* the limits on <NV40 cards?
++ */
++ dev_priv->ramin_rsvd_vram = (512*1024);
++ }
++ DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10);
++
++ /* Clear all of it, except the BIOS image that's in the first 64KiB */
++ for (i=(64*1024); i<dev_priv->ramin_rsvd_vram; i+=4)
++ NV_WI32(i, 0x00000000);
++}
++
++static void
++nv04_instmem_configure_fixed_tables(struct drm_device *dev)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ struct nouveau_engine *engine = &dev_priv->Engine;
++
++ /* FIFO hash table (RAMHT)
++ * use 4k hash table at RAMIN+0x10000
++ * TODO: extend the hash table
++ */
++ dev_priv->ramht_offset = 0x10000;
++ dev_priv->ramht_bits = 9;
++ dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
++ DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
++ dev_priv->ramht_size);
++
++ /* FIFO runout table (RAMRO) - 512k at 0x11200 */
++ dev_priv->ramro_offset = 0x11200;
++ dev_priv->ramro_size = 512;
++ DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
++ dev_priv->ramro_size);
++
++ /* FIFO context table (RAMFC)
++ * NV40 : Not sure exactly how to position RAMFC on some cards,
++ * 0x30002 seems to position it at RAMIN+0x20000 on these
++ * cards. RAMFC is 4kb (32 fifos, 128byte entries).
++ * Others: Position RAMFC at RAMIN+0x11400
++ */
++ switch(dev_priv->card_type)
++ {
++ case NV_40:
++ case NV_44:
++ dev_priv->ramfc_offset = 0x20000;
++ dev_priv->ramfc_size = engine->fifo.channels *
++ nouveau_fifo_ctx_size(dev);
++ break;
++ case NV_30:
++ case NV_20:
++ case NV_17:
++ case NV_11:
++ case NV_10:
++ case NV_04:
++ default:
++ dev_priv->ramfc_offset = 0x11400;
++ dev_priv->ramfc_size = engine->fifo.channels *
++ nouveau_fifo_ctx_size(dev);
++ break;
++ }
++ DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
++ dev_priv->ramfc_size);
++}
++
++int nv04_instmem_init(struct drm_device *dev)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ uint32_t offset;
++ int ret = 0;
++
++ nv04_instmem_determine_amount(dev);
++ nv04_instmem_configure_fixed_tables(dev);
++
++ /* Create a heap to manage RAMIN allocations, we don't allocate
++ * the space that was reserved for RAMHT/FC/RO.
++ */
++ offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
++
++ /* On my NV4E, there's *something* clobbering the 16KiB just after
++ * where we setup these fixed tables. No idea what it is just yet,
++ * so reserve this space on all NV4X cards for now.
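++ * Worked example, assuming the 32 fifo * 128 byte RAMFC noted above:
++ * on NV40 the heap would start at 0x20000 + 0x1000 = 0x21000, or
++ * 0x25000 once the extra 16KiB is added.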
++ */ ++ if (dev_priv->card_type >= NV_40) ++ offset += 16*1024; ++ ++ ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, ++ offset, dev_priv->ramin_rsvd_vram - offset); ++ if (ret) { ++ dev_priv->ramin_heap = NULL; ++ DRM_ERROR("Failed to init RAMIN heap\n"); ++ } ++ ++ return ret; ++} ++ ++void ++nv04_instmem_takedown(struct drm_device *dev) ++{ ++} ++ ++int ++nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) ++{ ++ if (gpuobj->im_backing) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++void ++nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (gpuobj && gpuobj->im_backing) { ++ if (gpuobj->im_bound) ++ dev_priv->Engine.instmem.unbind(dev, gpuobj); ++ gpuobj->im_backing = NULL; ++ } ++} ++ ++int ++nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ if (!gpuobj->im_pramin || gpuobj->im_bound) ++ return -EINVAL; ++ ++ gpuobj->im_bound = 1; ++ return 0; ++} ++ ++int ++nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ if (gpuobj->im_bound == 0) ++ return -EINVAL; ++ ++ gpuobj->im_bound = 0; ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv04_mc.c git-nokia/drivers/gpu/drm-tungsten/nv04_mc.c +--- git/drivers/gpu/drm-tungsten/nv04_mc.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv04_mc.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,22 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv04_mc_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ /* Power up everything, resetting each individual unit will ++ * be done later if needed. ++ */ ++ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); ++ ++ return 0; ++} ++ ++void ++nv04_mc_takedown(struct drm_device *dev) ++{ ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv04_timer.c git-nokia/drivers/gpu/drm-tungsten/nv04_timer.c +--- git/drivers/gpu/drm-tungsten/nv04_timer.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv04_timer.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,53 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv04_timer_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV04_PTIMER_INTR_EN_0, 0x00000000); ++ NV_WRITE(NV04_PTIMER_INTR_0, 0xFFFFFFFF); ++ ++ /* Just use the pre-existing values when possible for now; these regs ++ * are not written in nv (driver writer missed a /4 on the address), and ++ * writing 8 and 3 to the correct regs breaks the timings on the LVDS ++ * hardware sequencing microcode. ++ * A correct solution (involving calculations with the GPU PLL) can ++ * be done when kernel modesetting lands ++ */ ++ if (!NV_READ(NV04_PTIMER_NUMERATOR) || !NV_READ(NV04_PTIMER_DENOMINATOR)) { ++ NV_WRITE(NV04_PTIMER_NUMERATOR, 0x00000008); ++ NV_WRITE(NV04_PTIMER_DENOMINATOR, 0x00000003); ++ } ++ ++ return 0; ++} ++ ++uint64_t ++nv04_timer_read(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t low; ++ /* From kmmio dumps on nv28 this looks like how the blob does this. ++ * It reads the high dword twice, before and after. ++ * The only explanation seems to be that the 64-bit timer counter ++ * advances between high and low dword reads and may corrupt the ++ * result. Not confirmed. 
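++ * Example of the torn read this guards against: with the counter at
++ * 0x00000001ffffffff, a wrap between the high and low dword reads
++ * would yield 0x0000000100000000; looping until the high dword is
++ * stable discards that result.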
++ */
++ uint32_t high2 = NV_READ(NV04_PTIMER_TIME_1);
++ uint32_t high1;
++ do {
++ high1 = high2;
++ low = NV_READ(NV04_PTIMER_TIME_0);
++ high2 = NV_READ(NV04_PTIMER_TIME_1);
++ } while(high1 != high2);
++ return (((uint64_t)high2) << 32) | (uint64_t)low;
++}
++
++void
++nv04_timer_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv10_fb.c git-nokia/drivers/gpu/drm-tungsten/nv10_fb.c
+--- git/drivers/gpu/drm-tungsten/nv10_fb.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv10_fb.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,25 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv10_fb_init(struct drm_device *dev)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ uint32_t fb_bar_size;
++ int i;
++
++ fb_bar_size = drm_get_resource_len(dev, 0) - 1;
++ for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
++ NV_WRITE(NV10_PFB_TILE(i), 0);
++ NV_WRITE(NV10_PFB_TLIMIT(i), fb_bar_size);
++ }
++
++ return 0;
++}
++
++void
++nv10_fb_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv10_fifo.c git-nokia/drivers/gpu/drm-tungsten/nv10_fifo.c
+--- git/drivers/gpu/drm-tungsten/nv10_fifo.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv10_fifo.c 2008-12-08 14:52:52.000000000 +0100
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++#define RAMFC_WR(offset, val) INSTANCE_WR(chan->ramfc->gpuobj, \
++ NV10_RAMFC_##offset/4, (val))
++#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \
++ NV10_RAMFC_##offset/4)
++#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
++#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
++
++int
++nv10_fifo_channel_id(struct drm_device *dev)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++ return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) &
++ NV10_PFIFO_CACHE1_PUSH1_CHID_MASK);
++}
++
++int
++nv10_fifo_create_context(struct nouveau_channel *chan)
++{
++ struct drm_device *dev = chan->dev;
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ int ret;
++
++ if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
++ NV10_RAMFC__SIZE,
++ NVOBJ_FLAG_ZERO_ALLOC |
++ NVOBJ_FLAG_ZERO_FREE,
++ NULL, &chan->ramfc)))
++ return ret;
++
++ /* Fill entries that are seen filled in dumps of nvidia driver just
++ * after channel's is put into DMA mode
++ */
++ RAMFC_WR(DMA_PUT , chan->pushbuf_base);
++ RAMFC_WR(DMA_GET , chan->pushbuf_base);
++ RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4);
++ RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
++ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
++ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
++#ifdef __BIG_ENDIAN
++ NV_PFIFO_CACHE1_BIG_ENDIAN |
++#endif
++ 0);
++
++ /* enable the fifo dma operation */
++ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<chan->id));
++ return 0;
++}
++
++void
++nv10_fifo_destroy_context(struct nouveau_channel *chan)
++{
++ struct drm_device *dev = chan->dev;
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
++
++ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++}
++
++int
++nv10_fifo_load_context(struct nouveau_channel *chan)
++{
++ struct drm_device *dev = chan->dev;
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ uint32_t tmp;
++
++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,
++ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
++
++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET));
++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT));
++ NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT));
++
++ tmp = RAMFC_RD(DMA_INSTANCE);
++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , tmp & 0xFFFF);
++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , tmp >> 16);
++
++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE));
++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , RAMFC_RD(DMA_FETCH));
++ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE));
++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE));
++
++ if
(dev_priv->chipset >= 0x17) { ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE, ++ RAMFC_RD(ACQUIRE_VALUE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, ++ RAMFC_RD(ACQUIRE_TIMESTAMP)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, ++ RAMFC_RD(ACQUIRE_TIMEOUT)); ++ NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE, ++ RAMFC_RD(SEMAPHORE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE, ++ RAMFC_RD(DMA_SUBROUTINE)); ++ } ++ ++ /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); ++ ++ return 0; ++} ++ ++int ++nv10_fifo_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); ++ RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); ++ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF; ++ tmp |= (NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16); ++ RAMFC_WR(DMA_INSTANCE , tmp); ++ ++ RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); ++ RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH)); ++ RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); ++ RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); ++ ++ if (dev_priv->chipset >= 0x17) { ++ RAMFC_WR(ACQUIRE_VALUE, ++ NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); ++ RAMFC_WR(ACQUIRE_TIMESTAMP, ++ NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP)); ++ RAMFC_WR(ACQUIRE_TIMEOUT, ++ NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); ++ RAMFC_WR(SEMAPHORE, ++ NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); ++ RAMFC_WR(DMA_SUBROUTINE, ++ NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ } ++ ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv10_graph.c git-nokia/drivers/gpu/drm-tungsten/nv10_graph.c +--- git/drivers/gpu/drm-tungsten/nv10_graph.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv10_graph.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,914 @@ ++/* ++ * Copyright 2007 Matthieu CASTET ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drm.h" ++#include "nouveau_drv.h" ++ ++#define NV10_FIFO_NUMBER 32 ++ ++struct pipe_state { ++ uint32_t pipe_0x0000[0x040/4]; ++ uint32_t pipe_0x0040[0x010/4]; ++ uint32_t pipe_0x0200[0x0c0/4]; ++ uint32_t pipe_0x4400[0x080/4]; ++ uint32_t pipe_0x6400[0x3b0/4]; ++ uint32_t pipe_0x6800[0x2f0/4]; ++ uint32_t pipe_0x6c00[0x030/4]; ++ uint32_t pipe_0x7000[0x130/4]; ++ uint32_t pipe_0x7400[0x0c0/4]; ++ uint32_t pipe_0x7800[0x0c0/4]; ++}; ++ ++static int nv10_graph_ctx_regs [] = { ++NV10_PGRAPH_CTX_SWITCH1, ++NV10_PGRAPH_CTX_SWITCH2, ++NV10_PGRAPH_CTX_SWITCH3, ++NV10_PGRAPH_CTX_SWITCH4, ++NV10_PGRAPH_CTX_SWITCH5, ++NV10_PGRAPH_CTX_CACHE1, /* 8 values from 0x400160 to 0x40017c */ ++NV10_PGRAPH_CTX_CACHE2, /* 8 values from 0x400180 to 0x40019c */ ++NV10_PGRAPH_CTX_CACHE3, /* 8 values from 0x4001a0 to 0x4001bc */ ++NV10_PGRAPH_CTX_CACHE4, /* 8 values from 0x4001c0 to 0x4001dc */ ++NV10_PGRAPH_CTX_CACHE5, /* 8 values from 0x4001e0 to 0x4001fc */ ++0x00400164, ++0x00400184, ++0x004001a4, ++0x004001c4, ++0x004001e4, ++0x00400168, ++0x00400188, ++0x004001a8, ++0x004001c8, ++0x004001e8, ++0x0040016c, ++0x0040018c, ++0x004001ac, ++0x004001cc, ++0x004001ec, ++0x00400170, ++0x00400190, ++0x004001b0, ++0x004001d0, ++0x004001f0, ++0x00400174, ++0x00400194, ++0x004001b4, ++0x004001d4, ++0x004001f4, ++0x00400178, ++0x00400198, ++0x004001b8, ++0x004001d8, ++0x004001f8, ++0x0040017c, ++0x0040019c, ++0x004001bc, ++0x004001dc, ++0x004001fc, ++NV10_PGRAPH_CTX_USER, ++NV04_PGRAPH_DMA_START_0, ++NV04_PGRAPH_DMA_START_1, ++NV04_PGRAPH_DMA_LENGTH, ++NV04_PGRAPH_DMA_MISC, ++NV10_PGRAPH_DMA_PITCH, ++NV04_PGRAPH_BOFFSET0, ++NV04_PGRAPH_BBASE0, ++NV04_PGRAPH_BLIMIT0, ++NV04_PGRAPH_BOFFSET1, ++NV04_PGRAPH_BBASE1, ++NV04_PGRAPH_BLIMIT1, ++NV04_PGRAPH_BOFFSET2, ++NV04_PGRAPH_BBASE2, ++NV04_PGRAPH_BLIMIT2, ++NV04_PGRAPH_BOFFSET3, ++NV04_PGRAPH_BBASE3, ++NV04_PGRAPH_BLIMIT3, ++NV04_PGRAPH_BOFFSET4, ++NV04_PGRAPH_BBASE4, ++NV04_PGRAPH_BLIMIT4, ++NV04_PGRAPH_BOFFSET5, ++NV04_PGRAPH_BBASE5, ++NV04_PGRAPH_BLIMIT5, ++NV04_PGRAPH_BPITCH0, ++NV04_PGRAPH_BPITCH1, ++NV04_PGRAPH_BPITCH2, ++NV04_PGRAPH_BPITCH3, ++NV04_PGRAPH_BPITCH4, ++NV10_PGRAPH_SURFACE, ++NV10_PGRAPH_STATE, ++NV04_PGRAPH_BSWIZZLE2, ++NV04_PGRAPH_BSWIZZLE5, ++NV04_PGRAPH_BPIXEL, ++NV10_PGRAPH_NOTIFY, ++NV04_PGRAPH_PATT_COLOR0, ++NV04_PGRAPH_PATT_COLOR1, ++NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */ ++0x00400904, ++0x00400908, ++0x0040090c, ++0x00400910, ++0x00400914, ++0x00400918, ++0x0040091c, ++0x00400920, ++0x00400924, ++0x00400928, ++0x0040092c, ++0x00400930, ++0x00400934, ++0x00400938, ++0x0040093c, ++0x00400940, ++0x00400944, ++0x00400948, ++0x0040094c, ++0x00400950, ++0x00400954, ++0x00400958, ++0x0040095c, ++0x00400960, ++0x00400964, ++0x00400968, ++0x0040096c, ++0x00400970, ++0x00400974, ++0x00400978, ++0x0040097c, ++0x00400980, ++0x00400984, ++0x00400988, ++0x0040098c, ++0x00400990, ++0x00400994, ++0x00400998, ++0x0040099c, ++0x004009a0, ++0x004009a4, ++0x004009a8, ++0x004009ac, ++0x004009b0, ++0x004009b4, ++0x004009b8, ++0x004009bc, ++0x004009c0, ++0x004009c4, ++0x004009c8, ++0x004009cc, ++0x004009d0, ++0x004009d4, ++0x004009d8, ++0x004009dc, ++0x004009e0, ++0x004009e4, ++0x004009e8, ++0x004009ec, ++0x004009f0, ++0x004009f4, ++0x004009f8, ++0x004009fc, ++NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */ ++0x0040080c, ++NV04_PGRAPH_PATTERN_SHAPE, ++NV03_PGRAPH_MONO_COLOR0, ++NV04_PGRAPH_ROP3, ++NV04_PGRAPH_CHROMA, ++NV04_PGRAPH_BETA_AND, 
++NV04_PGRAPH_BETA_PREMULT, ++0x00400e70, ++0x00400e74, ++0x00400e78, ++0x00400e7c, ++0x00400e80, ++0x00400e84, ++0x00400e88, ++0x00400e8c, ++0x00400ea0, ++0x00400ea4, ++0x00400ea8, ++0x00400e90, ++0x00400e94, ++0x00400e98, ++0x00400e9c, ++NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00 to 0x400f1c */ ++NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20 to 0x400f3c */ ++0x00400f04, ++0x00400f24, ++0x00400f08, ++0x00400f28, ++0x00400f0c, ++0x00400f2c, ++0x00400f10, ++0x00400f30, ++0x00400f14, ++0x00400f34, ++0x00400f18, ++0x00400f38, ++0x00400f1c, ++0x00400f3c, ++NV10_PGRAPH_XFMODE0, ++NV10_PGRAPH_XFMODE1, ++NV10_PGRAPH_GLOBALSTATE0, ++NV10_PGRAPH_GLOBALSTATE1, ++NV04_PGRAPH_STORED_FMT, ++NV04_PGRAPH_SOURCE_COLOR, ++NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */ ++NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */ ++0x00400404, ++0x00400484, ++0x00400408, ++0x00400488, ++0x0040040c, ++0x0040048c, ++0x00400410, ++0x00400490, ++0x00400414, ++0x00400494, ++0x00400418, ++0x00400498, ++0x0040041c, ++0x0040049c, ++0x00400420, ++0x004004a0, ++0x00400424, ++0x004004a4, ++0x00400428, ++0x004004a8, ++0x0040042c, ++0x004004ac, ++0x00400430, ++0x004004b0, ++0x00400434, ++0x004004b4, ++0x00400438, ++0x004004b8, ++0x0040043c, ++0x004004bc, ++0x00400440, ++0x004004c0, ++0x00400444, ++0x004004c4, ++0x00400448, ++0x004004c8, ++0x0040044c, ++0x004004cc, ++0x00400450, ++0x004004d0, ++0x00400454, ++0x004004d4, ++0x00400458, ++0x004004d8, ++0x0040045c, ++0x004004dc, ++0x00400460, ++0x004004e0, ++0x00400464, ++0x004004e4, ++0x00400468, ++0x004004e8, ++0x0040046c, ++0x004004ec, ++0x00400470, ++0x004004f0, ++0x00400474, ++0x004004f4, ++0x00400478, ++0x004004f8, ++0x0040047c, ++0x004004fc, ++NV03_PGRAPH_ABS_UCLIP_XMIN, ++NV03_PGRAPH_ABS_UCLIP_XMAX, ++NV03_PGRAPH_ABS_UCLIP_YMIN, ++NV03_PGRAPH_ABS_UCLIP_YMAX, ++0x00400550, ++0x00400558, ++0x00400554, ++0x0040055c, ++NV03_PGRAPH_ABS_UCLIPA_XMIN, ++NV03_PGRAPH_ABS_UCLIPA_XMAX, ++NV03_PGRAPH_ABS_UCLIPA_YMIN, ++NV03_PGRAPH_ABS_UCLIPA_YMAX, ++NV03_PGRAPH_ABS_ICLIP_XMAX, ++NV03_PGRAPH_ABS_ICLIP_YMAX, ++NV03_PGRAPH_XY_LOGIC_MISC0, ++NV03_PGRAPH_XY_LOGIC_MISC1, ++NV03_PGRAPH_XY_LOGIC_MISC2, ++NV03_PGRAPH_XY_LOGIC_MISC3, ++NV03_PGRAPH_CLIPX_0, ++NV03_PGRAPH_CLIPX_1, ++NV03_PGRAPH_CLIPY_0, ++NV03_PGRAPH_CLIPY_1, ++NV10_PGRAPH_COMBINER0_IN_ALPHA, ++NV10_PGRAPH_COMBINER1_IN_ALPHA, ++NV10_PGRAPH_COMBINER0_IN_RGB, ++NV10_PGRAPH_COMBINER1_IN_RGB, ++NV10_PGRAPH_COMBINER_COLOR0, ++NV10_PGRAPH_COMBINER_COLOR1, ++NV10_PGRAPH_COMBINER0_OUT_ALPHA, ++NV10_PGRAPH_COMBINER1_OUT_ALPHA, ++NV10_PGRAPH_COMBINER0_OUT_RGB, ++NV10_PGRAPH_COMBINER1_OUT_RGB, ++NV10_PGRAPH_COMBINER_FINAL0, ++NV10_PGRAPH_COMBINER_FINAL1, ++0x00400e00, ++0x00400e04, ++0x00400e08, ++0x00400e0c, ++0x00400e10, ++0x00400e14, ++0x00400e18, ++0x00400e1c, ++0x00400e20, ++0x00400e24, ++0x00400e28, ++0x00400e2c, ++0x00400e30, ++0x00400e34, ++0x00400e38, ++0x00400e3c, ++NV04_PGRAPH_PASSTHRU_0, ++NV04_PGRAPH_PASSTHRU_1, ++NV04_PGRAPH_PASSTHRU_2, ++NV10_PGRAPH_DIMX_TEXTURE, ++NV10_PGRAPH_WDIMX_TEXTURE, ++NV10_PGRAPH_DVD_COLORFMT, ++NV10_PGRAPH_SCALED_FORMAT, ++NV04_PGRAPH_MISC24_0, ++NV04_PGRAPH_MISC24_1, ++NV04_PGRAPH_MISC24_2, ++NV03_PGRAPH_X_MISC, ++NV03_PGRAPH_Y_MISC, ++NV04_PGRAPH_VALID1, ++NV04_PGRAPH_VALID2, ++}; ++ ++static int nv17_graph_ctx_regs [] = { ++NV10_PGRAPH_DEBUG_4, ++0x004006b0, ++0x00400eac, ++0x00400eb0, ++0x00400eb4, ++0x00400eb8, ++0x00400ebc, ++0x00400ec0, ++0x00400ec4, ++0x00400ec8, ++0x00400ecc, ++0x00400ed0, ++0x00400ed4, ++0x00400ed8, ++0x00400edc, 
++0x00400ee0, ++0x00400a00, ++0x00400a04, ++}; ++ ++struct graph_state { ++ int nv10[sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0])]; ++ int nv17[sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0])]; ++ struct pipe_state pipe_state; ++}; ++ ++static void nv10_graph_save_pipe(struct nouveau_channel *chan) { ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; ++ int i; ++#define PIPE_SAVE(addr) \ ++ do { \ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \ ++ for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \ ++ fifo_pipe_state->pipe_##addr[i] = NV_READ(NV10_PGRAPH_PIPE_DATA); \ ++ } while (0) ++ ++ PIPE_SAVE(0x4400); ++ PIPE_SAVE(0x0200); ++ PIPE_SAVE(0x6400); ++ PIPE_SAVE(0x6800); ++ PIPE_SAVE(0x6c00); ++ PIPE_SAVE(0x7000); ++ PIPE_SAVE(0x7400); ++ PIPE_SAVE(0x7800); ++ PIPE_SAVE(0x0040); ++ PIPE_SAVE(0x0000); ++ ++#undef PIPE_SAVE ++} ++ ++static void nv10_graph_load_pipe(struct nouveau_channel *chan) { ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; ++ int i; ++ uint32_t xfmode0, xfmode1; ++#define PIPE_RESTORE(addr) \ ++ do { \ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \ ++ for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \ ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \ ++ } while (0) ++ ++ ++ nouveau_wait_for_idle(dev); ++ /* XXX check haiku comments */ ++ xfmode0 = NV_READ(NV10_PGRAPH_XFMODE0); ++ xfmode1 = NV_READ(NV10_PGRAPH_XFMODE1); ++ NV_WRITE(NV10_PGRAPH_XFMODE0, 0x10000000); ++ NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000); ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); ++ for (i = 0; i < 4; i++) ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); ++ for (i = 0; i < 4; i++) ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); ++ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); ++ for (i = 0; i < 3; i++) ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000); ++ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); ++ for (i = 0; i < 3; i++) ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000); ++ ++ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); ++ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000008); ++ ++ ++ PIPE_RESTORE(0x0200); ++ nouveau_wait_for_idle(dev); ++ ++ /* restore XFMODE */ ++ NV_WRITE(NV10_PGRAPH_XFMODE0, xfmode0); ++ NV_WRITE(NV10_PGRAPH_XFMODE1, xfmode1); ++ PIPE_RESTORE(0x6400); ++ PIPE_RESTORE(0x6800); ++ PIPE_RESTORE(0x6c00); ++ PIPE_RESTORE(0x7000); ++ PIPE_RESTORE(0x7400); ++ PIPE_RESTORE(0x7800); ++ PIPE_RESTORE(0x4400); ++ PIPE_RESTORE(0x0000); ++ PIPE_RESTORE(0x0040); ++ nouveau_wait_for_idle(dev); ++ ++#undef PIPE_RESTORE ++} ++ ++static void nv10_graph_create_pipe(struct nouveau_channel *chan) { ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; ++ uint32_t *fifo_pipe_state_addr; ++ int i; ++#define PIPE_INIT(addr) \ ++ do { \ ++ fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \ ++ } while (0) ++#define PIPE_INIT_END(addr) \ ++ do { \ ++ if (fifo_pipe_state_addr != \ ++ sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr) \ ++ DRM_ERROR("incomplete pipe init for 0x%x : %p/%p\n", addr, 
fifo_pipe_state_addr, \ ++ sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr); \ ++ } while (0) ++#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value ++ ++ PIPE_INIT(0x0200); ++ for (i = 0; i < 48; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x0200); ++ ++ PIPE_INIT(0x6400); ++ for (i = 0; i < 211; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x40000000); ++ NV_WRITE_PIPE_INIT(0x40000000); ++ NV_WRITE_PIPE_INIT(0x40000000); ++ NV_WRITE_PIPE_INIT(0x40000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f000000); ++ NV_WRITE_PIPE_INIT(0x3f000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ PIPE_INIT_END(0x6400); ++ ++ PIPE_INIT(0x6800); ++ for (i = 0; i < 162; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x3f800000); ++ for (i = 0; i < 25; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x6800); ++ ++ PIPE_INIT(0x6c00); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0xbf800000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x6c00); ++ ++ PIPE_INIT(0x7000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x00000000); ++ NV_WRITE_PIPE_INIT(0x7149f2ca); ++ for (i = 0; i < 35; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ 
PIPE_INIT_END(0x7000); ++ ++ PIPE_INIT(0x7400); ++ for (i = 0; i < 48; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x7400); ++ ++ PIPE_INIT(0x7800); ++ for (i = 0; i < 48; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x7800); ++ ++ PIPE_INIT(0x4400); ++ for (i = 0; i < 32; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x4400); ++ ++ PIPE_INIT(0x0000); ++ for (i = 0; i < 16; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x0000); ++ ++ PIPE_INIT(0x0040); ++ for (i = 0; i < 4; i++) ++ NV_WRITE_PIPE_INIT(0x00000000); ++ PIPE_INIT_END(0x0040); ++ ++#undef PIPE_INIT ++#undef PIPE_INIT_END ++#undef NV_WRITE_PIPE_INIT ++} ++ ++static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) ++{ ++ int i; ++ for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) { ++ if (nv10_graph_ctx_regs[i] == reg) ++ return i; ++ } ++ DRM_ERROR("unknow offset nv10_ctx_regs %d\n", reg); ++ return -1; ++} ++ ++static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) ++{ ++ int i; ++ for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) { ++ if (nv17_graph_ctx_regs[i] == reg) ++ return i; ++ } ++ DRM_ERROR("unknow offset nv17_ctx_regs %d\n", reg); ++ return -1; ++} ++ ++int nv10_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int i; ++ ++ for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) ++ NV_WRITE(nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]); ++ if (dev_priv->chipset>=0x17) { ++ for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) ++ NV_WRITE(nv17_graph_ctx_regs[i], pgraph_ctx->nv17[i]); ++ } ++ ++ nv10_graph_load_pipe(chan); ++ ++ return 0; ++} ++ ++int nv10_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx = chan->pgraph_ctx; ++ int i; ++ ++ for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) ++ pgraph_ctx->nv10[i] = NV_READ(nv10_graph_ctx_regs[i]); ++ if (dev_priv->chipset>=0x17) { ++ for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) ++ pgraph_ctx->nv17[i] = NV_READ(nv17_graph_ctx_regs[i]); ++ } ++ ++ nv10_graph_save_pipe(chan); ++ ++ return 0; ++} ++ ++void nouveau_nv10_context_switch(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv; ++ struct nouveau_engine *engine; ++ struct nouveau_channel *next, *last; ++ int chid; ++ ++ if (!dev) { ++ DRM_DEBUG("Invalid drm_device\n"); ++ return; ++ } ++ dev_priv = dev->dev_private; ++ if (!dev_priv) { ++ DRM_DEBUG("Invalid drm_nouveau_private\n"); ++ return; ++ } ++ if (!dev_priv->fifos) { ++ DRM_DEBUG("Invalid drm_nouveau_private->fifos\n"); ++ return; ++ } ++ engine = &dev_priv->Engine; ++ ++ chid = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & ++ (engine->fifo.channels - 1); ++ next = dev_priv->fifos[chid]; ++ ++ if (!next) { ++ DRM_ERROR("Invalid next channel\n"); ++ return; ++ } ++ ++ chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & ++ (engine->fifo.channels - 1); ++ last = dev_priv->fifos[chid]; ++ ++ if (!last) { ++ DRM_INFO("WARNING: Invalid last channel, switch to %x\n", ++ next->id); ++ } else { ++ DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n", ++ last->id, next->id); ++ } ++ ++ 
NV_WRITE(NV04_PGRAPH_FIFO,0x0); ++ if (last) { ++ nouveau_wait_for_idle(dev); ++ nv10_graph_save_context(last); ++ } ++ ++ nouveau_wait_for_idle(dev); ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000); ++ ++ nouveau_wait_for_idle(dev); ++ ++ nv10_graph_load_context(next); ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); ++ NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_FIFO,0x1); ++} ++ ++#define NV_WRITE_CTX(reg, val) do { \ ++ int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \ ++ if (offset > 0) \ ++ pgraph_ctx->nv10[offset] = val; \ ++ } while (0) ++ ++#define NV17_WRITE_CTX(reg, val) do { \ ++ int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \ ++ if (offset > 0) \ ++ pgraph_ctx->nv17[offset] = val; \ ++ } while (0) ++ ++int nv10_graph_create_context(struct nouveau_channel *chan) { ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct graph_state* pgraph_ctx; ++ ++ DRM_DEBUG("nv10_graph_context_create %d\n", chan->id); ++ ++ chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx), ++ DRM_MEM_DRIVER); ++ ++ if (pgraph_ctx == NULL) ++ return -ENOMEM; ++ ++ /* mmio trace suggest that should be done in ddx with methods/objects */ ++#if 0 ++ uint32_t tmp, vramsz; ++ /* per channel init from ddx */ ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; ++ /*XXX the original ddx code, does this in 2 steps : ++ * tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; ++ * NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ * tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100; ++ * NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ */ ++ tmp |= 0x00020100; ++ NV_WRITE_CTX(NV10_PGRAPH_SURFACE, tmp); ++ ++ vramsz = drm_get_resource_len(dev, 0) - 1; ++ NV_WRITE_CTX(NV04_PGRAPH_BOFFSET0, 0); ++ NV_WRITE_CTX(NV04_PGRAPH_BOFFSET1, 0); ++ NV_WRITE_CTX(NV04_PGRAPH_BLIMIT0 , vramsz); ++ NV_WRITE_CTX(NV04_PGRAPH_BLIMIT1 , vramsz); ++ ++ NV_WRITE_CTX(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000); ++ NV_WRITE_CTX(NV04_PGRAPH_BETA_AND , 0xFFFFFFFF); ++ ++ NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMIN, 0); ++ NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMIN, 0); ++ NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); ++ NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); ++#endif ++ ++ NV_WRITE_CTX(0x00400e88, 0x08000000); ++ NV_WRITE_CTX(0x00400e9c, 0x4b7fffff); ++ NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff); ++ NV_WRITE_CTX(0x00400e10, 0x00001000); ++ NV_WRITE_CTX(0x00400e14, 0x00001000); ++ NV_WRITE_CTX(0x00400e30, 0x00080008); ++ NV_WRITE_CTX(0x00400e34, 0x00080008); ++ if (dev_priv->chipset>=0x17) { ++ /* is it really needed ??? 
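++ * (presumably because NV10_PGRAPH_DEBUG_4 and 0x004006b0 are global
++ * PGRAPH setup written once in nv10_graph_init; seeding the saved
++ * context with the live values keeps a context load from clobbering
++ * them)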
*/
++ NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4));
++ NV17_WRITE_CTX(0x004006b0, NV_READ(0x004006b0));
++ NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
++ NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
++ NV17_WRITE_CTX(0x00400ec0, 0x00000080);
++ NV17_WRITE_CTX(0x00400ed0, 0x00000080);
++ }
++ NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
++
++ nv10_graph_create_pipe(chan);
++ return 0;
++}
++
++void nv10_graph_destroy_context(struct nouveau_channel *chan)
++{
++ struct drm_device *dev = chan->dev;
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ struct nouveau_engine *engine = &dev_priv->Engine;
++ struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++ int chid;
++
++ drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER);
++ chan->pgraph_ctx = NULL;
++
++ chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1);
++
++ /* This code seems to corrupt the 3D pipe, but blob seems to do similar things ????
++ */
++#if 0
++ /* does this avoid a potential context switch while we are written graph
++ * reg, or we should mask graph interrupt ???
++ */
++ NV_WRITE(NV04_PGRAPH_FIFO,0x0);
++ if (chid == chan->id) {
++ DRM_INFO("cleanning a channel with graph in current context\n");
++ nouveau_wait_for_idle(dev);
++ DRM_INFO("reseting current graph context\n");
++ /* can't be call here because of dynamic mem alloc */
++ //nv10_graph_create_context(chan);
++ nv10_graph_load_context(chan);
++ }
++ NV_WRITE(NV04_PGRAPH_FIFO, 0x1);
++#else
++ if (chid == chan->id) {
++ DRM_INFO("cleanning a channel with graph in current context\n");
++ }
++#endif
++}
++
++int nv10_graph_init(struct drm_device *dev) {
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ int i;
++
++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++ ~NV_PMC_ENABLE_PGRAPH);
++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++ NV_PMC_ENABLE_PGRAPH);
++
++ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
++ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
++ //NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810); /* 0x25f92ad9 */
++ NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
++ (1<<29) |
++ (1<<31));
++ if (dev_priv->chipset>=0x17) {
++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x1f000000);
++ NV_WRITE(0x004006b0, 0x40000020);
++ }
++ else
++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
++
++ /* copy tile info from PFB */
++ for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
++ NV_WRITE(NV10_PGRAPH_TILE(i), NV_READ(NV10_PFB_TILE(i)));
++ NV_WRITE(NV10_PGRAPH_TLIMIT(i), NV_READ(NV10_PFB_TLIMIT(i)));
++ NV_WRITE(NV10_PGRAPH_TSIZE(i), NV_READ(NV10_PFB_TSIZE(i)));
++ NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));
++ }
++
++ return 0;
++}
++
++void nv10_graph_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv20_graph.c git-nokia/drivers/gpu/drm-tungsten/nv20_graph.c
+--- git/drivers/gpu/drm-tungsten/nv20_graph.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv20_graph.c 2008-12-08 14:52:52.000000000 +0100
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++static void nv20_graph_context_init(struct drm_device *dev,
++ struct nouveau_gpuobj *ctx)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ int i;
++/*
++write32 #1 block at +0x00740adc NV_PRAMIN+0x40adc of 3369 (0xd29) elements:
+++0x00740adc: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b3c: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000
+++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740bbc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740bdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740bfc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++
+++0x00740c1c: 00000101 00000000
00000000 00000000 00000000 00000111 00000000 00000000 +++0x00740c3c: 00000000 00000000 00000000 44400000 00000000 00000000 00000000 00000000 +++0x00740c5c: 00000000 00000000 00000000 00000000 00000000 00000000 00030303 00030303 +++0x00740c7c: 00030303 00030303 00000000 00000000 00000000 00000000 00080000 00080000 +++0x00740c9c: 00080000 00080000 00000000 00000000 01012000 01012000 01012000 01012000 +++0x00740cbc: 000105b8 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008 +++0x00740cdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740cfc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 +++0x00740d1c: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 +++0x00740d3c: 00000000 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000 ++ +++0x00740d5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740d7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740d9c: 00000001 00000000 00004000 00000000 00000000 00000001 00000000 00040000 +++0x00740dbc: 00010000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740ddc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++... ++*/ ++ INSTANCE_WR(ctx, (0x33c/4)+0, 0xffff0000); ++ INSTANCE_WR(ctx, (0x33c/4)+25, 0x0fff0000); ++ INSTANCE_WR(ctx, (0x33c/4)+26, 0x0fff0000); ++ INSTANCE_WR(ctx, (0x33c/4)+80, 0x00000101); ++ INSTANCE_WR(ctx, (0x33c/4)+85, 0x00000111); ++ INSTANCE_WR(ctx, (0x33c/4)+91, 0x44400000); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+102+i, 0x00030303); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+110+i, 0x00080000); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+116+i, 0x01012000); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+120+i, 0x000105b8); ++ for (i = 0; i < 4; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+124+i, 0x00080008); ++ for (i = 0; i < 16; ++i) ++ INSTANCE_WR(ctx, (0x33c/4)+136+i, 0x07ff0000); ++ INSTANCE_WR(ctx, (0x33c/4)+154, 0x4b7fffff); ++ INSTANCE_WR(ctx, (0x33c/4)+176, 0x00000001); ++ INSTANCE_WR(ctx, (0x33c/4)+178, 0x00004000); ++ INSTANCE_WR(ctx, (0x33c/4)+181, 0x00000001); ++ INSTANCE_WR(ctx, (0x33c/4)+183, 0x00040000); ++ INSTANCE_WR(ctx, (0x33c/4)+184, 0x00010000); ++ ++/* ++... +++0x0074239c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x007423bc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x007423dc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x007423fc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++... +++0x00742bdc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742bfc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742c1c: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742c3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++... 
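++The 10700ff9/0436086c/000c001b triplets above repeat every 16 bytes;
++the loop below replays them over the 0x880-byte range at ctx+0x1c1c.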
++*/ ++ for (i = 0; i < 0x880; i += 0x10) { ++ INSTANCE_WR(ctx, ((0x1c1c + i)/4)+0, 0x10700ff9); ++ INSTANCE_WR(ctx, ((0x1c1c + i)/4)+1, 0x0436086c); ++ INSTANCE_WR(ctx, ((0x1c1c + i)/4)+2, 0x000c001b); ++ } ++ ++/* ++write32 #1 block at +0x00742fbc NV_PRAMIN+0x42fbc of 4 (0x4) elements: +++0x00742fbc: 3f800000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x281c/4), 0x3f800000); ++ ++/* ++write32 #1 block at +0x00742ffc NV_PRAMIN+0x42ffc of 12 (0xc) elements: +++0x00742ffc: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000 +++0x0074301c: 00000000 bf800000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x285c/4)+0, 0x40000000); ++ INSTANCE_WR(ctx, (0x285c/4)+1, 0x3f800000); ++ INSTANCE_WR(ctx, (0x285c/4)+2, 0x3f000000); ++ INSTANCE_WR(ctx, (0x285c/4)+4, 0x40000000); ++ INSTANCE_WR(ctx, (0x285c/4)+5, 0x3f800000); ++ INSTANCE_WR(ctx, (0x285c/4)+7, 0xbf800000); ++ INSTANCE_WR(ctx, (0x285c/4)+9, 0xbf800000); ++ ++/* ++write32 #1 block at +0x00742fcc NV_PRAMIN+0x42fcc of 4 (0x4) elements: +++0x00742fcc: 00000000 3f800000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x282c/4)+1, 0x3f800000); ++ ++/* ++write32 #1 block at +0x0074302c NV_PRAMIN+0x4302c of 4 (0x4) elements: +++0x0074302c: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743c9c NV_PRAMIN+0x43c9c of 4 (0x4) elements: +++0x00743c9c: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743c3c NV_PRAMIN+0x43c3c of 8 (0x8) elements: +++0x00743c3c: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x349c/4)+2, 0x000fe000); ++ ++/* ++write32 #1 block at +0x00743c6c NV_PRAMIN+0x43c6c of 4 (0x4) elements: +++0x00743c6c: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743ccc NV_PRAMIN+0x43ccc of 4 (0x4) elements: +++0x00743ccc: 00000000 000003f8 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x352c/4)+1, 0x000003f8); ++ ++/* write32 #1 NV_PRAMIN+0x43ce0 <- 0x002fe000 */ ++ INSTANCE_WR(ctx, 0x3540/4, 0x002fe000); ++ ++/* ++write32 #1 block at +0x00743cfc NV_PRAMIN+0x43cfc of 8 (0x8) elements: +++0x00743cfc: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c ++*/ ++ for (i = 0; i < 8; ++i) ++ INSTANCE_WR(ctx, (0x355c/4)+i, 0x001c527c); ++} ++ ++static void nv2a_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x33c/4, 0xffff0000); ++ for(i = 0x3a0; i< 0x3a8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x47c/4, 0x00000101); ++ INSTANCE_WR(ctx, 0x490/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x4a8/4, 0x44400000); ++ for(i = 0x4d4; i< 0x4e4; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00030303); ++ for(i = 0x4f4; i< 0x504; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080000); ++ for(i = 0x50c; i< 0x51c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for(i = 0x51c; i< 0x52c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x000105b8); ++ for(i = 0x52c; i< 0x53c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for(i = 0x55c; i< 0x59c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x5a4/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x5fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x604/4, 0x00004000); ++ INSTANCE_WR(ctx, 0x610/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x618/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x61c/4, 0x00010000); ++ ++ for (i=0x1a9c; i <= 0x22fc/4; i += 32) { ++ INSTANCE_WR(ctx, i/4 , 0x10700ff9); ++ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); ++ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); ++ } ++ ++ 
INSTANCE_WR(ctx, 0x269c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x26b0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x26dc/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x26e0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x26e4/4, 0x3f000000); ++ INSTANCE_WR(ctx, 0x26ec/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x26f0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x26f8/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x2700/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x3024/4, 0x000fe000); ++ INSTANCE_WR(ctx, 0x30a0/4, 0x000003f8); ++ INSTANCE_WR(ctx, 0x33fc/4, 0x002fe000); ++ for(i = 0x341c; i< 0x343c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x001c527c); ++} ++ ++static void nv25_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++/* ++write32 #1 block at +0x00740a7c NV_PRAMIN.GRCTX0+0x35c of 173 (0xad) elements: +++0x00740a7c: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740a9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740abc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740adc: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000 +++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++ +++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740bbc: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000 +++0x00740bdc: 00000000 00000000 00000000 00000080 ffff0000 00000001 00000000 00000000 +++0x00740bfc: 00000000 00000000 44400000 00000000 00000000 00000000 00000000 00000000 +++0x00740c1c: 4b800000 00000000 00000000 00000000 00000000 00030303 00030303 00030303 +++0x00740c3c: 00030303 00000000 00000000 00000000 00000000 00080000 00080000 00080000 +++0x00740c5c: 00080000 00000000 00000000 01012000 01012000 01012000 01012000 000105b8 ++ +++0x00740c7c: 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008 00000000 +++0x00740c9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 07ff0000 +++0x00740cbc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 +++0x00740cdc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 00000000 +++0x00740cfc: 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740d1c: 00000000 00000000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x35c/4)+0, 0xffff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+25, 0x0fff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+26, 0x0fff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+80, 0x00000101); ++ INSTANCE_WR(ctx, (0x35c/4)+85, 0x00000111); ++ INSTANCE_WR(ctx, (0x35c/4)+91, 0x00000080); ++ INSTANCE_WR(ctx, (0x35c/4)+92, 0xffff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+93, 0x00000001); ++ INSTANCE_WR(ctx, (0x35c/4)+98, 0x44400000); ++ INSTANCE_WR(ctx, (0x35c/4)+104, 0x4b800000); ++ INSTANCE_WR(ctx, (0x35c/4)+109, 0x00030303); ++ INSTANCE_WR(ctx, (0x35c/4)+110, 0x00030303); ++ INSTANCE_WR(ctx, (0x35c/4)+111, 0x00030303); ++ INSTANCE_WR(ctx, (0x35c/4)+112, 0x00030303); ++ INSTANCE_WR(ctx, (0x35c/4)+117, 0x00080000); ++ INSTANCE_WR(ctx, (0x35c/4)+118, 0x00080000); ++ INSTANCE_WR(ctx, (0x35c/4)+119, 0x00080000); 
++ INSTANCE_WR(ctx, (0x35c/4)+120, 0x00080000); ++ INSTANCE_WR(ctx, (0x35c/4)+123, 0x01012000); ++ INSTANCE_WR(ctx, (0x35c/4)+124, 0x01012000); ++ INSTANCE_WR(ctx, (0x35c/4)+125, 0x01012000); ++ INSTANCE_WR(ctx, (0x35c/4)+126, 0x01012000); ++ INSTANCE_WR(ctx, (0x35c/4)+127, 0x000105b8); ++ INSTANCE_WR(ctx, (0x35c/4)+128, 0x000105b8); ++ INSTANCE_WR(ctx, (0x35c/4)+129, 0x000105b8); ++ INSTANCE_WR(ctx, (0x35c/4)+130, 0x000105b8); ++ INSTANCE_WR(ctx, (0x35c/4)+131, 0x00080008); ++ INSTANCE_WR(ctx, (0x35c/4)+132, 0x00080008); ++ INSTANCE_WR(ctx, (0x35c/4)+133, 0x00080008); ++ INSTANCE_WR(ctx, (0x35c/4)+134, 0x00080008); ++ for (i=0; i<16; ++i) ++ INSTANCE_WR(ctx, (0x35c/4)+143+i, 0x07ff0000); ++ INSTANCE_WR(ctx, (0x35c/4)+161, 0x4b7fffff); ++ ++/* ++write32 #1 block at +0x00740d34 NV_PRAMIN.GRCTX0+0x614 of 3136 (0xc40) elements: +++0x00740d34: 00000000 00000000 00000000 00000080 30201000 70605040 b0a09080 f0e0d0c0 +++0x00740d54: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00740d74: 00000000 00000000 00000000 00000000 00000001 00000000 00004000 00000000 +++0x00740d94: 00000000 00000001 00000000 00040000 00010000 00000000 00000000 00000000 +++0x00740db4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++... +++0x00742214: 00000000 00000000 00000000 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742234: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742254: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742274: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++... +++0x00742a34: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742a54: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742a74: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +++0x00742a94: 10700ff9 0436086c 000c001b 00000000 00000000 00000000 00000000 00000000 +++0x00742ab4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +++0x00742ad4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x614/4)+3, 0x00000080); ++ INSTANCE_WR(ctx, (0x614/4)+4, 0x30201000); ++ INSTANCE_WR(ctx, (0x614/4)+5, 0x70605040); ++ INSTANCE_WR(ctx, (0x614/4)+6, 0xb0a09080); ++ INSTANCE_WR(ctx, (0x614/4)+7, 0xf0e0d0c0); ++ INSTANCE_WR(ctx, (0x614/4)+20, 0x00000001); ++ INSTANCE_WR(ctx, (0x614/4)+22, 0x00004000); ++ INSTANCE_WR(ctx, (0x614/4)+25, 0x00000001); ++ INSTANCE_WR(ctx, (0x614/4)+27, 0x00040000); ++ INSTANCE_WR(ctx, (0x614/4)+28, 0x00010000); ++ for (i=0; i < 0x880/4; i+=4) { ++ INSTANCE_WR(ctx, (0x1b04/4)+i+0, 0x10700ff9); ++ INSTANCE_WR(ctx, (0x1b04/4)+i+1, 0x0436086c); ++ INSTANCE_WR(ctx, (0x1b04/4)+i+2, 0x000c001b); ++ } ++ ++/* ++write32 #1 block at +0x00742e24 NV_PRAMIN.GRCTX0+0x2704 of 4 (0x4) elements: +++0x00742e24: 3f800000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x2704/4), 0x3f800000); ++ ++/* ++write32 #1 block at +0x00742e64 NV_PRAMIN.GRCTX0+0x2744 of 12 (0xc) elements: +++0x00742e64: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000 +++0x00742e84: 00000000 bf800000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x2744/4)+0, 0x40000000); ++ INSTANCE_WR(ctx, (0x2744/4)+1, 0x3f800000); ++ INSTANCE_WR(ctx, (0x2744/4)+2, 0x3f000000); ++ INSTANCE_WR(ctx, (0x2744/4)+4, 0x40000000); ++ INSTANCE_WR(ctx, (0x2744/4)+5, 0x3f800000); ++ INSTANCE_WR(ctx, (0x2744/4)+7, 0xbf800000); ++ INSTANCE_WR(ctx, (0x2744/4)+9, 
0xbf800000); ++ ++/* ++write32 #1 block at +0x00742e34 NV_PRAMIN.GRCTX0+0x2714 of 4 (0x4) elements: +++0x00742e34: 00000000 3f800000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x2714/4)+1, 0x3f800000); ++ ++/* ++write32 #1 block at +0x00742e94 NV_PRAMIN.GRCTX0+0x2774 of 4 (0x4) elements: +++0x00742e94: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743804 NV_PRAMIN.GRCTX0+0x30e4 of 4 (0x4) elements: +++0x00743804: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x007437a4 NV_PRAMIN.GRCTX0+0x3084 of 8 (0x8) elements: +++0x007437a4: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x3084/4)+2, 0x000fe000); ++ ++/* ++write32 #1 block at +0x007437d4 NV_PRAMIN.GRCTX0+0x30b4 of 4 (0x4) elements: +++0x007437d4: 00000000 00000000 00000000 00000000 ++write32 #1 block at +0x00743824 NV_PRAMIN.GRCTX0+0x3104 of 4 (0x4) elements: +++0x00743824: 00000000 000003f8 00000000 00000000 ++*/ ++ INSTANCE_WR(ctx, (0x3104/4)+1, 0x000003f8); ++ ++/* write32 #1 NV_PRAMIN.GRCTX0+0x3468 <- 0x002fe000 */ ++ INSTANCE_WR(ctx, 0x3468/4, 0x002fe000); ++ ++/* ++write32 #1 block at +0x00743ba4 NV_PRAMIN.GRCTX0+0x3484 of 8 (0x8) elements: +++0x00743ba4: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c ++*/ ++ for (i=0; i<8; ++i) ++ INSTANCE_WR(ctx, (0x3484/4)+i, 0x001c527c); ++} ++ ++static void nv30_31_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x410/4, 0x00000101); ++ INSTANCE_WR(ctx, 0x424/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x428/4, 0x00000060); ++ INSTANCE_WR(ctx, 0x444/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x448/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x44c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x460/4, 0x44400000); ++ INSTANCE_WR(ctx, 0x48c/4, 0xffff0000); ++ for(i = 0x4e0; i< 0x4e8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x4ec/4, 0x00011100); ++ for(i = 0x508; i< 0x548; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x550/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x58c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x590/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x594/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x598/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x59c/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x5b0/4, 0xb0000000); ++ for(i = 0x600; i< 0x640; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00010588); ++ for(i = 0x640; i< 0x680; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00030303); ++ for(i = 0x6c0; i< 0x700; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0008aae4); ++ for(i = 0x700; i< 0x740; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for(i = 0x740; i< 0x780; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x85c/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x860/4, 0x00010000); ++ for(i = 0x864; i< 0x874; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00040004); ++ for(i = 0x1f18; i<= 0x3088 ; i+= 16) { ++ INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9); ++ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); ++ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); ++ } ++ for(i = 0x30b8; i< 0x30c8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x344c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3808/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x381c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3848/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x384c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3850/4, 0x3f000000); ++ INSTANCE_WR(ctx, 0x3858/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x385c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3864/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x386c/4, 
0xbf800000); ++} ++ ++static void nv34_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x40c/4, 0x01000101); ++ INSTANCE_WR(ctx, 0x420/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x424/4, 0x00000060); ++ INSTANCE_WR(ctx, 0x440/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x444/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x448/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x45c/4, 0x44400000); ++ INSTANCE_WR(ctx, 0x480/4, 0xffff0000); ++ for(i = 0x4d4; i< 0x4dc; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x4e0/4, 0x00011100); ++ for(i = 0x4fc; i< 0x53c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x544/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x57c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x580/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x584/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x588/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x58c/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x5a0/4, 0xb0000000); ++ for(i = 0x5f0; i< 0x630; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00010588); ++ for(i = 0x630; i< 0x670; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00030303); ++ for(i = 0x6b0; i< 0x6f0; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0008aae4); ++ for(i = 0x6f0; i< 0x730; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for(i = 0x730; i< 0x770; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x850/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x854/4, 0x00010000); ++ for(i = 0x858; i< 0x868; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00040004); ++ for(i = 0x15ac; i<= 0x271c ; i+= 16) { ++ INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9); ++ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); ++ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); ++ } ++ for(i = 0x274c; i< 0x275c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ae0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2e9c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2eb0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2edc/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x2ee0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2ee4/4, 0x3f000000); ++ INSTANCE_WR(ctx, 0x2eec/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x2ef0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x2ef8/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x2f00/4, 0xbf800000); ++} ++ ++static void nv35_36_graph_context_init(struct drm_device *dev, ++ struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x40c/4, 0x00000101); ++ INSTANCE_WR(ctx, 0x420/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x424/4, 0x00000060); ++ INSTANCE_WR(ctx, 0x440/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x444/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x448/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x45c/4, 0x44400000); ++ INSTANCE_WR(ctx, 0x488/4, 0xffff0000); ++ for(i = 0x4dc; i< 0x4e4; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x4e8/4, 0x00011100); ++ for(i = 0x504; i< 0x544; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x54c/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x588/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x58c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x590/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x594/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x598/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x5ac/4, 0xb0000000); ++ for(i = 0x604; i< 0x644; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00010588); ++ for(i = 0x644; i< 0x684; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00030303); ++ for(i = 0x6c4; i< 0x704; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0008aae4); ++ for(i = 0x704; i< 0x744; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for(i = 0x744; i< 0x784; i += 4) ++ 
INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x860/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x864/4, 0x00010000); ++ for(i = 0x868; i< 0x878; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00040004); ++ for(i = 0x1f1c; i<= 0x308c ; i+= 16) { ++ INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9); ++ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c); ++ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b); ++ } ++ for(i = 0x30bc; i< 0x30cc; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x3450/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x380c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3820/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x384c/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x3850/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3854/4, 0x3f000000); ++ INSTANCE_WR(ctx, 0x385c/4, 0x40000000); ++ INSTANCE_WR(ctx, 0x3860/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x3868/4, 0xbf800000); ++ INSTANCE_WR(ctx, 0x3870/4, 0xbf800000); ++} ++ ++int nv20_graph_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); ++ unsigned int ctx_size; ++ unsigned int idoffs = 0x28/4; ++ int ret; ++ ++ switch (dev_priv->chipset) { ++ case 0x20: ++ ctx_size = NV20_GRCTX_SIZE; ++ ctx_init = nv20_graph_context_init; ++ idoffs = 0; ++ break; ++ case 0x25: ++ case 0x28: ++ ctx_size = NV25_GRCTX_SIZE; ++ ctx_init = nv25_graph_context_init; ++ break; ++ case 0x2a: ++ ctx_size = NV2A_GRCTX_SIZE; ++ ctx_init = nv2a_graph_context_init; ++ idoffs = 0; ++ break; ++ case 0x30: ++ case 0x31: ++ ctx_size = NV30_31_GRCTX_SIZE; ++ ctx_init = nv30_31_graph_context_init; ++ break; ++ case 0x34: ++ ctx_size = NV34_GRCTX_SIZE; ++ ctx_init = nv34_graph_context_init; ++ break; ++ case 0x35: ++ case 0x36: ++ ctx_size = NV35_36_GRCTX_SIZE; ++ ctx_init = nv35_36_graph_context_init; ++ break; ++ default: ++ ctx_size = 0; ++ ctx_init = nv35_36_graph_context_init; ++ DRM_ERROR("Please contact the devs if you want your NV%x" ++ " card to work\n", dev_priv->chipset); ++ return -ENOSYS; ++ break; ++ } ++ ++ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &chan->ramin_grctx))) ++ return ret; ++ ++ /* Initialise default context values */ ++ ctx_init(dev, chan->ramin_grctx->gpuobj); ++ ++ /* nv20: INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ ++ INSTANCE_WR(chan->ramin_grctx->gpuobj, idoffs, (chan->id<<24)|0x1); ++ /* CTX_USER */ ++ ++ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, ++ chan->ramin_grctx->instance >> 4); ++ ++ return 0; ++} ++ ++void nv20_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (chan->ramin_grctx) ++ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); ++ ++ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0); ++} ++ ++int nv20_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst; ++ ++ if (!chan->ramin_grctx) ++ return -EINVAL; ++ inst = chan->ramin_grctx->instance >> 4; ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER, ++ NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD); ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); ++ ++ nouveau_wait_for_idle(dev); ++ return 0; ++} ++ ++int nv20_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private 
*dev_priv = dev->dev_private; ++ uint32_t inst; ++ ++ if (!chan->ramin_grctx) ++ return -EINVAL; ++ inst = chan->ramin_grctx->instance >> 4; ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER, ++ NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE); ++ ++ nouveau_wait_for_idle(dev); ++ return 0; ++} ++ ++static void nv20_graph_rdi(struct drm_device *dev) { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i, writecount = 32; ++ uint32_t rdi_index = 0x2c80000; ++ ++ if (dev_priv->chipset == 0x20) { ++ rdi_index = 0x3d0000; ++ writecount = 15; ++ } ++ ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, rdi_index); ++ for (i = 0; i < writecount; i++) ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, 0); ++ ++ nouveau_wait_for_idle(dev); ++} ++ ++int nv20_graph_init(struct drm_device *dev) { ++ struct drm_nouveau_private *dev_priv = ++ (struct drm_nouveau_private *)dev->dev_private; ++ uint32_t tmp, vramsz; ++ int ret, i; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ if (!dev_priv->ctx_table) { ++ /* Create Context Pointer Table */ ++ dev_priv->ctx_table_size = 32 * 4; ++ if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, ++ dev_priv->ctx_table_size, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &dev_priv->ctx_table))) ++ return ret; ++ } ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, ++ dev_priv->ctx_table->instance >> 4); ++ ++ nv20_graph_rdi(dev); ++ ++ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); ++ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700); ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */ ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000); ++ NV_WRITE(0x40009C , 0x00000040); ++ ++ if (dev_priv->chipset >= 0x25) { ++ NV_WRITE(0x400890, 0x00080000); ++ NV_WRITE(0x400610, 0x304B1FB6); ++ NV_WRITE(0x400B80, 0x18B82880); ++ NV_WRITE(0x400B84, 0x44000000); ++ NV_WRITE(0x400098, 0x40000080); ++ NV_WRITE(0x400B88, 0x000000ff); ++ } else { ++ NV_WRITE(0x400880, 0x00080000); /* 0x0008c7df */ ++ NV_WRITE(0x400094, 0x00000005); ++ NV_WRITE(0x400B80, 0x45CAA208); /* 0x45eae20e */ ++ NV_WRITE(0x400B84, 0x24000000); ++ NV_WRITE(0x400098, 0x00000040); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E10038); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030); ++ } ++ ++ /* copy tile info from PFB */ ++ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { ++ NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i))); ++ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */ ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0030+i*4); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TLIMIT(i))); ++ NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i))); ++ /* which is NV40_PGRAPH_TSIZE0(i) ?? */ ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0050+i*4); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TSIZE(i))); ++ NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i))); ++ /* which is NV40_PGRAPH_TILE0(i) ?? 
*/ ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0010+i*4); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TILE(i))); ++ } ++ for (i = 0; i < 8; i++) { ++ NV_WRITE(0x400980+i*4, NV_READ(0x100300+i*4)); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0090+i*4); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100300+i*4)); ++ } ++ NV_WRITE(0x4009a0, NV_READ(0x100324)); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA000C); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100324)); ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100); ++ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); ++ ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ ++ /* begin RAM config */ ++ vramsz = drm_get_resource_len(dev, 0) - 1; ++ NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1)); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG1)); ++ NV_WRITE(0x400820, 0); ++ NV_WRITE(0x400824, 0); ++ NV_WRITE(0x400864, vramsz-1); ++ NV_WRITE(0x400868, vramsz-1); ++ ++ /* interesting.. the below overwrites some of the tile setup above.. */ ++ NV_WRITE(0x400B20, 0x00000000); ++ NV_WRITE(0x400B04, 0xFFFFFFFF); ++ ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); ++ ++ return 0; ++} ++ ++void nv20_graph_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); ++} ++ ++int nv30_graph_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++// uint32_t vramsz, tmp; ++ int ret, i; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ if (!dev_priv->ctx_table) { ++ /* Create Context Pointer Table */ ++ dev_priv->ctx_table_size = 32 * 4; ++ if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, ++ dev_priv->ctx_table_size, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &dev_priv->ctx_table))) ++ return ret; ++ } ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE, ++ dev_priv->ctx_table->instance >> 4); ++ ++ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); ++ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0); ++ NV_WRITE(0x400890, 0x01b463ff); ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf2de0475); ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000); ++ NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6); ++ NV_WRITE(0x400B80, 0x1003d888); ++ NV_WRITE(0x400B84, 0x0c000000); ++ NV_WRITE(0x400098, 0x00000000); ++ NV_WRITE(0x40009C, 0x0005ad00); ++ NV_WRITE(0x400B88, 0x62ff00ff); // suspiciously like PGRAPH_DEBUG_2 ++ NV_WRITE(0x4000a0, 0x00000000); ++ NV_WRITE(0x4000a4, 0x00000008); ++ NV_WRITE(0x4008a8, 0xb784a400); ++ NV_WRITE(0x400ba0, 0x002f8685); ++ NV_WRITE(0x400ba4, 0x00231f3f); ++ NV_WRITE(0x4008a4, 0x40000020); ++ ++ if (dev_priv->chipset == 0x34) { ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00200201); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0008); ++ 
NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000008); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000032); ++ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00004); ++ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000002); ++ } ++ ++ NV_WRITE(0x4000c0, 0x00000016); ++ ++ /* copy tile info from PFB */ ++ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { ++ NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i))); ++ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */ ++ NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i))); ++ /* which is NV40_PGRAPH_TSIZE0(i) ?? */ ++ NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i))); ++ /* which is NV40_PGRAPH_TILE0(i) ?? */ ++ } ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100); ++ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF); ++ NV_WRITE(0x0040075c , 0x00000001); ++ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); ++ ++ /* begin RAM config */ ++// vramsz = drm_get_resource_len(dev, 0) - 1; ++ NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1)); ++ if (dev_priv->chipset != 0x34) { ++ NV_WRITE(0x400750, 0x00EA0000); ++ NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG0)); ++ NV_WRITE(0x400750, 0x00EA0004); ++ NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG1)); ++ } ++ ++#if 0 ++ NV_WRITE(0x400820, 0); ++ NV_WRITE(0x400824, 0); ++ NV_WRITE(0x400864, vramsz-1); ++ NV_WRITE(0x400868, vramsz-1); ++ ++ NV_WRITE(0x400B20, 0x00000000); ++ NV_WRITE(0x400B04, 0xFFFFFFFF); ++ ++ /* per-context state, doesn't belong here */ ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100; ++ NV_WRITE(NV10_PGRAPH_SURFACE, tmp); ++ ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); ++ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); ++#endif ++ ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv40_fb.c git-nokia/drivers/gpu/drm-tungsten/nv40_fb.c +--- git/drivers/gpu/drm-tungsten/nv40_fb.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv40_fb.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,62 @@ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++int ++nv40_fb_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t fb_bar_size, tmp; ++ int num_tiles; ++ int i; ++ ++ /* This is strictly a NV4x register (don't know about NV5x). */ ++ /* The blob sets these to all kinds of values, and they mess up our setup. */ ++ /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */ ++ /* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */ ++ /* Any idea what this is? 
*/ ++ NV_WRITE(NV40_PFB_UNK_800, 0x1); ++ ++ switch (dev_priv->chipset) { ++ case 0x40: ++ case 0x45: ++ tmp = NV_READ(NV10_PFB_CLOSE_PAGE2); ++ NV_WRITE(NV10_PFB_CLOSE_PAGE2, tmp & ~(1<<15)); ++ num_tiles = NV10_PFB_TILE__SIZE; ++ break; ++ case 0x46: /* G72 */ ++ case 0x47: /* G70 */ ++ case 0x49: /* G71 */ ++ case 0x4b: /* G73 */ ++ case 0x4c: /* C51 (G7X version) */ ++ num_tiles = NV40_PFB_TILE__SIZE_1; ++ break; ++ default: ++ num_tiles = NV40_PFB_TILE__SIZE_0; ++ break; ++ } ++ ++ fb_bar_size = drm_get_resource_len(dev, 0) - 1; ++ switch (dev_priv->chipset) { ++ case 0x40: ++ for (i=0; i<num_tiles; i++) { ++ NV_WRITE(NV10_PFB_TILE(i), 0); ++ NV_WRITE(NV10_PFB_TLIMIT(i), fb_bar_size); ++ } ++ break; ++ default: ++ for (i=0; i<num_tiles; i++) { ++ NV_WRITE(NV40_PFB_TILE(i), 0); ++ NV_WRITE(NV40_PFB_TLIMIT(i), fb_bar_size); ++ } ++ break; ++ } ++ ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv40_fifo.c git-nokia/drivers/gpu/drm-tungsten/nv40_fifo.c +--- git/drivers/gpu/drm-tungsten/nv40_fifo.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv40_fifo.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,208 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "nouveau_drv.h" ++#include "nouveau_drm.h" ++ ++#define RAMFC_WR(offset, val) INSTANCE_WR(chan->ramfc->gpuobj, \ ++ NV40_RAMFC_##offset/4, (val)) ++#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \ ++ NV40_RAMFC_##offset/4) ++#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c)*NV40_RAMFC__SIZE)) ++#define NV40_RAMFC__SIZE 128 ++ ++int ++nv40_fifo_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, ++ NV40_RAMFC__SIZE, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, ++ NULL, &chan->ramfc))) ++ return ret; ++ ++ /* Fill entries that are seen filled in dumps of nvidia driver just ++ * after the channel is put into DMA mode ++ */ ++ RAMFC_WR(DMA_PUT , chan->pushbuf_base); ++ RAMFC_WR(DMA_GET , chan->pushbuf_base); ++ RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4); ++ RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | ++ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | ++#ifdef __BIG_ENDIAN ++ NV_PFIFO_CACHE1_BIG_ENDIAN | ++#endif ++ 0x30000000 /* no idea.. */); ++ RAMFC_WR(DMA_SUBROUTINE, 0); ++ RAMFC_WR(GRCTX_INSTANCE, chan->ramin_grctx->instance >> 4); ++ RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); ++ ++ /* enable the fifo dma operation */ ++ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<chan->id)); ++ return 0; ++} ++ ++void ++nv40_fifo_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id)); ++ ++ if (chan->ramfc) ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++} ++ ++int ++nv40_fifo_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp, tmp2; ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); ++ NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , RAMFC_RD(DMA_INSTANCE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , RAMFC_RD(DMA_DCOUNT)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE)); ++ ++ /* No idea what 0x2058 is.. 
*/ ++ tmp = RAMFC_RD(DMA_FETCH); ++ tmp2 = NV_READ(0x2058) & 0xFFF; ++ tmp2 |= (tmp & 0x30000000); ++ NV_WRITE(0x2058, tmp2); ++ tmp &= ~0x30000000; ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , tmp); ++ ++ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE)); ++ NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE , RAMFC_RD(ACQUIRE_VALUE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, RAMFC_RD(ACQUIRE_TIMESTAMP)); ++ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT , RAMFC_RD(ACQUIRE_TIMEOUT)); ++ NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE , RAMFC_RD(SEMAPHORE)); ++ NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE , RAMFC_RD(DMA_SUBROUTINE)); ++ NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE , RAMFC_RD(GRCTX_INSTANCE)); ++ NV_WRITE(0x32e4, RAMFC_RD(UNK_40)); ++ /* NVIDIA does this next line twice... */ ++ NV_WRITE(0x32e8, RAMFC_RD(UNK_44)); ++ NV_WRITE(0x2088, RAMFC_RD(UNK_4C)); ++ NV_WRITE(0x3300, RAMFC_RD(UNK_50)); ++ ++ /* not sure what part is PUT, and which is GET.. never seen a non-zero ++ * value appear in a mmio-trace yet.. ++ */ ++#if 0 ++ tmp = NV_READ(UNK_84); ++ NV_WRITE(NV_PFIFO_CACHE1_GET, tmp ???); ++ NV_WRITE(NV_PFIFO_CACHE1_PUT, tmp ???); ++#endif ++ ++ /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */ ++ tmp = NV_READ(NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF; ++ tmp |= RAMFC_RD(DMA_TIMESLICE) & 0x1FFFF; ++ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp); ++ ++ /* Set channel active, and in DMA mode */ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, ++ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); ++ ++ /* Reset DMA_CTL_AT_INFO to INVALID */ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); ++ ++ return 0; ++} ++ ++int ++nv40_fifo_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t tmp; ++ ++ RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); ++ RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT)); ++ RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE)); ++ RAMFC_WR(DMA_DCOUNT , NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT)); ++ RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE)); ++ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH); ++ tmp |= NV_READ(0x2058) & 0x30000000; ++ RAMFC_WR(DMA_FETCH , tmp); ++ ++ RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE)); ++ RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1)); ++ RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE)); ++ tmp = NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP); ++ RAMFC_WR(ACQUIRE_TIMESTAMP, tmp); ++ RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT)); ++ RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE)); ++ ++ /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something ++ * more involved depending on the value of 0x3228? ++ */ ++ RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); ++ ++ RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE)); ++ ++ /* No idea what the below is for exactly, ripped from a mmio-trace */ ++ RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4)); ++ ++ /* NVIDIA do this next line twice.. bug? */ ++ RAMFC_WR(UNK_44 , NV_READ(0x32e8)); ++ RAMFC_WR(UNK_4C , NV_READ(0x2088)); ++ RAMFC_WR(UNK_50 , NV_READ(0x3300)); ++ ++#if 0 /* no real idea which is PUT/GET in UNK_48.. 
*/ ++ tmp = NV_READ(NV04_PFIFO_CACHE1_GET); ++ tmp |= (NV_READ(NV04_PFIFO_CACHE1_PUT) << 16); ++ RAMFC_WR(UNK_48 , tmp); ++#endif ++ ++ return 0; ++} ++ ++int ++nv40_fifo_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if ((ret = nouveau_fifo_init(dev))) ++ return ret; ++ ++ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff); ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv40_graph.c git-nokia/drivers/gpu/drm-tungsten/nv40_graph.c +--- git/drivers/gpu/drm-tungsten/nv40_graph.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv40_graph.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,2193 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++/*TODO: decipher what each offset in the context represents. The below ++ * contexts are taken from dumps just after the 3D object is ++ * created. 
++ */ ++static void ++nv40_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ /* Always has the "instance address" of itself at offset 0 */ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ /* unknown */ ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x0016c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00170/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00180/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00184/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00188/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0018c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0019c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001a0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001b0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001c0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00480/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00494/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00498/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x004b4/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x004b8/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x004bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004d0/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x004ec/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x004fc/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00504/4, 0x00011100); ++ for (i=0x00520; i<=0x0055c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00568/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x00594/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x00598/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x0059c/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x005a0/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x005b4/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0060c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00610/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00614/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00618/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00628/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0062c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00630/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00640/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0067c/4, 0x00ffff00); ++ /* 0x680-0x6BC - NV30_TCL_PRIMITIVE_3D_TX_ADDRESS_UNIT(0-15) */ ++ /* 0x6C0-0x6FC - NV30_TCL_PRIMITIVE_3D_TX_FORMAT_UNIT(0-15) */ ++ for (i=0x006C0; i<=0x006fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ /* 0x700-0x73C - NV30_TCL_PRIMITIVE_3D_TX_WRAP_UNIT(0-15) */ ++ for (i=0x00700; i<=0x0073c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ /* 0x740-0x77C - NV30_TCL_PRIMITIVE_3D_TX_ENABLE_UNIT(0-15) */ ++ /* 0x780-0x7BC - NV30_TCL_PRIMITIVE_3D_TX_SWIZZLE_UNIT(0-15) */ ++ for (i=0x00780; i<=0x007bc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ /* 0x7C0-0x7FC - NV30_TCL_PRIMITIVE_3D_TX_FILTER_UNIT(0-15) */ ++ for (i=0x007c0; i<=0x007fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ /* 0x800-0x83C - NV30_TCL_PRIMITIVE_3D_TX_XY_DIM_UNIT(0-15) */ ++ for (i=0x00800; 
i<=0x0083c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ /* 0x840-0x87C - NV30_TCL_PRIMITIVE_3D_TX_UNK07_UNIT(0-15) */ ++ /* 0x880-0x8BC - NV30_TCL_PRIMITIVE_3D_TX_DEPTH_UNIT(0-15) */ ++ for (i=0x00880; i<=0x008bc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ /* unknown */ ++ for (i=0x00910; i<=0x0091c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x00920; i<=0x0092c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x00940; i<=0x0094c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x00960; i<=0x0096c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00980/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x009b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x009c4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x009c8/4, 0x60103f00); ++ INSTANCE_WR(ctx, 0x009d4/4, 0x00020000); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00aac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00af0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00af8/4, 0x80800001); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00bf8/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c00/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c04/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c08/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c0c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c44/4, 0x00000001); ++ for (i=0x03008; i<=0x03080; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x05288; i<=0x08570; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x08628; i<=0x08e18; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0bd28; i<=0x0f010; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0f0c8; i<=0x0f8b8; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x127c8; i<=0x15ab0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x15b68; i<=0x16358; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x19268; i<=0x1c550; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x1c608; i<=0x1cdf8; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x1fd08; i<=0x22ff0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x230a8; i<=0x23898; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x267a8; i<=0x29a90; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x29b48; i<=0x2a338; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv41_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00000128/4, 0x02008821); ++ for (i = 0x00000178; i <= 0x00000180; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00000188/4, 0x00000040); ++ for (i = 0x00000194; i <= 0x000001b0; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00000340/4, 0x00040000); ++ for (i = 0x00000350; i <= 0x0000035c; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00000388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x000003cc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x000003d0/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x000003ec/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x000003f0/4, 0xffff0000); 
++ INSTANCE_WR(ctx, 0x000003f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000408/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00000418/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00000424/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00000428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00000430/4, 0x00011100); ++ for (i = 0x0000044c; i <= 0x00000488; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00000494/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x000004bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x000004c0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x000004c4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x000004c8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x000004dc/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x000004f8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0000052c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00000530/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00000534/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00000538/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00000548/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0000054c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00000550/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000560/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x00000598/4, 0x00ffff00); ++ for (i = 0x000005dc; i <= 0x00000618; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i = 0x0000061c; i <= 0x00000658; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i = 0x0000069c; i <= 0x000006d8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i = 0x000006dc; i <= 0x00000718; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i = 0x0000071c; i <= 0x00000758; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i = 0x0000079c; i <= 0x000007d8; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i = 0x0000082c; i <= 0x00000838; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i = 0x0000083c; i <= 0x00000848; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i = 0x0000085c; i <= 0x00000868; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i = 0x0000087c; i <= 0x00000888; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x0000089c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x000008d0/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x000008d4/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x000008e0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x000008e4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x000008e8/4, 0x20103f00); ++ INSTANCE_WR(ctx, 0x000008f4/4, 0x00020000); ++ INSTANCE_WR(ctx, 0x0000092c/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x000009b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x000009fc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00000a04/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00000a08/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00000aac/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00000ab8/4, 0x0000ffff); ++ for (i = 0x00000ad4; i <= 0x00000ae4; i += 4) ++ INSTANCE_WR(ctx, i/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00000ae8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000b20/4, 0x00000001); ++ for (i = 0x00002ee8; i <= 0x00002f60; i += 8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i = 0x00005168; i <= 0x00007358; i += 24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i = 0x00007368; i <= 0x00007758; i += 16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i = 0x0000a068; i <= 0x0000c258; i += 24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i = 0x0000c268; i <= 0x0000c658; i += 16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i = 0x0000ef68; i <= 0x00011158; i += 24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i = 0x00011168; i <= 0x00011558; i += 16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i = 0x00013e68; i <= 0x00016058; i += 24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i = 
0x00016068; i <= 0x00016458; i += 16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++}; ++ ++static void ++nv43_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00194/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00198/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0019c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001a0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001a4/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001a8/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001ac/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001b0/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00408/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00430/4, 0x00011100); ++ for (i=0x0044c; i<=0x00488; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00538/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00560/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x00598/4, 0x00ffff00); ++ for (i=0x005dc; i<=0x00618; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x0061c; i<=0x00658; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x0069c; i<=0x006d8; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x006dc; i<=0x00718; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x0071c; i<=0x00758; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x0079c; i<=0x007d8; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x0082c; i<=0x00838; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x0083c; i<=0x00848; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x0085c; i<=0x00868; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x0087c; i<=0x00888; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x0089c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x008d0/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x008d4/4, 0x030c30c3); 
++ INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x008f4/4, 0x00020000); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00abc/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00af8/4, 0x00000001); ++ for (i=0x02ec0; i<=0x02f38; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x04c80; i<=0x06e70; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x06e80; i<=0x07270; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x096c0; i<=0x0b8b0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0b8c0; i<=0x0bcb0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0e100; i<=0x102f0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x10300; i<=0x106f0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++}; ++ ++static void ++nv46_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00040/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00044/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0004c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00138/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x0013c/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00144/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00184/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0018c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00190/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00194/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00198/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0019c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001a4/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001ec/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x0036c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00370/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00374/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00378/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003a4/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x003b8/4, 0x00003010); ++ INSTANCE_WR(ctx, 0x003dc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003e0/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003e4/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003e8/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003fc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00400/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00404/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00408/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0040c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00410/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00414/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00418/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004b0/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004b4/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x004d0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x004d4/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x004d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 
0x004ec/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x004fc/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00500/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00504/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00508/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0050c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00510/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00514/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00518/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0051c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00520/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00524/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00528/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0052c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00530/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00534/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00538/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0053c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00550/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00554/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00011100); ++ for (i=0x00578; i<0x005b4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c0/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x005e8/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x005ec/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x005f0/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x005f4/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x00608/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x00624/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00658/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x0065c/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00660/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00664/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00674/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00678/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x0067c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0068c/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x006c8/4, 0x00ffff00); ++ for (i=0x0070c; i<=0x00748; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x0074c; i<=0x00788; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x007cc; i<=0x00808; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x0080c; i<=0x00848; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x0084c; i<=0x00888; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x008cc; i<=0x00908; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x0095c; i<=0x00968; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x0096c; i<=0x00978; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x0098c; i<=0x00998; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x009ac; i<=0x009b8; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x009cc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00a00/4, 0x00000421); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00011001); ++ INSTANCE_WR(ctx, 0x00a14/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x00a18/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x00a1c/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x00a28/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00a60/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00aec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00b30/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00b38/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00b3c/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00bc0/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00be8/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00bec/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00bf0/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00bf4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00c2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00c30/4, 0x08e00001); ++ INSTANCE_WR(ctx, 0x00c34/4, 0x000e3000); ++ for (i=0x017f8; i<=0x01870; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x035b8; i<=0x057a8; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ 
for (i=0x057b8; i<=0x05ba8; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x07f38; i<=0x0a128; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0a138; i<=0x0a528; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0c8b8; i<=0x0eaa8; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0eab8; i<=0x0eea8; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++/* This may only work on 7800 AGP cards, will include a warning */ ++static void ++nv47_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00000128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00000178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0000017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00000180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00000188/4, 0x00000040); ++ for (i=0x00000194; i<=0x000001b0; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00000340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00000350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00000354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00000358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0000035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00000388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010); ++ for (i=0x000003c0; i<=0x000003fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00000454/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00000458/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x00000474/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00000478/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x0000047c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000490/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x000004a0/4, 0xffff0000); ++ for (i=0x000004a4; i<=0x000004e0; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x000004f4/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x000004f8/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00000500/4, 0x00011100); ++ for (i=0x0000051c; i<=0x00000558; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00000564/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x0000058c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x00000590/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x00000594/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x00000598/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x000005ac/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x000005c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000005fc/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00000600/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00000604/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00000608/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00000618/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0000061c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00000620/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00000630/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0000066c/4, 0x00ffff00); ++ for (i=0x000006b0; i<=0x000006ec; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x000006f0; i<=0x0000072c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00000770; i<=0x000007ac; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x000007b0; i<=0x000007ec; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x000007f0; i<=0x0000082c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00000870; i<=0x000008ac; i+=4) ++ INSTANCE_WR(ctx, i/4, 
0x00100008); ++ INSTANCE_WR(ctx, 0x00000900/4, 0x0001bc80); ++ INSTANCE_WR(ctx, 0x00000904/4, 0x0001bc80); ++ INSTANCE_WR(ctx, 0x00000908/4, 0x0001bc80); ++ INSTANCE_WR(ctx, 0x0000090c/4, 0x0001bc80); ++ INSTANCE_WR(ctx, 0x00000910/4, 0x00000202); ++ INSTANCE_WR(ctx, 0x00000914/4, 0x00000202); ++ INSTANCE_WR(ctx, 0x00000918/4, 0x00000202); ++ INSTANCE_WR(ctx, 0x0000091c/4, 0x00000202); ++ for (i=0x00000930; i<=0x0000095c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x00000970/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x000009a4/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x000009a8/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x000009b4/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x000009b8/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x000009bc/4, 0x40103f00); ++ INSTANCE_WR(ctx, 0x000009c8/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00000a00/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00000a8c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000ad0/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00000adc/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00000ae0/4, 0x00888001); ++ for (i=0x00000b10; i<=0x00000b8c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00000bb4/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00000bc0/4, 0x0000ffff); ++ for (i=0x00000bdc; i<=0x00000bf8; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00000bfc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000c34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00000c38/4, 0x08e00001); ++ INSTANCE_WR(ctx, 0x00000c3c/4, 0x000e3000); ++ for (i=0x00003000; i<=0x00003078; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x00004dc0; i<=0x00006fb0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x00006fc0; i<=0x000073b0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x00009800; i<=0x0000b9f0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0000ba00; i<=0x00010430; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x00010440; i<=0x00010830; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x00012c80; i<=0x00014e70; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x00014e80; i<=0x00015270; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x000176c0; i<=0x000198b0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x000198c0; i<=0x00019cb0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0001c100; i<=0x0001e2f0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0001e300; i<=0x0001e6f0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv49_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00004/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00008/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00010/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00014/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00018/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00020/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x001bc/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x001c8/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00218/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0021c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00220/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00228/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00234/4, 0x80000000); ++ INSTANCE_WR(ctx, 
0x00238/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0023c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00240/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00244/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00248/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0024c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00250/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x003e0/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003fc/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00428/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0043c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x00460/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00464/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00468/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0046c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00470/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00474/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0047c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00480/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00484/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00488/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0048c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00490/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00494/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00498/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0049c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004f4/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x00514/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00518/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x0051c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00530/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00540/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00544/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00548/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00550/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00554/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00558/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00560/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00564/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00568/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0056c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00570/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00574/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00578/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0057c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00580/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x005a0/4, 0x00011100); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x0062c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x00630/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x0064c/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6); ++ 
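/* Observation, not in the original patch: the 0x435185d6 / 0x2155b699 /
 * 0xfedcba98 / 0x00000098 run starting here recurs at shifted offsets in
 * every per-chipset table in this file, consistent with the later remark
 * that these functions are "very similar, with a minimal amount of
 * chipset-specific changes".
 */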
INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x006a8/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00); ++ for (i=0x00750; i<=0x0078c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00790; i<=0x007cc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00810; i<=0x0084c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x00850; i<=0x0088c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00890; i<=0x008cc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00910; i<=0x0094c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x009a0; i<=0x009ac; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x009b0; i<=0x009bc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x009d0; i<=0x009dc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x009f0; i<=0x009fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00a10/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00a44/4, 0x00000421); ++ INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00); ++ INSTANCE_WR(ctx, 0x00a68/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00b70/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00b80/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c54/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c80/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c84/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c88/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c90/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c94/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c98/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00cd4/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001); ++ INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000); ++ for(i=0x030a0; i<=0x03118; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x098a0; i<=0x0ba90; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x0baa0; i<=0x0be90; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x0e2e0; i<=0x0fff0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x10008; i<=0x104d0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x104e0; i<=0x108d0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x12d20; i<=0x14f10; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x14f20; i<=0x15310; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x17760; i<=0x19950; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x19960; i<=0x19d50; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x1c1a0; i<=0x1e390; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x1e3a0; i<=0x1e790; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x20be0; i<=0x22dd0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x22de0; i<=0x231d0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv4a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00158/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00160/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00164/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00168/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00170/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00003010); ++ INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00408/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00430/4, 0x00011100); ++ for (i=0x0044c; i<=0x00488; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00538/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00548/4, 
0xffffffff); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00); ++ for (i=0x005d8; i<=0x00614; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00618; i<=0x00654; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00698; i<=0x006d4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x006d8; i<=0x00714; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00718; i<=0x00754; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00798; i<=0x007d4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x00828; i<=0x00834; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x00838; i<=0x00844; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x00858; i<=0x00864; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x00878; i<=0x00884; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00898/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x008cc/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x008d4/4, 0x00011001); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x008f4/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00abc/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00af8/4, 0x00000001); ++ for (i=0x016c0; i<=0x01738; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x03840; i<=0x05670; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x05680; i<=0x05a70; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x07e00; i<=0x09ff0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0a000; i<=0x0a3f0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x0c780; i<=0x0e970; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x0e980; i<=0x0ed70; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv4b_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00004/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00008/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00010/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00014/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00018/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x00020/4, 0x0000c040); ++ INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x000d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x001bc/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x001c8/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00218/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0021c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00220/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00228/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00234/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00238/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0023c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00240/4, 0x80000000); ++ 
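/* Observation, not in the original patch: this nv4b table matches the nv49
 * table above entry-for-entry except for the last few strided ranges, and
 * the two chipsets also share a single context program (nv49_4b_ctx_prog)
 * further down.
 */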
INSTANCE_WR(ctx, 0x00244/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00248/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x0024c/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00250/4, 0x80000000); ++ INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x003e0/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x003fc/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00428/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0043c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x00460/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00464/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00468/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0046c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00470/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00474/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0047c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00480/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00484/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00488/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0048c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00490/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00494/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x00498/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x0049c/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004f4/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x00514/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00518/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x0051c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00530/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00540/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00544/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00548/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00550/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00554/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00558/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00560/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00564/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00568/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0056c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00570/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00574/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00578/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x0057c/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00580/4, 0x88888888); ++ INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x005a0/4, 0x00011100); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x0062c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x00630/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x0064c/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x006a8/4, 
0x00000098); ++ INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00); ++ for (i=0x00750; i<=0x0078c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00790; i<=0x007cc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00810; i<=0x0084c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x00850; i<=0x0088c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00890; i<=0x008cc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00910; i<=0x0094c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x009a0; i<=0x009ac; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x009b0; i<=0x009bc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x009d0; i<=0x009dc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x009f0; i<=0x009fc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00a10/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00a44/4, 0x00000421); ++ INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00); ++ INSTANCE_WR(ctx, 0x00a68/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00b70/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00b80/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00c54/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c80/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c84/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c88/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c90/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c94/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c98/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001); ++ INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000); ++ for(i=0x030a0; 
i<=0x03118; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x098a0; i<=0x0ba90; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x0baa0; i<=0x0be90; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x0e2e0; i<=0x0fff0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x10008; i<=0x104d0; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x104e0; i<=0x108d0; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x12d20; i<=0x14f10; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x14f20; i<=0x15310; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for(i=0x17760; i<=0x19950; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for(i=0x19960; i<=0x19d50; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv4c_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00158/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00160/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00164/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00168/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00170/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x003d0/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x003f0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x003f4/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x003f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0040c/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x0041c/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x0042c/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00434/4, 0x00011100); ++ for (i=0x00450; i<0x0048c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00498/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x004c0/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x004c4/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x004c8/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x004cc/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x004e0/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x004fc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00530/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00534/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00538/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x0053c/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x0054c/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x00550/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00554/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00564/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x0059c/4, 0x00ffff00); ++ for (i=0x005e0; i<=0x0061c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00620; i<=0x0065c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x006a0; i<=0x006dc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ 
for (i=0x006e0; i<=0x0071c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00720; i<=0x0075c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x007a0; i<=0x007dc; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x00830; i<=0x0083c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x00840; i<=0x0084c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x00860; i<=0x0086c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x00880; i<=0x0088c; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x008a0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x008d4/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x008d8/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x008dc/4, 0x00011001); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x008ec/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x008f0/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x008fc/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00934/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00a0c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00a10/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00a74/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00a80/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00a9c/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00aa0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00ad8/4, 0x00000001); ++ for (i=0x016a0; i<0x01718; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x03460; i<0x05650; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x05660; i<0x05a50; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++static void ++nv4e_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i; ++ ++ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); ++ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); ++ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); ++ INSTANCE_WR(ctx, 0x00128/4, 0x02008821); ++ INSTANCE_WR(ctx, 0x00158/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00160/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00164/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00168/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00170/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00174/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00178/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00180/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00188/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); ++ INSTANCE_WR(ctx, 0x00340/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x00350/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00354/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00358/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); ++ INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); ++ INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); ++ INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00408/4, 0x46400000); ++ INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); ++ INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); ++ INSTANCE_WR(ctx, 0x00430/4, 0x00011100); ++ for (i=0x0044c; i<=0x00488; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x07ff0000); ++ INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); ++ INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x004c0/4, 
0x70605040); ++ INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); ++ INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); ++ INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); ++ INSTANCE_WR(ctx, 0x00538/4, 0x00000098); ++ INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); ++ INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); ++ INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000); ++ INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00); ++ for (i=0x005d8; i<=0x00614; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00018488); ++ for (i=0x00618; i<=0x00654; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00028202); ++ for (i=0x00698; i<=0x006d4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0000aae4); ++ for (i=0x006d8; i<=0x00714; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x01012000); ++ for (i=0x00718; i<=0x00754; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ for (i=0x00798; i<=0x007d4; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00100008); ++ for (i=0x00828; i<=0x00834; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x0001bc80); ++ for (i=0x00838; i<=0x00844; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000202); ++ for (i=0x00858; i<=0x00864; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00000008); ++ for (i=0x00878; i<=0x00884; i+=4) ++ INSTANCE_WR(ctx, i/4, 0x00080008); ++ INSTANCE_WR(ctx, 0x00898/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x008cc/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3); ++ INSTANCE_WR(ctx, 0x008d4/4, 0x00011001); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); ++ INSTANCE_WR(ctx, 0x008f4/4, 0x00040000); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); ++ INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); ++ INSTANCE_WR(ctx, 0x00a6c/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00a78/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00a94/4, 0x00005555); ++ INSTANCE_WR(ctx, 0x00a98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00aa4/4, 0x00000001); ++ for (i=0x01668; i<=0x016e0; i+=8) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++ for (i=0x03428; i<=0x05618; i+=24) ++ INSTANCE_WR(ctx, i/4, 0x00000001); ++ for (i=0x05628; i<=0x05a18; i+=16) ++ INSTANCE_WR(ctx, i/4, 0x3f800000); ++} ++ ++int ++nv40_graph_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); ++ int ret; ++ ++ /* These functions populate the graphics context with a whole heap ++ * of default state. All these functions are very similar, with ++ * a minimal amount of chipset-specific changes. However, as we're ++ * currently dependent on the context programs used by the NVIDIA ++ * binary driver these functions must match the layout expected by ++ * them. Hopefully at some point this will all change. ++ */ ++ switch (dev_priv->chipset) { ++ case 0x40: ++ ctx_init = nv40_graph_context_init; ++ break; ++ case 0x41: ++ case 0x42: ++ ctx_init = nv41_graph_context_init; ++ break; ++ case 0x43: ++ ctx_init = nv43_graph_context_init; ++ break; ++ case 0x46: ++ ctx_init = nv46_graph_context_init; ++ break; ++ case 0x47: ++ ctx_init = nv47_graph_context_init; ++ break; ++ case 0x49: ++ ctx_init = nv49_graph_context_init; ++ break; ++ case 0x44: ++ case 0x4a: ++ ctx_init = nv4a_graph_context_init; ++ break; ++ case 0x4b: ++ ctx_init = nv4b_graph_context_init; ++ break; ++ case 0x4c: ++ case 0x67: ++ ctx_init = nv4c_graph_context_init; ++ break; ++ case 0x4e: ++ ctx_init = nv4e_graph_context_init; ++ break; ++ default: ++ ctx_init = nv40_graph_context_init; ++ break; ++ } ++ ++ /* Allocate a 175KiB block of PRAMIN to store the context. This ++ * is massive overkill for a lot of chipsets, but it should be safe ++ * until we're able to implement this properly (will happen at more ++ * or less the same time we're able to write our own context programs). ++ */ ++ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16, ++ NVOBJ_FLAG_ZERO_ALLOC, ++ &chan->ramin_grctx))) ++ return ret; ++ ++ /* Initialise default context values */ ++ ctx_init(dev, chan->ramin_grctx->gpuobj); ++ ++ return 0; ++} ++ ++void ++nv40_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx); ++} ++ ++static int ++nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t old_cp, tv = 1000, tmp; ++ int i; ++ ++ old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ ++ tmp = NV_READ(NV40_PGRAPH_CTXCTL_0310); ++ tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : ++ NV40_PGRAPH_CTXCTL_0310_XFER_LOAD; ++ NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp); ++ ++ tmp = NV_READ(NV40_PGRAPH_CTXCTL_0304); ++ tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX; ++ NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp); ++ ++ nouveau_wait_for_idle(dev); ++ ++ for (i = 0; i < tv; i++) { ++ if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0) ++ break; ++ } ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); ++ ++ if (i == tv) { ++ uint32_t ucstat = NV_READ(NV40_PGRAPH_CTXCTL_UCODE_STAT); ++ DRM_ERROR("Failed: Instance=0x%08x Save=%d\n", inst, save); ++ DRM_ERROR("IP: 0x%02x, Opcode: 0x%08x\n", ++ ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT, ++ ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK); ++ DRM_ERROR("0x40030C = 0x%08x\n", ++ NV_READ(NV40_PGRAPH_CTXCTL_030C)); ++ return -EBUSY; ++ } ++ ++ return 0; ++}
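/* Clarifying note, not in the original patch: nv40_graph_transfer_context()
 * above drives the context-switch microcode in one of two directions.
 * save != 0 selects XFER_SAVE (live PGRAPH state written out to the PRAMIN
 * context at "inst"); save == 0 selects XFER_LOAD (the PRAMIN context pulled
 * back into PGRAPH). Completion is detected with a bounded busy-wait on
 * NV40_PGRAPH_CTXCTL_030C (tv = 1000 polls) rather than an interrupt.
 */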
++ ++/* Save current context (from PGRAPH) into the channel's context */ ++int ++nv40_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ uint32_t inst; ++ ++ if (!chan->ramin_grctx) ++ return -EINVAL; ++ inst = chan->ramin_grctx->instance >> 4; ++ ++ return nv40_graph_transfer_context(dev, inst, 1); ++}
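/* A minimal sketch (hypothetical, not part of the original patch) of how the
 * save and load entry points defined here would pair up during a PGRAPH
 * channel switch. The wrapper name is invented; it assumes the usual
 * prototypes for these functions from nouveau_drv.h.
 */
static int
example_nv40_graph_switch(struct nouveau_channel *prev,
			  struct nouveau_channel *next)
{
	int ret;

	if (prev) {
		/* copy live PGRAPH state back into prev's PRAMIN context */
		ret = nv40_graph_save_context(prev);
		if (ret)
			return ret;
	}

	/* pull next's PRAMIN context into PGRAPH and mark it current */
	return nv40_graph_load_context(next);
}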
++ ++/* Restore the context for a specific channel into PGRAPH */ ++int ++nv40_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst; ++ int ret; ++ ++ if (!chan->ramin_grctx) ++ return -EINVAL; ++ inst = chan->ramin_grctx->instance >> 4; ++ ++ ret = nv40_graph_transfer_context(dev, inst, 0); ++ if (ret) ++ return ret; ++ ++ /* 0x40032C, no idea of its exact function. Could simply be a ++ * record of the currently active PGRAPH context. It's currently ++ * unknown as to what bit 24 does. The nv ddx has it set, so we will ++ * set it here too. ++ */ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, ++ (inst & NV40_PGRAPH_CTXCTL_CUR_INST_MASK) | ++ NV40_PGRAPH_CTXCTL_CUR_LOADED); ++ /* 0x32E0 records the instance address of the active FIFO's PGRAPH ++ * context. If at any time this doesn't match 0x40032C, you will ++ * receive PGRAPH_INTR_CONTEXT_SWITCH ++ */ ++ NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst); ++ return 0; ++} ++ ++/* These blocks of "magic numbers" are actually a microcode that the GPU uses ++ * to control how graphics contexts get saved and restored between PRAMIN ++ * and PGRAPH during a context switch. We're currently using values seen ++ * in mmio-traces of the binary driver. ++ */ ++static uint32_t nv40_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409406, ++ 0x0040a268, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, ++ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00110205, 0x0011420a, 0x00114210, 0x00110216, ++ 0x0012421b, 0x00120270, 0x001242c0, 0x00200040, 0x00100280, 0x00128100, ++ 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, ++ 0x00110400, 0x00104d10, 0x00500060, 0x00403b87, 0x0060000d, 0x004076e6, ++ 0x002000f0, 0x0060000a, 0x00200045, 0x00100620, 0x00108668, 0x0011466b, ++ 0x00120682, 0x0011068b, 0x00168691, 0x0010c6ae, 0x001206b4, 0x0020002a, ++ 0x001006c4, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1, ++ 0x00500060, 0x00405600, 0x00405684, 0x00600003, 0x00500067, 0x00600008, ++ 0x00500060, 0x00700082, 0x0020026c, 0x0060000a, 0x00104800, 0x00104901, ++ 0x00120920, 0x00200035, 0x00100940, 0x00148a00, 0x00104a14, 0x00200038, ++ 0x00100b00, 0x00138d00, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, ++ 0x0020031a, 0x0060000a, 0x00300000, 0x00200680, 0x00406c00, 0x00200684, ++ 0x00800001, 0x00200b62, 0x0060000a, 0x0020a0b0, 0x0040728a, 0x00201b68, ++ 0x00800041, 0x00407684, 0x00203e60, 0x00800002, 0x00408700, 0x00600006, ++ 0x00700003, 0x004080e6, 0x00700080, 0x0020031a, 0x0060000a, 0x00200004, ++ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a284, ++ 0x00700002, 0x00600004, 0x0040a268, 0x00700000, 0x00200000, 0x0060000a, ++ 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409388, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, ++ 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, ++ 0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, ++ 0x0040a406, 0x0040a505, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ++ ~0 ++}; ++ ++static uint32_t nv41_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306, ++ 0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, ++ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401,
0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x001046ec, 0x00500060, 0x00404087, 0x0060000d, 0x004079e6, 0x002000f1, ++ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200233, 0x0060000a, 0x00104800, ++ 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, ++ 0x00108a14, 0x00200020, 0x00100b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, ++ 0x00114d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, ++ 0x002002d2, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684, ++ 0x00800001, 0x00200b1a, 0x0060000a, 0x00206380, 0x0040788a, 0x00201480, ++ 0x00800041, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x0020007a, ++ 0x0060000a, 0x00104280, 0x002002d2, 0x0060000a, 0x00200004, 0x00800001, ++ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000, ++ 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a, ++ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x00940400, 0x00200020, ++ 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305, ++ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv43_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, ++ 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, ++ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1, ++ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, ++ 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200233, 0x0060000a, ++ 0x00104800, 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, ++ 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, ++ 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, ++ 0x002002c8, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684, ++ 0x00800001, 0x00200b10, 0x0060000a, 0x00203870, 0x0040788a, 0x00201350, ++ 0x00800041, 0x00407c84, 0x00201560, 0x00800002, 0x00408d00, 0x00600006, ++ 0x00700003, 0x004086e6, 0x00700080, 0x002002c8, 0x0060000a, 0x00200004, ++ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884, ++ 0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a, ++ 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409988, 
0x0060000f, 0x00000000, 0x00500060, 0x00200000, ++ 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, ++ 0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, ++ 0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ++ ~0 ++}; ++ ++static uint32_t nv44_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409a65, 0x00409f06, ++ 0x0040ac68, 0x0040248f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x001041c6, 0x00104040, 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, ++ 0x00402320, 0x00402321, 0x00402322, 0x00402324, 0x00402326, 0x0040232b, ++ 0x001040c5, 0x00402328, 0x001040c5, 0x00402320, 0x00402468, 0x0060000d, ++ 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, 0x00402be6, ++ 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, 0x00110158, ++ 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, ++ 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, ++ 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, 0x0011415f, ++ 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, 0x001046ec, ++ 0x00500060, 0x00404b87, 0x0060000d, 0x004084e6, 0x002000f1, 0x0060000a, ++ 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, 0x00168691, ++ 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x001646cc, ++ 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, ++ 0x001043e1, 0x00500060, 0x00200232, 0x0060000a, 0x00104800, 0x00108901, ++ 0x00104910, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, ++ 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08, ++ 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x002002c8, ++ 0x0060000a, 0x00300000, 0x00200080, 0x00407d00, 0x00200084, 0x00800001, ++ 0x00200510, 0x0060000a, 0x002037e0, 0x0040838a, 0x00201320, 0x00800029, ++ 0x00409400, 0x00600006, 0x004090e6, 0x00700080, 0x0020007a, 0x0060000a, ++ 0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000, ++ 0x00200000, 0x0060000a, 0x00106002, 0x0040ac68, 0x00700000, 0x00200000, ++ 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, 0x00600007, ++ 0x00409e88, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, ++ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, ++ 0x0060000b, 0x00500069, 0x0060000c, 0x00402c68, 0x0040ae06, 0x0040af05, ++ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv46_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306, ++ 0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, ++ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004020e6, 0x007000a0, 0x00500060, 0x00200008, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x00500060, 0x00403f87, 0x0060000d, 0x004079e6, 0x002000f7, 0x0060000a, ++ 0x00200045, 0x00100620, 0x00104668, 0x0017466d, 0x0011068b, 0x00168691, ++ 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 
0x001146c6, 0x00200022, ++ 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1, ++ 0x00500060, 0x0020027f, 0x0060000a, 0x00104800, 0x00108901, 0x00104910, ++ 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, 0x00108a14, ++ 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08, 0x00104d80, ++ 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x00105406, 0x00105709, ++ 0x00200316, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084, ++ 0x00800001, 0x0020055e, 0x0060000a, 0x002037e0, 0x0040788a, 0x00201320, ++ 0x00800029, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x00200081, ++ 0x0060000a, 0x00104280, 0x00200316, 0x0060000a, 0x00200004, 0x00800001, ++ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000, ++ 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a, ++ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, ++ 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305, ++ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv47_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409265, 0x00409606, ++ 0x0040a368, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, ++ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d12, ++ 0x00500060, 0x00403f87, 0x0060000d, 0x00407ce6, 0x002000f0, 0x0060000a, ++ 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, ++ 0x001043e1, 0x00500060, 0x00200268, 0x0060000a, 0x00104800, 0x00108901, ++ 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, 0x00104a19, ++ 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, 0x0010cd00, ++ 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, ++ 0x00104f06, 0x00105406, 0x00105709, 0x00200318, 0x0060000a, 0x00300000, ++ 0x00200680, 0x00407500, 0x00200684, 0x00800001, 0x00200b60, 0x0060000a, ++ 0x00209540, 0x00407b8a, 0x00201350, 0x00800041, 0x00408c00, 0x00600006, ++ 0x004088e6, 0x00700080, 0x0020007a, 0x0060000a, 0x00104280, 0x00200318, ++ 0x0060000a, 0x00200004, 0x00800001, 0x00700000, 0x00200000, 0x0060000a, ++ 0x00106002, 0x0040a368, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, ++ 0x00700080, 0x00400a68, 0x00500060, 0x00600007, 0x00409688, 0x0060000f, ++ 0x00500060, 0x00200000, 0x0060000a, 0x00700000, 0x00106001, 0x0091a880, ++ 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, ++ 0x00402168, 0x0040a506, 0x0040a605, 0x00600009, 0x00700005, 0x00700006, ++ 0x0060000e, ~0 ++}; ++ ++//this is used for nv49 and nv4b ++static uint32_t nv49_4b_ctx_prog[] ={ ++ 0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020, ++ 0x0060000a, 0x00700080, 0x00104042, 0x00200020, 0x0060000a, 0x00700000, ++ 0x001040c5, 0x00400f26, 
0x00401068, 0x0060000d, 0x0070008f, 0x0070000e, ++ 0x00408d68, 0x004015e6, 0x007000a0, 0x00700080, 0x0040180f, 0x00700000, ++ 0x00200029, 0x0060000a, 0x0011814d, 0x00110158, 0x00105401, 0x0020003a, ++ 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, 0x0010c1dc, 0x00150210, ++ 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, 0x00200040, 0x00100280, ++ 0x00128100, 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, ++ 0x00104029, 0x00110400, 0x00104d12, 0x00500060, 0x004071e6, 0x00200118, ++ 0x0060000a, 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, ++ 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, ++ 0x001146c6, 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200290, 0x0060000a, 0x00104800, ++ 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, ++ 0x00104a19, 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, ++ 0x0010cd00, 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, ++ 0x00105c00, 0x00104f06, 0x00105406, 0x00105709, 0x00200340, 0x0060000a, ++ 0x00300000, 0x00200680, 0x00406a0f, 0x00200684, 0x00800001, 0x00200b88, ++ 0x0060000a, 0x00209540, 0x0040708a, 0x00201350, 0x00800041, 0x00407c0f, ++ 0x00600006, 0x00407ce6, 0x00700080, 0x002000a2, 0x0060000a, 0x00104280, ++ 0x00200340, 0x0060000a, 0x00200004, 0x00800001, 0x0070008e, 0x00408d68, ++ 0x0040020f, 0x00600006, 0x00409e68, 0x00600007, 0x0070000f, 0x0070000e, ++ 0x00408d68, 0x0091a880, 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, ++ 0x00500069, 0x0060000c, 0x00401568, 0x00700000, 0x00200001, 0x0040910e, ++ 0x00200021, 0x0060000a, 0x00409b0d, 0x00104a40, 0x00104a50, 0x00104a60, ++ 0x00104a70, 0x00104a80, 0x00104a90, 0x00104aa0, 0x00104ab0, 0x00407e0e, ++ 0x0040130f, 0x00408568, 0x0040a006, 0x0040a105, 0x00600009, 0x00700005, ++ 0x00700006, 0x0060000e, ~0 ++}; ++ ++ ++static uint32_t nv4a_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06, ++ 0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, ++ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407de6, 0x002000f1, ++ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, ++ 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a, ++ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, ++ 0x00140965, 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, ++ 0x0010cd04, 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, ++ 0x00104f06, 0x002002c8, 0x0060000a, 0x00300000, 0x00200080, 0x00407300, ++ 0x00200084, 0x00800001, 0x00200510, 0x0060000a, 0x002037e0, 0x0040798a, ++ 0x00201320, 0x00800029, 0x00407d84, 0x00201560, 0x00800002, 0x00409100, ++ 0x00600006, 0x00700003, 0x00408ae6, 0x00700080, 
0x0020007a, 0x0060000a, ++ 0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000, ++ 0x00200000, 0x0060000a, 0x00106002, 0x0040ac84, 0x00700002, 0x00600004, ++ 0x0040ac68, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, ++ 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, 0x00600007, 0x00409d88, ++ 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, 0x00700000, ++ 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, ++ 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, 0x0040ae06, 0x0040af05, ++ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv4c_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406, ++ 0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, ++ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, ++ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x0010427e, 0x001046ec, 0x00500060, 0x00404187, 0x0060000d, 0x00407ae6, ++ 0x002000f2, 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, ++ 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, ++ 0x001146c6, 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, ++ 0x00100700, 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200234, 0x0060000a, ++ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, ++ 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00, ++ 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, ++ 0x00104f06, 0x002002c0, 0x0060000a, 0x00300000, 0x00200080, 0x00407300, ++ 0x00200084, 0x00800001, 0x00200508, 0x0060000a, 0x00201320, 0x0040798a, ++ 0xfffffaf8, 0x00800029, 0x00408a00, 0x00600006, 0x004086e6, 0x00700080, ++ 0x0020007a, 0x0060000a, 0x00104280, 0x002002c0, 0x0060000a, 0x00200004, ++ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a168, ++ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, ++ 0x00500060, 0x00600007, 0x00409488, 0x0060000f, 0x00500060, 0x00200000, ++ 0x0060000a, 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, ++ 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a306, ++ 0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 ++}; ++ ++static uint32_t nv4e_ctx_prog[] = { ++ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, ++ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, ++ 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, ++ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, ++ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, ++ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, ++ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, ++ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, ++ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, ++ 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1, ++ 0x0060000a, 
0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, ++ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, ++ 0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, ++ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, ++ 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a, ++ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, ++ 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00, ++ 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x00105c00, 0x00104f06, ++ 0x002002b2, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084, ++ 0x00800001, 0x002004fa, 0x0060000a, 0x00201320, 0x0040788a, 0xfffffb06, ++ 0x00800029, 0x00407c84, 0x00200b20, 0x00800002, 0x00408d00, 0x00600006, ++ 0x00700003, 0x004086e6, 0x00700080, 0x002002b2, 0x0060000a, 0x00200004, ++ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884, ++ 0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a, ++ 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, ++ 0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, ++ 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, ++ 0x01940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, ++ 0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ++ ~0 ++}; ++ ++/* ++ * G70 0x47 ++ * G71 0x49 ++ * NV45 0x48 ++ * G72[M] 0x46 ++ * G73 0x4b ++ * C51_G7X 0x4c ++ * C51 0x4e ++ */ ++int ++nv40_graph_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = ++ (struct drm_nouveau_private *)dev->dev_private; ++ uint32_t *ctx_prog; ++ uint32_t vramsz, tmp; ++ int i, j; ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ++ ~NV_PMC_ENABLE_PGRAPH); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | ++ NV_PMC_ENABLE_PGRAPH); ++ ++ switch (dev_priv->chipset) { ++ case 0x40: ctx_prog = nv40_ctx_prog; break; ++ case 0x41: ++ case 0x42: ctx_prog = nv41_ctx_prog; break; ++ case 0x43: ctx_prog = nv43_ctx_prog; break; ++ case 0x44: ctx_prog = nv44_ctx_prog; break; ++ case 0x46: ctx_prog = nv46_ctx_prog; break; ++ case 0x47: ctx_prog = nv47_ctx_prog; break; ++ case 0x49: ctx_prog = nv49_4b_ctx_prog; break; ++ case 0x4a: ctx_prog = nv4a_ctx_prog; break; ++ case 0x4b: ctx_prog = nv49_4b_ctx_prog; break; ++ case 0x4c: ++ case 0x67: ctx_prog = nv4c_ctx_prog; break; ++ case 0x4e: ctx_prog = nv4e_ctx_prog; break; ++ default: ++ DRM_ERROR("Context program for 0x%02x unavailable\n", ++ dev_priv->chipset); ++ ctx_prog = NULL; ++ break; ++ } ++ ++ /* Load the context program onto the card */ ++ if (ctx_prog) { ++ DRM_DEBUG("Loading context program\n"); ++ i = 0; ++ ++ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); ++ while (ctx_prog[i] != ~0) { ++ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_prog[i]); ++ i++; ++ } ++ } ++ ++ /* No context present currently */ ++ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000); ++ ++ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); ++ NV_WRITE(NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); ++ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0); ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xe0de8055); ++ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000); ++ NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f); ++ ++ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); ++ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF); ++ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); ++ ++ j = NV_READ(0x1540) & 0xff; 
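++	/* 0x1540 appears to hold a mask of available graphics units; the
++	 * loop below finds the index of its lowest set bit and programs
++	 * that into 0x405000.  Both registers are undocumented, so this
++	 * reading is a guess. */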
++	if (j) {
++		for (i=0; !(j&1); j>>=1, i++);
++		NV_WRITE(0x405000, i);
++	}
++
++	if (dev_priv->chipset == 0x40) {
++		NV_WRITE(0x4009b0, 0x83280fff);
++		NV_WRITE(0x4009b4, 0x000000a0);
++	} else {
++		NV_WRITE(0x400820, 0x83280eff);
++		NV_WRITE(0x400824, 0x000000a0);
++	}
++
++	switch (dev_priv->chipset) {
++	case 0x40:
++	case 0x45:
++		NV_WRITE(0x4009b8, 0x0078e366);
++		NV_WRITE(0x4009bc, 0x0000014c);
++		break;
++	case 0x41:
++	case 0x42: /* pciid also 0x00Cx */
++//	case 0x0120: //XXX (pciid)
++		NV_WRITE(0x400828, 0x007596ff);
++		NV_WRITE(0x40082c, 0x00000108);
++		break;
++	case 0x43:
++		NV_WRITE(0x400828, 0x0072cb77);
++		NV_WRITE(0x40082c, 0x00000108);
++		break;
++	case 0x44:
++	case 0x46: /* G72 */
++	case 0x4a:
++	case 0x4c: /* G7x-based C51 */
++	case 0x4e:
++		NV_WRITE(0x400860, 0);
++		NV_WRITE(0x400864, 0);
++		break;
++	case 0x47: /* G70 */
++	case 0x49: /* G71 */
++	case 0x4b: /* G73 */
++		NV_WRITE(0x400828, 0x07830610);
++		NV_WRITE(0x40082c, 0x0000016A);
++		break;
++	default:
++		break;
++	}
++
++	NV_WRITE(0x400b38, 0x2ffff800);
++	NV_WRITE(0x400b3c, 0x00006000);
++
++	/* copy tile info from PFB */
++	switch (dev_priv->chipset) {
++	case 0x40: /* vanilla NV40 */
++		for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
++			tmp = NV_READ(NV10_PFB_TILE(i));
++			NV_WRITE(NV40_PGRAPH_TILE0(i), tmp);
++			NV_WRITE(NV40_PGRAPH_TILE1(i), tmp);
++			tmp = NV_READ(NV10_PFB_TLIMIT(i));
++			NV_WRITE(NV40_PGRAPH_TLIMIT0(i), tmp);
++			NV_WRITE(NV40_PGRAPH_TLIMIT1(i), tmp);
++			tmp = NV_READ(NV10_PFB_TSIZE(i));
++			NV_WRITE(NV40_PGRAPH_TSIZE0(i), tmp);
++			NV_WRITE(NV40_PGRAPH_TSIZE1(i), tmp);
++			tmp = NV_READ(NV10_PFB_TSTATUS(i));
++			NV_WRITE(NV40_PGRAPH_TSTATUS0(i), tmp);
++			NV_WRITE(NV40_PGRAPH_TSTATUS1(i), tmp);
++		}
++		break;
++	case 0x44:
++	case 0x4a:
++	case 0x4e: /* NV44-based cores don't have 0x406900? */
++		for (i=0; i<NV40_PFB_TILE__SIZE_0; i++) {
++			tmp = NV_READ(NV40_PFB_TILE(i));
++			NV_WRITE(NV40_PGRAPH_TILE0(i), tmp);
++			tmp = NV_READ(NV40_PFB_TLIMIT(i));
++			NV_WRITE(NV40_PGRAPH_TLIMIT0(i), tmp);
++			tmp = NV_READ(NV40_PFB_TSIZE(i));
++			NV_WRITE(NV40_PGRAPH_TSIZE0(i), tmp);
++			tmp = NV_READ(NV40_PFB_TSTATUS(i));
++			NV_WRITE(NV40_PGRAPH_TSTATUS0(i), tmp);
++		}
++		break;
++	case 0x46:
++	case 0x47:
++	case 0x49:
++	case 0x4b: /* G7X-based cores */
++		for (i=0; i<NV40_PFB_TILE__SIZE_1; i++) {
++			tmp = NV_READ(NV40_PFB_TILE(i));
++			NV_WRITE(NV47_PGRAPH_TILE0(i), tmp);
++			NV_WRITE(NV40_PGRAPH_TILE1(i), tmp);
++			tmp = NV_READ(NV40_PFB_TLIMIT(i));
++			NV_WRITE(NV47_PGRAPH_TLIMIT0(i), tmp);
++			NV_WRITE(NV40_PGRAPH_TLIMIT1(i), tmp);
++			tmp = NV_READ(NV40_PFB_TSIZE(i));
++			NV_WRITE(NV47_PGRAPH_TSIZE0(i), tmp);
++			NV_WRITE(NV40_PGRAPH_TSIZE1(i), tmp);
++			tmp = NV_READ(NV40_PFB_TSTATUS(i));
++			NV_WRITE(NV47_PGRAPH_TSTATUS0(i), tmp);
++			NV_WRITE(NV40_PGRAPH_TSTATUS1(i), tmp);
++		}
++		break;
++	default: /* everything else */
++		for (i=0; i<NV40_PFB_TILE__SIZE_0; i++) {
++			tmp = NV_READ(NV40_PFB_TILE(i));
++			NV_WRITE(NV40_PGRAPH_TILE0(i), tmp);
++			NV_WRITE(NV40_PGRAPH_TILE1(i), tmp);
++			tmp = NV_READ(NV40_PFB_TLIMIT(i));
++			NV_WRITE(NV40_PGRAPH_TLIMIT0(i), tmp);
++			NV_WRITE(NV40_PGRAPH_TLIMIT1(i), tmp);
++			tmp = NV_READ(NV40_PFB_TSIZE(i));
++			NV_WRITE(NV40_PGRAPH_TSIZE0(i), tmp);
++			NV_WRITE(NV40_PGRAPH_TSIZE1(i), tmp);
++			tmp = NV_READ(NV40_PFB_TSTATUS(i));
++			NV_WRITE(NV40_PGRAPH_TSTATUS0(i), tmp);
++			NV_WRITE(NV40_PGRAPH_TSTATUS1(i), tmp);
++		}
++		break;
++	}
++
++	/* begin RAM config */
++	vramsz = drm_get_resource_len(dev, 0) - 1;
++	switch (dev_priv->chipset) {
++	case 0x40:
++		NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
++		NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
++		NV_WRITE(0x4069A4, NV_READ(NV04_PFB_CFG0));
++		NV_WRITE(0x4069A8, NV_READ(NV04_PFB_CFG1));
++		NV_WRITE(0x400820, 0);
++		NV_WRITE(0x400824, 0);
++		NV_WRITE(0x400864, vramsz);
++		NV_WRITE(0x400868, vramsz);
++		break;
++	default:
++		switch (dev_priv->chipset) {
++		case 0x46:
++		case 0x47:
++		case 0x49:
++		case 0x4b:
++			NV_WRITE(0x400DF0, NV_READ(NV04_PFB_CFG0));
++			NV_WRITE(0x400DF4, NV_READ(NV04_PFB_CFG1));
++			break;
++		default:
++			NV_WRITE(0x4009F0, NV_READ(NV04_PFB_CFG0));
++			NV_WRITE(0x4009F4, NV_READ(NV04_PFB_CFG1));
++			break;
++		}
++		NV_WRITE(0x4069F0, NV_READ(NV04_PFB_CFG0));
++		NV_WRITE(0x4069F4, NV_READ(NV04_PFB_CFG1));
++		NV_WRITE(0x400840, 0);
++		NV_WRITE(0x400844, 0);
++		NV_WRITE(0x4008A0, vramsz);
++		NV_WRITE(0x4008A4, vramsz);
++		break;
++	}
++
++	/* per-context state, doesn't belong here */
++	NV_WRITE(0x400B20, 0x00000000);
++	NV_WRITE(0x400B04, 0xFFFFFFFF);
++
++	tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
++	NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++	tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
++	NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++
++	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
++	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
++	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
++	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
++
++	return 0;
++}
++
++void nv40_graph_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv40_mc.c git-nokia/drivers/gpu/drm-tungsten/nv40_mc.c
+--- git/drivers/gpu/drm-tungsten/nv40_mc.c	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv40_mc.c	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,38 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv40_mc_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t tmp;
++
++	/* Power up everything, resetting each individual unit will
++	 * be done later if needed.
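++	 * (The per-engine init_reset() helpers later in this patch, e.g.
++	 * nv50_fifo_init_reset(), do exactly that by toggling the unit's
++	 * NV03_PMC_ENABLE bit.)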
++ */ ++ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); ++ ++ switch (dev_priv->chipset) { ++ case 0x44: ++ case 0x46: /* G72 */ ++ case 0x4e: ++ case 0x4c: /* C51_G7X */ ++ tmp = NV_READ(NV40_PFB_020C); ++ NV_WRITE(NV40_PMC_1700, tmp); ++ NV_WRITE(NV40_PMC_1704, 0); ++ NV_WRITE(NV40_PMC_1708, 0); ++ NV_WRITE(NV40_PMC_170C, tmp); ++ break; ++ default: ++ break; ++ } ++ ++ return 0; ++} ++ ++void ++nv40_mc_takedown(struct drm_device *dev) ++{ ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv50_fifo.c git-nokia/drivers/gpu/drm-tungsten/nv50_fifo.c +--- git/drivers/gpu/drm-tungsten/nv50_fifo.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv50_fifo.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,343 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++struct nv50_fifo_priv { ++ struct nouveau_gpuobj_ref *thingo[2]; ++ int cur_thingo; ++}; ++ ++#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) ++ ++static void ++nv50_fifo_init_thingo(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; ++ struct nouveau_gpuobj_ref *cur; ++ int i, nr; ++ ++ DRM_DEBUG("\n"); ++ ++ cur = priv->thingo[priv->cur_thingo]; ++ priv->cur_thingo = !priv->cur_thingo; ++ ++ /* We never schedule channel 0 or 127 */ ++ for (i = 1, nr = 0; i < 127; i++) { ++ if (dev_priv->fifos[i]) { ++ INSTANCE_WR(cur->gpuobj, nr++, i); ++ } ++ } ++ NV_WRITE(0x32f4, cur->instance >> 12); ++ NV_WRITE(0x32ec, nr); ++ NV_WRITE(0x2500, 0x101); ++} ++ ++static int ++nv50_fifo_channel_enable(struct drm_device *dev, int channel, int nt) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan = dev_priv->fifos[channel]; ++ uint32_t inst; ++ ++ DRM_DEBUG("ch%d\n", channel); ++ ++ if (!chan->ramfc) ++ return -EINVAL; ++ ++ if (IS_G80) inst = chan->ramfc->instance >> 12; ++ else inst = chan->ramfc->instance >> 8; ++ NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), ++ inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); ++ ++ if (!nt) nv50_fifo_init_thingo(dev); ++ return 0; ++} ++ ++static void ++nv50_fifo_channel_disable(struct drm_device *dev, int channel, int nt) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst; ++ ++ DRM_DEBUG("ch%d, nt=%d\n", channel, nt); ++ ++ if (IS_G80) inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80; ++ else inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84; ++ NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), inst); ++ ++ if (!nt) nv50_fifo_init_thingo(dev); ++} ++ ++static void ++nv50_fifo_init_reset(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t pmc_e = NV_PMC_ENABLE_PFIFO; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | pmc_e); ++} ++ ++static void ++nv50_fifo_init_intr(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF); ++ NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); ++} ++ ++static void ++nv50_fifo_init_context_table(struct drm_device *dev) ++{ ++ int i; ++ ++ DRM_DEBUG("\n"); ++ ++ for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) ++ nv50_fifo_channel_disable(dev, i, 1); ++ nv50_fifo_init_thingo(dev); ++} ++ ++static void ++nv50_fifo_init_regs__nv(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(0x250c, 0x6f3cfc34); ++} ++ ++static void ++nv50_fifo_init_regs(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(0x2500, 0); ++ NV_WRITE(0x3250, 0); ++ NV_WRITE(0x3220, 0); ++ NV_WRITE(0x3204, 0); ++ NV_WRITE(0x3210, 0); ++ NV_WRITE(0x3270, 0); ++ ++ /* Enable dummy channels setup by nv50_instmem.c */ ++ nv50_fifo_channel_enable(dev, 0, 1); ++ nv50_fifo_channel_enable(dev, 127, 1); ++} ++ ++int ++nv50_fifo_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nv50_fifo_priv *priv; ++ int ret; ++ ++ DRM_DEBUG("\n"); ++ ++ priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); ++ if (!priv) ++ return 
-ENOMEM; ++ dev_priv->Engine.fifo.priv = priv; ++ ++ nv50_fifo_init_reset(dev); ++ nv50_fifo_init_intr(dev); ++ ++ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, ++ NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]); ++ if (ret) { ++ DRM_ERROR("error creating thingo0: %d\n", ret); ++ return ret; ++ } ++ ++ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, ++ NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]); ++ if (ret) { ++ DRM_ERROR("error creating thingo1: %d\n", ret); ++ return ret; ++ } ++ ++ nv50_fifo_init_context_table(dev); ++ nv50_fifo_init_regs__nv(dev); ++ nv50_fifo_init_regs(dev); ++ ++ return 0; ++} ++ ++void ++nv50_fifo_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; ++ ++ DRM_DEBUG("\n"); ++ ++ if (!priv) ++ return; ++ ++ nouveau_gpuobj_ref_del(dev, &priv->thingo[0]); ++ nouveau_gpuobj_ref_del(dev, &priv->thingo[1]); ++ ++ dev_priv->Engine.fifo.priv = NULL; ++ drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); ++} ++ ++int ++nv50_fifo_channel_id(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) & ++ NV50_PFIFO_CACHE1_PUSH1_CHID_MASK); ++} ++ ++int ++nv50_fifo_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ramfc = NULL; ++ int ret; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ if (IS_G80) { ++ uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start; ++ uint32_t vram_offset = chan->ramin->gpuobj->im_backing->start; ++ ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, vram_offset, ++ 0x100, NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, &ramfc, ++ &chan->ramfc); ++ if (ret) ++ return ret; ++ } else { ++ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, ++ &chan->ramfc); ++ if (ret) ++ return ret; ++ ramfc = chan->ramfc->gpuobj; ++ } ++ ++ INSTANCE_WR(ramfc, 0x48/4, chan->pushbuf->instance >> 4); ++ INSTANCE_WR(ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4)); ++ INSTANCE_WR(ramfc, 0x3c/4, 0x000f0078); /* fetch? 
*/ ++ INSTANCE_WR(ramfc, 0x44/4, 0x2101ffff); ++ INSTANCE_WR(ramfc, 0x60/4, 0x7fffffff); ++ INSTANCE_WR(ramfc, 0x10/4, 0x00000000); ++ INSTANCE_WR(ramfc, 0x08/4, 0x00000000); ++ INSTANCE_WR(ramfc, 0x40/4, 0x00000000); ++ INSTANCE_WR(ramfc, 0x50/4, 0x2039b2e0); ++ INSTANCE_WR(ramfc, 0x54/4, 0x000f0000); ++ INSTANCE_WR(ramfc, 0x7c/4, 0x30000001); ++ INSTANCE_WR(ramfc, 0x78/4, 0x00000000); ++ INSTANCE_WR(ramfc, 0x4c/4, chan->pushbuf_mem->size - 1); ++ ++ if (!IS_G80) { ++ INSTANCE_WR(chan->ramin->gpuobj, 0, chan->id); ++ INSTANCE_WR(chan->ramin->gpuobj, 1, chan->ramfc->instance); ++ ++ INSTANCE_WR(ramfc, 0x88/4, 0x3d520); /* some vram addy >> 10 */ ++ INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12); ++ } ++ ++ ret = nv50_fifo_channel_enable(dev, chan->id, 0); ++ if (ret) { ++ DRM_ERROR("error enabling ch%d: %d\n", chan->id, ret); ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++void ++nv50_fifo_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ nv50_fifo_channel_disable(dev, chan->id, 0); ++ ++ /* Dummy channel, also used on ch 127 */ ++ if (chan->id == 0) ++ nv50_fifo_channel_disable(dev, 127, 0); ++ ++ if ((NV_READ(NV03_PFIFO_CACHE1_PUSH1) & 0xffff) == chan->id) ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 127); ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++} ++ ++int ++nv50_fifo_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ /*XXX: incomplete, only touches the regs that NV does */ ++ ++ NV_WRITE(0x3244, 0); ++ NV_WRITE(0x3240, 0); ++ ++ NV_WRITE(0x3224, INSTANCE_RD(ramfc, 0x3c/4)); ++ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, INSTANCE_RD(ramfc, 0x48/4)); ++ NV_WRITE(0x3234, INSTANCE_RD(ramfc, 0x4c/4)); ++ NV_WRITE(0x3254, 1); ++ NV_WRITE(NV03_PFIFO_RAMHT, INSTANCE_RD(ramfc, 0x80/4)); ++ ++ if (!IS_G80) { ++ NV_WRITE(0x340c, INSTANCE_RD(ramfc, 0x88/4)); ++ NV_WRITE(0x3410, INSTANCE_RD(ramfc, 0x98/4)); ++ } ++ ++ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); ++ return 0; ++} ++ ++int ++nv50_fifo_save_context(struct nouveau_channel *chan) ++{ ++ DRM_DEBUG("ch%d\n", chan->id); ++ DRM_ERROR("stub!\n"); ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv50_graph.c git-nokia/drivers/gpu/drm-tungsten/nv50_graph.c +--- git/drivers/gpu/drm-tungsten/nv50_graph.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv50_graph.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,8286 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) ++ ++static void ++nv50_graph_init_reset(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21); ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e); ++ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | pmc_e); ++} ++ ++static void ++nv50_graph_init_intr(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ NV_WRITE(NV03_PGRAPH_INTR, 0xffffffff); ++ NV_WRITE(0x400138, 0xffffffff); ++ NV_WRITE(NV40_PGRAPH_INTR_EN, 0xffffffff); ++} ++ ++static void ++nv50_graph_init_regs__nv(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(0x400804, 0xc0000000); ++ NV_WRITE(0x406800, 0xc0000000); ++ NV_WRITE(0x400c04, 0xc0000000); ++ NV_WRITE(0x401804, 0xc0000000); ++ NV_WRITE(0x405018, 0xc0000000); ++ NV_WRITE(0x402000, 0xc0000000); ++ ++ NV_WRITE(0x400108, 0xffffffff); ++ ++ NV_WRITE(0x400824, 0x00004000); ++ NV_WRITE(0x400500, 0x00010001); ++} ++ ++static void ++nv50_graph_init_regs(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ NV_WRITE(NV04_PGRAPH_DEBUG_3, (1<<2) /* HW_CONTEXT_SWITCH_ENABLED */); ++} ++ ++static uint32_t nv50_ctx_voodoo[] = { ++ 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89, ++ 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff, ++ 0x00700009, 0x00417e4d, 0x00401e44, 0x00401e05, 0x00401e0d, 0x00415a06, ++ 0x00600005, 0x004015c5, 0x00600011, 0x00401c0b, 0x0090ffff, 0x0091ffff, ++ 0x00200020, 0x00600008, 0x0050004c, 0x00600009, 0x00415a45, 0x0041754d, ++ 0x0070009d, 0x004022cf, 0x0070009f, 0x0050009f, 0x00401fc0, 0x00200080, ++ 0x00600008, 0x00401f4f, 0x00401fc0, 0x004025cc, 0x00700081, 0x00200000, ++ 0x00600006, 0x00700000, 0x00111bfc, 0x00700080, 0x00700083, 0x00200047, ++ 0x00600006, 0x0011020a, 0x002005c0, 0x00600007, 0x00300000, 0x00c000ff, ++ 0x00c800ff, 0x00416507, 0x00202627, 0x008000ff, 0x00403c8c, 0x005000cb, ++ 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f, 0x00170202, 0x0011020a, ++ 0x00200032, 0x0010020d, 0x001b0242, 0x00120302, 0x00140402, 0x00180500, ++ 0x00130509, 0x00150550, 0x00110605, 0x001e0607, 0x00110700, 0x00110900, ++ 0x00110902, 0x00110a00, 0x00160b02, 0x00110b28, 0x00140b2b, 0x00110c01, ++ 0x00111400, 0x00111405, 0x00111407, 0x00111409, 0x0011140b, 0x002000ea, ++ 0x00101500, 0x0040640f, 0x0040644b, 0x00213700, 0x00600007, 0x00200440, ++ 0x008800ff, 0x0070008f, 0x0040648c, 0x005000cb, 0x00000000, 0x001118f8, ++ 0x0020002b, 0x00101a05, 0x00131c00, 0x00111c04, 0x00141c20, 0x00111c25, ++ 0x00131c40, 0x00111c44, 0x00141c60, 0x00111c65, 0x00131c80, 0x00111c84, ++ 0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00111cc4, 0x00141ce0, 0x00111ce5, ++ 0x00131d00, 0x00111d04, 
0x00141d20, 0x00111d25, 0x00131d40, 0x00111d44, ++ 0x00141d60, 0x00111d65, 0x00131f00, 0x00191f40, 0x00409ee0, 0x00200217, ++ 0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, ++ 0x00122100, 0x00122103, 0x00162200, 0x0040960f, 0x0040964b, 0x00213700, ++ 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x0040968c, 0x005000cb, ++ 0x00000000, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380, ++ 0x0011238b, 0x00192394, 0x0040b0e1, 0x00200285, 0x00600006, 0x00200044, ++ 0x00102480, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500, 0x00122503, ++ 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702, 0x00122780, ++ 0x0011278b, 0x00192794, 0x0040cce2, 0x002002f3, 0x00600006, 0x00200044, ++ 0x00102880, 0x001128c6, 0x001528c9, 0x0040c00f, 0x0040c04b, 0x00213700, ++ 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x0040c08c, 0x005000cb, ++ 0x00000000, 0x001928d0, 0x00122900, 0x00122903, 0x00162a00, 0x00122a07, ++ 0x00112a80, 0x00112b00, 0x00112b02, 0x00122b80, 0x00112b8b, 0x00192b94, ++ 0x0040dee3, 0x00200361, 0x00600006, 0x00200044, 0x00102c80, 0x00112cc6, ++ 0x00152cc9, 0x00192cd0, 0x00122d00, 0x00122d03, 0x00162e00, 0x00122e07, ++ 0x00112e80, 0x00112f00, 0x00112f02, 0x00122f80, 0x00112f8b, 0x00192f94, ++ 0x0040fae4, 0x002003cf, 0x00600006, 0x00200044, 0x00103080, 0x0040ec0f, ++ 0x0040ec4b, 0x00213700, 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, ++ 0x0040ec8c, 0x005000cb, 0x00000000, 0x001130c6, 0x001530c9, 0x001930d0, ++ 0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300, ++ 0x00113302, 0x00123380, 0x0011338b, 0x00193394, 0x00410ce5, 0x0020043d, ++ 0x00600006, 0x00200044, 0x00103480, 0x001134c6, 0x001534c9, 0x001934d0, ++ 0x00123500, 0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, ++ 0x00113702, 0x00123780, 0x0011378b, 0x00193794, 0x004128e6, 0x002004ab, ++ 0x00600006, 0x00200044, 0x00103880, 0x00411a0f, 0x00411a4b, 0x00213700, ++ 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x00411a8c, 0x005000cb, ++ 0x00000000, 0x001138c6, 0x001538c9, 0x001938d0, 0x00123900, 0x00123903, ++ 0x00163a00, 0x00123a07, 0x00113a80, 0x00113b00, 0x00113b02, 0x00123b80, ++ 0x00113b8b, 0x00193b94, 0x00413ae7, 0x00200519, 0x00600006, 0x00200044, ++ 0x00103c80, 0x00113cc6, 0x00153cc9, 0x00193cd0, 0x00123d00, 0x00123d03, ++ 0x00163e00, 0x00123e07, 0x00113e80, 0x00113f00, 0x00113f02, 0x00123f80, ++ 0x00113f8b, 0x00193f94, 0x00000000, 0x0041410f, 0x005000cb, 0x00213700, ++ 0x00600007, 0x00200440, 0x008800ff, 0x005000cb, 0x00414487, 0x0060000a, ++ 0x00000000, 0x00415300, 0x007000a0, 0x00700080, 0x002005c0, 0x00600007, ++ 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000, 0x00200000, ++ 0x00600006, 0x00111bfe, 0x0041754d, 0x00700000, 0x00200000, 0x00600006, ++ 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081, 0x00600004, ++ 0x0050004a, 0x00415f88, 0x0060000b, 0x00200000, 0x00600006, 0x00700000, ++ 0x0041750b, 0x00111bfd, 0x00402e4d, 0x00202627, 0x008000fd, 0x005000cb, ++ 0x00c00002, 0x002005c0, 0x00600007, 0x0020015f, 0x00800002, 0x005000cb, ++ 0x00c01802, 0x002024c8, 0x00800002, 0x005000cb, 0x00403a4d, 0x0060000b, ++ 0x0041734d, 0x00700001, 0x00700003, 0x00417906, 0x00417a05, 0x0060000d, ++ 0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e, 0x0070001c, ++ 0x0060000c, ~0 ++}; ++ ++static uint32_t nv84_ctx_voodoo[] = { ++ 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89, ++ 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff, ++ 0x00700009, 0x0041634d, 0x00402944, 0x00402905, 0x0040290d, 
0x00413e06, ++ 0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000, ++ 0x00700081, 0x00600004, 0x0050004a, 0x00216f40, 0x00600007, 0x00c02801, ++ 0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020, ++ 0x00600008, 0x0050004c, 0x00600009, 0x00413e45, 0x0041594d, 0x0070009d, ++ 0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008, ++ 0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006, ++ 0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216f40, 0x00600007, ++ 0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080, ++ 0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200480, 0x00600007, ++ 0x00300000, 0x00c000ff, 0x00c800ff, 0x00414907, 0x00202916, 0x008000ff, ++ 0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f, ++ 0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302, ++ 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f, ++ 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02, ++ 0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407, ++ 0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b, ++ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040798c, ++ 0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04, ++ 0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65, ++ 0x00131c80, 0x00121c84, 0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00121cc4, ++ 0x00141ce0, 0x00111ce5, 0x00131f00, 0x00191f40, 0x0040a1e0, 0x002001ed, ++ 0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, ++ 0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300, ++ 0x00112302, 0x00122380, 0x0011238b, 0x00112394, 0x0011239c, 0x0040bee1, ++ 0x00200254, 0x00600006, 0x00200044, 0x00102480, 0x0040af0f, 0x0040af4b, ++ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040af8c, ++ 0x005000cb, 0x00000000, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500, ++ 0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702, ++ 0x00122780, 0x0011278b, 0x00112794, 0x0011279c, 0x0040d1e2, 0x002002bb, ++ 0x00600006, 0x00200044, 0x00102880, 0x001128c6, 0x001528c9, 0x001928d0, ++ 0x00122900, 0x00122903, 0x00162a00, 0x00122a07, 0x00112a80, 0x00112b00, ++ 0x00112b02, 0x00122b80, 0x00112b8b, 0x00112b94, 0x00112b9c, 0x0040eee3, ++ 0x00200322, 0x00600006, 0x00200044, 0x00102c80, 0x0040df0f, 0x0040df4b, ++ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040df8c, ++ 0x005000cb, 0x00000000, 0x00112cc6, 0x00152cc9, 0x00192cd0, 0x00122d00, ++ 0x00122d03, 0x00162e00, 0x00122e07, 0x00112e80, 0x00112f00, 0x00112f02, ++ 0x00122f80, 0x00112f8b, 0x00112f94, 0x00112f9c, 0x004101e4, 0x00200389, ++ 0x00600006, 0x00200044, 0x00103080, 0x001130c6, 0x001530c9, 0x001930d0, ++ 0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300, ++ 0x00113302, 0x00123380, 0x0011338b, 0x00113394, 0x0011339c, 0x00411ee5, ++ 0x002003f0, 0x00600006, 0x00200044, 0x00103480, 0x00410f0f, 0x00410f4b, ++ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x00410f8c, ++ 0x005000cb, 0x00000000, 0x001134c6, 0x001534c9, 0x001934d0, 0x00123500, ++ 0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, 0x00113702, ++ 0x00123780, 0x0011378b, 0x00113794, 0x0011379c, 0x00000000, 0x0041250f, ++ 0x005000cb, 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x005000cb, ++ 0x00412887, 0x0060000a, 0x00000000, 0x00413700, 0x007000a0, 0x00700080, ++ 0x00200480, 
0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, ++ 0x00700000, 0x00200000, 0x00600006, 0x00111bfe, 0x0041594d, 0x00700000, ++ 0x00200000, 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, ++ 0x00700081, 0x00600004, 0x0050004a, 0x00414388, 0x0060000b, 0x00200000, ++ 0x00600006, 0x00700000, 0x0041590b, 0x00111bfd, 0x0040424d, 0x00202916, ++ 0x008000fd, 0x005000cb, 0x00c00002, 0x00200480, 0x00600007, 0x00200160, ++ 0x00800002, 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, ++ 0x00404e4d, 0x0060000b, 0x0041574d, 0x00700001, 0x005000cf, 0x00700003, ++ 0x00415e06, 0x00415f05, 0x0060000d, 0x00700005, 0x0070000d, 0x00700006, ++ 0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c, ~0 ++}; ++ ++static uint32_t nv86_ctx_voodoo[] = { ++ 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89, ++ 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff, ++ 0x00700009, 0x0040dd4d, 0x00402944, 0x00402905, 0x0040290d, 0x0040b906, ++ 0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000, ++ 0x00700081, 0x00600004, 0x0050004a, 0x00216d80, 0x00600007, 0x00c02801, ++ 0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020, ++ 0x00600008, 0x0050004c, 0x00600009, 0x0040b945, 0x0040d44d, 0x0070009d, ++ 0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008, ++ 0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006, ++ 0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216d80, 0x00600007, ++ 0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080, ++ 0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200280, 0x00600007, ++ 0x00300000, 0x00c000ff, 0x00c800ff, 0x0040c407, 0x00202916, 0x008000ff, ++ 0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f, ++ 0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302, ++ 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f, ++ 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02, ++ 0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407, ++ 0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b, ++ 0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x0070008f, 0x0040798c, ++ 0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04, ++ 0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65, ++ 0x00131f00, 0x00191f40, 0x004099e0, 0x002001d9, 0x00600006, 0x00200044, ++ 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, 0x00122100, 0x00122103, ++ 0x00162200, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380, ++ 0x0011238b, 0x00112394, 0x0011239c, 0x00000000, 0x0040a00f, 0x005000cb, ++ 0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x005000cb, 0x0040a387, ++ 0x0060000a, 0x00000000, 0x0040b200, 0x007000a0, 0x00700080, 0x00200280, ++ 0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000, ++ 0x00200000, 0x00600006, 0x00111bfe, 0x0040d44d, 0x00700000, 0x00200000, ++ 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081, ++ 0x00600004, 0x0050004a, 0x0040be88, 0x0060000b, 0x00200000, 0x00600006, ++ 0x00700000, 0x0040d40b, 0x00111bfd, 0x0040424d, 0x00202916, 0x008000fd, ++ 0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200160, 0x00800002, ++ 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, 0x00404e4d, ++ 0x0060000b, 0x0040d24d, 0x00700001, 0x00700003, 0x0040d806, 0x0040d905, ++ 0x0060000d, 0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e, ++ 0x0060000c, ~0 
++}; ++ ++static uint32_t nv92_ctx_voodoo[] = { ++ 0x0070008E, 0x0070009C, 0x00200020, 0x00600008, 0x0050004C, 0x00400E89, ++ 0x00200000, 0x00600007, 0x00300000, 0x00C000FF, 0x00200000, 0x008000FF, ++ 0x00700009, 0x0041924D, 0x00402944, 0x00402905, 0x0040290D, 0x00416E06, ++ 0x00600005, 0x004015C5, 0x00600011, 0x0040270B, 0x004021C5, 0x00700000, ++ 0x00700081, 0x00600004, 0x0050004A, 0x00219600, 0x00600007, 0x00C02701, ++ 0x0020002E, 0x00800001, 0x005000CB, 0x0090FFFF, 0x0091FFFF, 0x00200020, ++ 0x00600008, 0x0050004C, 0x00600009, 0x00416E45, 0x0041894D, 0x0070009D, ++ 0x00402DCF, 0x0070009F, 0x0050009F, 0x00402AC0, 0x00200080, 0x00600008, ++ 0x00402A4F, 0x00402AC0, 0x004030CC, 0x00700081, 0x00200000, 0x00600006, ++ 0x00700000, 0x00111BFC, 0x00700083, 0x00300000, 0x00219600, 0x00600007, ++ 0x00C00A01, 0x0020001E, 0x00800001, 0x005000CB, 0x00C000FF, 0x00700080, ++ 0x00700083, 0x00200047, 0x00600006, 0x0011020A, 0x00200540, 0x00600007, ++ 0x00300000, 0x00C000FF, 0x00C800FF, 0x00417907, 0x00202DD2, 0x008000FF, ++ 0x0040508C, 0x005000CB, 0x00A0023F, 0x00200040, 0x00600006, 0x0070000F, ++ 0x00170202, 0x0011020A, 0x00200032, 0x0010020D, 0x001C0242, 0x00120302, ++ 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000F, ++ 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110A00, 0x00160B02, ++ 0x00120B28, 0x00140B2B, 0x00110C01, 0x00111400, 0x00111405, 0x00111407, ++ 0x00111409, 0x0011140B, 0x002000CB, 0x00101500, 0x0040790F, 0x0040794B, ++ 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040798C, ++ 0x005000CB, 0x00000000, 0x00141A05, 0x00131A0C, 0x00131C00, 0x00121C04, ++ 0x00141C20, 0x00111C25, 0x00131C40, 0x00121C44, 0x00141C60, 0x00111C65, ++ 0x00131C80, 0x00121C84, 0x00141CA0, 0x00111CA5, 0x00131CC0, 0x00121CC4, ++ 0x00141CE0, 0x00111CE5, 0x00131F00, 0x00191F40, 0x0040A1E0, 0x002001C9, ++ 0x00600006, 0x00200044, 0x00102080, 0x001120C6, 0x001520C9, 0x001920D0, ++ 0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300, ++ 0x00112302, 0x00122380, 0x0011238B, 0x00112394, 0x0011239C, 0x0040BEE1, ++ 0x00200230, 0x00600006, 0x00200044, 0x00102480, 0x0040AF0F, 0x0040AF4B, ++ 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040AF8C, ++ 0x005000CB, 0x00000000, 0x001124C6, 0x001524C9, 0x001924D0, 0x00122500, ++ 0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702, ++ 0x00122780, 0x0011278B, 0x00112794, 0x0011279C, 0x0040D1E2, 0x00200297, ++ 0x00600006, 0x00200044, 0x00102880, 0x001128C6, 0x001528C9, 0x001928D0, ++ 0x00122900, 0x00122903, 0x00162A00, 0x00122A07, 0x00112A80, 0x00112B00, ++ 0x00112B02, 0x00122B80, 0x00112B8B, 0x00112B94, 0x00112B9C, 0x0040EEE3, ++ 0x002002FE, 0x00600006, 0x00200044, 0x00102C80, 0x0040DF0F, 0x0040DF4B, ++ 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040DF8C, ++ 0x005000CB, 0x00000000, 0x00112CC6, 0x00152CC9, 0x00192CD0, 0x00122D00, ++ 0x00122D03, 0x00162E00, 0x00122E07, 0x00112E80, 0x00112F00, 0x00112F02, ++ 0x00122F80, 0x00112F8B, 0x00112F94, 0x00112F9C, 0x004101E4, 0x00200365, ++ 0x00600006, 0x00200044, 0x00103080, 0x001130C6, 0x001530C9, 0x001930D0, ++ 0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300, ++ 0x00113302, 0x00123380, 0x0011338B, 0x00113394, 0x0011339C, 0x00411EE5, ++ 0x002003CC, 0x00600006, 0x00200044, 0x00103480, 0x00410F0F, 0x00410F4B, ++ 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x00410F8C, ++ 0x005000CB, 0x00000000, 0x001134C6, 0x001534C9, 0x001934D0, 0x00123500, ++ 0x00123503, 0x00163600, 0x00123607, 0x00113680, 
0x00113700, 0x00113702, ++ 0x00123780, 0x0011378B, 0x00113794, 0x0011379C, 0x004131E6, 0x00200433, ++ 0x00600006, 0x00200044, 0x00103880, 0x001138C6, 0x001538C9, 0x001938D0, ++ 0x00123900, 0x00123903, 0x00163A00, 0x00123A07, 0x00113A80, 0x00113B00, ++ 0x00113B02, 0x00123B80, 0x00113B8B, 0x00113B94, 0x00113B9C, 0x00414EE7, ++ 0x0020049A, 0x00600006, 0x00200044, 0x00103C80, 0x00413F0F, 0x00413F4B, ++ 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x00413F8C, ++ 0x005000CB, 0x00000000, 0x00113CC6, 0x00153CC9, 0x00193CD0, 0x00123D00, ++ 0x00123D03, 0x00163E00, 0x00123E07, 0x00113E80, 0x00113F00, 0x00113F02, ++ 0x00123F80, 0x00113F8B, 0x00113F94, 0x00113F9C, 0x00000000, 0x0041550F, ++ 0x005000CB, 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x005000CB, ++ 0x00415887, 0x0060000A, 0x00000000, 0x00416700, 0x007000A0, 0x00700080, ++ 0x00200540, 0x00600007, 0x00200004, 0x00C000FF, 0x008000FF, 0x005000CB, ++ 0x00700000, 0x00200000, 0x00600006, 0x00111BFE, 0x0041894D, 0x00700000, ++ 0x00200000, 0x00600006, 0x00111BFE, 0x00700080, 0x0070001D, 0x0040114D, ++ 0x00700081, 0x00600004, 0x0050004A, 0x00417388, 0x0060000B, 0x00200000, ++ 0x00600006, 0x00700000, 0x0041890B, 0x00111BFD, 0x0040424D, 0x00202DD2, ++ 0x008000FD, 0x005000CB, 0x00C00002, 0x00200540, 0x00600007, 0x00200160, ++ 0x00800002, 0x005000CB, 0x00C01802, 0x00202C72, 0x00800002, 0x005000CB, ++ 0x00404E4D, 0x0060000B, 0x0041874D, 0x00700001, 0x00700003, 0x00418D06, ++ 0x00418E05, 0x0060000D, 0x00700005, 0x0070000D, 0x00700006, 0x0070000B, ++ 0x0070000E, 0x0070001C, 0x0060000C, ~0 ++}; ++ ++static uint32_t nvaa_ctx_voodoo[] = { ++ 0x0070009c, 0x00300000, 0x0044f109, 0x00402d09, 0x0040e551, 0x00400a44, ++ 0x00400a05, 0x00400a0d, 0x0070008e, 0x0040124d, 0x0070009d, 0x0045004d, ++ 0x00700097, 0x00450121, 0x004446a1, 0x0044764d, 0x0044824d, 0x0070001d, ++ 0x00401806, 0x00600005, 0x00444445, 0x0044308b, 0x00401845, 0x0040234d, ++ 0x00700081, 0x00401ccf, 0x0070009f, 0x0050009f, 0x0044dc4d, 0x00700017, ++ 0x0040230b, 0x00447d4d, 0x00450221, 0x004456a1, 0x007000a0, 0x00700001, ++ 0x00700003, 0x00402706, 0x00402805, 0x0060000d, 0x00700005, 0x0070000d, ++ 0x00700006, 0x00700002, 0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c, ++ 0x00000000, 0x0090ffff, 0x0091ffff, 0x0044d44d, 0x00600009, 0x0048004d, ++ 0x00700096, 0x00403acf, 0x0070009f, 0x0050009f, 0x0040e551, 0x004036c0, ++ 0x00200080, 0x00600008, 0x0040364f, 0x004036c0, 0x00403ecc, 0x00403651, ++ 0x00700016, 0x0048004d, 0x00600011, 0x0048004d, 0x0044364d, 0x0070008e, ++ 0x00700081, 0x0044704d, 0x00447d4d, 0x00700083, 0x00300000, 0x00212740, ++ 0x00600007, 0x00c00b01, 0x00200022, 0x00800001, 0x005000cb, 0x00c000ff, ++ 0x00445e4d, 0x0048004d, 0x0044ce08, 0x0044734d, 0x00448b4d, 0x00445e4d, ++ 0x0044e24d, 0x0044764d, 0x0044824d, 0x0048004d, 0x00700083, 0x0045034d, ++ 0x00a0023f, 0x00200040, 0x00600006, 0x0044fc4d, 0x00448d4d, 0x002001d0, ++ 0x0044b860, 0x00200280, 0x0038ffff, 0x0044cc4d, 0x00300000, 0x005000cb, ++ 0x00451c4d, 0x005000cb, 0x0044d007, 0x0048004d, 0x0044794d, 0x00111bfc, ++ 0x0048004d, 0x0044794d, 0x00111bfd, 0x0048004d, 0x0044794d, 0x00111bfe, ++ 0x0048004d, 0x00200000, 0x00700000, 0x00600006, 0x0048004d, 0x00200001, ++ 0x00600006, 0x0044fc4d, 0x0011020a, 0x0048004d, 0x00300000, 0x00c3ffff, ++ 0x00200000, 0x00600007, 0x00700000, 0x00200008, 0x008000ff, 0x005000cb, ++ 0x0048004d, 0x00000000, 0x0048004d, 0x00000000, 0x00170202, 0x00200032, ++ 0x0010020d, 0x001e0242, 0x001102c0, 0x00120302, 0x00150402, 0x00180500, ++ 0x00130509, 0x00150550, 0x00110605, 0x00200013, 0x00100607, 
0x00110700, ++ 0x00110900, 0x00120902, 0x00110a00, 0x00160b02, 0x00120b28, 0x00140b2b, ++ 0x00110c01, 0x00110d01, 0x00111400, 0x00111405, 0x00111407, 0x00111409, ++ 0x0011140b, 0x002000d4, 0x00101500, 0x00141a05, 0x00131a0c, 0x00131c00, ++ 0x00131c04, 0x00141c20, 0x00131c25, 0x00131f00, 0x00131f04, 0x00111f08, ++ 0x00111f0b, 0x00200015, 0x00101f40, 0x0048004d, 0x00600006, 0x00451c4d, ++ 0x00112020, 0x00112022, 0x00200085, 0x00102040, 0x001120c8, 0x001420ca, ++ 0x001b20cf, 0x00122100, 0x00122103, 0x00162140, 0x00122147, 0x00122153, ++ 0x001121a0, 0x001221c0, 0x001121cb, 0x001121d4, 0x001521d8, 0x0048004d, ++ 0x00000000, 0x0048004d, 0x0060000b, 0x0048004d, 0x0060000a, 0x0048004d, ++ 0x0060000b, 0x0040d24d, 0x00200020, 0x00600008, 0x0050004c, 0x0048004d, ++ 0x002003e8, 0x00600008, 0x0050004c, 0x0048004d, 0x00600004, 0x0050004a, ++ 0x0048004d, 0x00c000ff, 0x00c800ff, 0x0048004d, 0x00c000ff, 0x00c800ff, ++ 0x0048004d, 0x00700016, 0x0070008e, 0x00700082, 0x00500041, 0x0044d84d, ++ 0x00700095, 0x005000d1, 0x00600016, 0x00500052, 0x00700002, 0x00700015, ++ 0x0040284d, 0x0070008e, 0x0044d44d, 0x00200000, 0x00600007, 0x00300000, ++ 0x00c000ff, 0x00200000, 0x008000ff, 0x00700009, 0x0070000e, 0x0048004d, ++ 0x00700080, 0x00480017, 0x00700000, 0x0048004d, 0x0048004d, 0x0048004d, ++ 0x0048004d, 0x0070008e, 0x0044d44d, 0x00700083, 0x0044df4d, 0x00450c4d, ++ 0x0070000f, 0x00410b8c, 0x005000cb, 0x0048004d, 0x00200280, 0x00600007, ++ 0x00452307, 0x00451187, 0x0048004d, 0x00000000, 0x00202070, 0x0044fc4d, ++ 0x008000ff, 0x0048004d, 0x00210600, 0x00600007, 0x00200428, 0x0044fc4d, ++ 0x008800ff, 0x0048004d, 0x0048000f, 0x0048004b, 0x0045164d, 0x0070008f, ++ 0x0048008c, 0x005000cb, 0x0048004d, 0x00202070, 0x0044fc4d, 0x008000fd, ++ 0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200161, 0x0044fc4d, ++ 0x00800002, 0x005000cb, 0x00c00002, 0x00201f0e, 0x0044fc4d, 0x00800002, ++ 0x005000cb, 0x0048004d, ~0 ++}; ++ ++static int ++nv50_graph_init_ctxctl(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t *voodoo = NULL; ++ ++ DRM_DEBUG("\n"); ++ ++ switch (dev_priv->chipset) { ++ case 0x50: ++ voodoo = nv50_ctx_voodoo; ++ break; ++ case 0x84: ++ voodoo = nv84_ctx_voodoo; ++ break; ++ case 0x86: ++ voodoo = nv86_ctx_voodoo; ++ break; ++ case 0x92: ++ voodoo = nv92_ctx_voodoo; ++ break; ++ case 0xaa: ++ voodoo = nvaa_ctx_voodoo; ++ break; ++ default: ++ DRM_ERROR("no voodoo for chipset NV%02x\n", dev_priv->chipset); ++ return -EINVAL; ++ } ++ ++ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); ++ while (*voodoo != ~0) { ++ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, *voodoo); ++ voodoo++; ++ } ++ ++ NV_WRITE(0x400320, 4); ++ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, 0); ++ ++ return 0; ++} ++ ++int ++nv50_graph_init(struct drm_device *dev) ++{ ++ int ret; ++ ++ DRM_DEBUG("\n"); ++ ++ nv50_graph_init_reset(dev); ++ nv50_graph_init_intr(dev); ++ nv50_graph_init_regs__nv(dev); ++ nv50_graph_init_regs(dev); ++ ++ ret = nv50_graph_init_ctxctl(dev); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++void ++nv50_graph_takedown(struct drm_device *dev) ++{ ++ DRM_DEBUG("\n"); ++} ++ ++static void ++nv50_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ctx = ref->gpuobj; ++ ++ INSTANCE_WR(ctx, 0x0010c/4, 0x00000030); ++ INSTANCE_WR(ctx, 0x00120/4, 0xff400040); ++ INSTANCE_WR(ctx, 0x00124/4, 0xfff00080); ++ INSTANCE_WR(ctx, 
0x00128/4, 0xfff70090); ++ INSTANCE_WR(ctx, 0x0012c/4, 0xffe806a8); ++ INSTANCE_WR(ctx, 0x001d4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x001d8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00214/4, 0x0000fe0c); ++ INSTANCE_WR(ctx, 0x00228/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00254/4, 0x0001fd87); ++ INSTANCE_WR(ctx, 0x00268/4, 0x00001018); ++ INSTANCE_WR(ctx, 0x0026c/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x002a4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x002a8/4, 0x0001005f); ++ INSTANCE_WR(ctx, 0x002b0/4, 0x00000600); ++ INSTANCE_WR(ctx, 0x002b4/4, 0x00000006); ++ INSTANCE_WR(ctx, 0x002c8/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x002d0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x002e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x002e8/4, 0x00300080); ++ INSTANCE_WR(ctx, 0x002ec/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00308/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0030c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00318/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0031c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00334/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00338/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0033c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0034c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00350/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x00354/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x0035c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00360/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00368/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00388/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003bc/4, 0x0000000a); ++ INSTANCE_WR(ctx, 0x003cc/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x003d4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x003fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00420/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00438/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0043c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00444/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00450/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00454/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00460/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0046c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00470/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00484/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0048c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00494/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004a8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x004c4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x004c8/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x004cc/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x004e0/4, 0x0000000c); ++ INSTANCE_WR(ctx, 0x004e0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x004e0/4, 0x00000014); ++ INSTANCE_WR(ctx, 0x004e0/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000006); ++ 
INSTANCE_WR(ctx, 0x004f8/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x004f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00558/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00598/4, 0x00000012); ++ INSTANCE_WR(ctx, 0x00598/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00598/4, 0x0000000c); ++ INSTANCE_WR(ctx, 0x00598/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005b4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x005b8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x005c8/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x005d4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x00000014); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005e8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x005f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005fc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00600/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00608/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00608/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00608/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00608/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00608/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00628/4, 0x00000200); ++ INSTANCE_WR(ctx, 0x00630/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00634/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x00638/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00644/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00648/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x0064c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x0065c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00660/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00668/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00678/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00680/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00688/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00690/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00698/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x0069c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x006a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x006ac/4, 0x00000f80); ++ INSTANCE_WR(ctx, 0x006f4/4, 0x007f0080); ++ INSTANCE_WR(ctx, 0x00730/4, 0x007f0080); ++ INSTANCE_WR(ctx, 0x00754/4, 0x1b74f820); ++ INSTANCE_WR(ctx, 0x00758/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00760/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00760/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x00760/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x00760/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x00778/4, 0x1b74f820); ++ INSTANCE_WR(ctx, 0x0077c/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00784/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00784/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x00784/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x00784/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x0079c/4, 0x1b74f820); ++ INSTANCE_WR(ctx, 0x007a0/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x007a8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x007a8/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x007a8/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x007a8/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x007c0/4, 0x1b74f820); ++ INSTANCE_WR(ctx, 0x007c4/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x007cc/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x007cc/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x007cc/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x007cc/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x007e4/4, 0x1b74f820); ++ INSTANCE_WR(ctx, 0x007e8/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x007f0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x007f0/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x007f0/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x007f0/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x00808/4, 0x1b74f820); ++ INSTANCE_WR(ctx, 0x0080c/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00814/4, 
0x00001000);
++ /* ... remaining INSTANCE_WR() graphics-context init table entries omitted ... */
++ INSTANCE_WR(ctx, 0x1a418/4,
0x03020100); ++ INSTANCE_WR(ctx, 0x1a438/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a458/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x1a478/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a518/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a538/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a5b8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ea58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ea78/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x2ecb8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2ee38/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ee58/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ee78/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ee98/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2eeb8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2efd8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f058/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f118/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2f8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f318/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f338/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f358/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f378/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f3b8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f4b8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2f518/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2f598/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f5b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f5f8/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f618/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f638/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f798/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f7b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f7d8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f7f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f818/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f838/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f858/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f898/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f918/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f938/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f958/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f978/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f998/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fa98/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2fab8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2fbb8/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x2fc18/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fc38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fc78/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2fd38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fdd8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fed8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2ff58/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2ff78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ffb8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fff8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30038/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x30078/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x300b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30618/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30638/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30658/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30678/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30698/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306b8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306d8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306f8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30718/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30818/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30838/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30858/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30878/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30898/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308b8/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308d8/4, 0x00000400); ++ INSTANCE_WR(ctx, 
0x308f8/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30918/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30938/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30958/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30978/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30998/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309b8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309d8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309f8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30a18/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30a38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30a58/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x30b58/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x30b78/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30b98/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30bd8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30c38/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x30c58/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30c98/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x30d38/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x30db8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x30dd8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30f18/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30fb8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x30ff8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31018/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x31038/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x31058/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x310d8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x311d8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x311f8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x314f8/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x31578/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x315d8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x31618/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31638/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x316b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31738/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31778/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x317d8/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x31818/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x31838/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x31858/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x31878/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x31898/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x31a98/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31ab8/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31ad8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31b18/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31b38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31b98/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31cb8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31cf8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31d18/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31d38/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31d58/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x31d78/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x31db8/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x0173c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x019fc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01a1c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01a3c/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x01adc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b3c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b5c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b7c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01b9c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x01bbc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0205c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0207c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x0209c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x020bc/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x020dc/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x020fc/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x0211c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x021bc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x021dc/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x0225c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x166fc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1671c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1695c/4, 0x0000000f); ++ 
INSTANCE_WR(ctx, 0x16adc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16afc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b1c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b3c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16b5c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16c7c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16cfc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16dbc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f5c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f7c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f9c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16fbc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16fdc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ffc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x1701c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1705c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x1715c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x171bc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1723c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x1725c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1729c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x172bc/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x172dc/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x1743c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1745c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1747c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x1749c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x174bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x174dc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x174fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1753c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1755c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1757c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1759c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x175fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1761c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1763c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x1773c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x1775c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x1785c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x178bc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x178dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1791c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x179dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17a7c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17b7c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17bfc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17c1c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17c5c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17c9c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17cdc/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x17d1c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17d5c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x182bc/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x182dc/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x182fc/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x1831c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x1833c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x1835c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x1837c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x1839c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x183bc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x184bc/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x184dc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x184fc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x1851c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x1853c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x1855c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x1857c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x1859c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x185bc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x185dc/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x185fc/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x1861c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x1863c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x1865c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x1867c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x1869c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x186bc/4, 
0x00000300); ++ INSTANCE_WR(ctx, 0x186dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x186fc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x187fc/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x1881c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x1883c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x1887c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x188dc/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x188fc/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x1893c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x189dc/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x18a5c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x18a7c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18bbc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18c5c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18c9c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18cbc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18cdc/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18cfc/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x18d7c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18e7c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18e9c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x1919c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x1921c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x1927c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x192bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x192dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1935c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x193dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1941c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1947c/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x194bc/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x194dc/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x194fc/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x1951c/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x1953c/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x1973c/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x1975c/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x1977c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x197bc/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x197dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1983c/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x1995c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1999c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x199bc/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x199dc/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x199fc/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x19a1c/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x19a5c/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x19a9c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d5c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d7c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19d9c/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x19e3c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19e9c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19ebc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19edc/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x19efc/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x19f1c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a3bc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a3dc/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x1a3fc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a41c/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x1a43c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a45c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x1a47c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a51c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a53c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a5bc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ea5c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ea7c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x2ecbc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2ee3c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2ee5c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ee7c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ee9c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2eebc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2efdc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f05c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f11c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 
0x2f2dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f2fc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f31c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f33c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f35c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f37c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f3bc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f4bc/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2f51c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2f59c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f5bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f5fc/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f61c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f63c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f79c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f7bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f7dc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f7fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f81c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f83c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f85c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f89c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f8fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f91c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f93c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f95c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f97c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f99c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fa9c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2fabc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2fbbc/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x2fc1c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fc3c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fc7c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2fd3c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fddc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fedc/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2ff5c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2ff7c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ffbc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fffc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x3003c/4, 0x000003ff); ++ INSTANCE_WR(ctx, 0x3007c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x300bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x3061c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x3063c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x3065c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x3067c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x3069c/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306bc/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306dc/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x306fc/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x3071c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x3081c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x3083c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x3085c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x3087c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x3089c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308bc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308dc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x308fc/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x3091c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x3093c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x3095c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x3097c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x3099c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309bc/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309dc/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x309fc/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30a1c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30a3c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30a5c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x30b5c/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x30b7c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30b9c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30bdc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30c3c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x30c5c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30c9c/4, 0x00000003); ++ 
INSTANCE_WR(ctx, 0x30d3c/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x30dbc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x30ddc/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30f1c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30fbc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x30ffc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x3101c/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x3103c/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x3105c/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x310dc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x311dc/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x311fc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x314fc/4, 0x00003e60); ++ INSTANCE_WR(ctx, 0x3157c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x315dc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x3161c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x3163c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x316bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x3173c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x3177c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x317dc/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x3181c/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x3183c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x3185c/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x3187c/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x3189c/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x31a9c/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31abc/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31adc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31b1c/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31b3c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31b9c/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31cbc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31cfc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31d1c/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31d3c/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31d5c/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x31d7c/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x31dbc/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x4dc00/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4dc40/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc60/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc80/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dca0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd00/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd60/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd80/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dda0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dde0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4de00/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df80/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dfa0/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfc0/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfe0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e040/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e0a0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0c0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e120/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e140/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e2a0/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e380/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3a0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3c0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3e0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e400/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e420/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e440/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e460/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e4a0/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e560/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e580/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5e0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e700/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e7a0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8e0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e900/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e920/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e940/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e960/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x4e980/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9e0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55e00/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e40/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc24/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc44/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc64/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc84/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dce4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd44/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd64/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd84/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4ddc4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4dde4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df64/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df84/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfa4/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfc4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e024/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e084/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0a4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e104/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e124/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e284/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e364/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e384/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3a4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3c4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e404/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e424/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e444/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e484/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e544/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e564/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5a4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5c4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6e4/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e784/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8c4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e904/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e924/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e944/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e964/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e984/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9c4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55de4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e24/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc28/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc48/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc68/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc88/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dce8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd48/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd68/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd88/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4ddc8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4dde8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df68/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df88/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfa8/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfc8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e028/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e088/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0a8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0c8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e108/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e128/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e288/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e368/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e388/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3a8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3e8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e408/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e428/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e448/4, 0x00000001); ++ INSTANCE_WR(ctx, 
0x4e488/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e548/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e568/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5a8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5c8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6e8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e788/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8c8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8e8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e908/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e928/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e948/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e968/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e988/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55de8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e28/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc2c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc4c/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc6c/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc8c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dcec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd4c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd6c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd8c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4ddcc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4ddec/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df6c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df8c/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfac/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfcc/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e02c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e08c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0ac/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e10c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e12c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e28c/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e36c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e38c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3ac/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3cc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e40c/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e42c/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e44c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e48c/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e54c/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e56c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5ac/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5cc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6ec/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e78c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8cc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e90c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e92c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e94c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e96c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e98c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9cc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55dec/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc30/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc50/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc70/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc90/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dcf0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd50/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd70/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4ddd0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4ddf0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df70/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df90/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfb0/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfd0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e090/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0b0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0d0/4, 0x00000001); ++ 
INSTANCE_WR(ctx, 0x4e110/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e130/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e290/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e370/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e390/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3b0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3d0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e410/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e430/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e450/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e490/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e550/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e570/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5b0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5d0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6f0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e790/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8d0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e910/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e930/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e950/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e970/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e990/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9d0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55df0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc34/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc54/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc74/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc94/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dcf4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd54/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd74/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd94/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4ddd4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4ddf4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df74/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df94/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfb4/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfd4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e034/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e094/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0b4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e114/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e134/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e294/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e374/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e394/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3b4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3d4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e414/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e434/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e454/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e494/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e554/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e574/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5b4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5d4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6f4/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e794/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8d4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e914/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e934/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e954/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e974/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e994/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9d4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55df4/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc38/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc58/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc78/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc98/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dcf8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd58/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd78/4, 
0x00001000); ++ INSTANCE_WR(ctx, 0x4dd98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4ddd8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4ddf8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df78/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df98/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfb8/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfd8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e038/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e098/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0b8/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e118/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e138/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e298/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e378/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e398/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3b8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3d8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e418/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e438/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e458/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e498/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e558/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e578/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5b8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5d8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6f8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e798/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8d8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e918/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e938/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e958/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e978/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e998/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e9d8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x55df8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x55e38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dc3c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4dc5c/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dc7c/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dc9c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dcfc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dd5c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd7c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4dd9c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4dddc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4ddfc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4df7c/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x4df9c/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x4dfbc/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x4dfdc/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e03c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e09c/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0bc/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x4e0dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e11c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e13c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e29c/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e37c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e39c/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3bc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3dc/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x4e3fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e41c/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e43c/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x4e45c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e49c/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x4e55c/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4e57c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e5bc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e5dc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x4e6fc/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x4e79c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4e8dc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e8fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e91c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4e93c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x4e95c/4, 0x00000001); ++ INSTANCE_WR(ctx, 
0x4e97c/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x4e99c/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x4e9dc/4, 0x00000004);
++ INSTANCE_WR(ctx, 0x55dfc/4, 0x00000011);
++ INSTANCE_WR(ctx, 0x55e3c/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x00130/4, 0x00000002);
++ INSTANCE_WR(ctx, 0x00858/4, 0x00000000);
++ INSTANCE_WR(ctx, 0x00760/4, 0x00000000);
++ INSTANCE_WR(ctx, 0x00774/4, 0x00000000);
++ INSTANCE_WR(ctx, 0x00784/4, 0x00000000);
++ INSTANCE_WR(ctx, 0x00798/4, 0x00000000);
++ INSTANCE_WR(ctx, 0x007a8/4, 0x00000000);
++ INSTANCE_WR(ctx, 0x007bc/4, 0x00000000);
++ INSTANCE_WR(ctx, 0x007cc/4, 0x00000000);
++ INSTANCE_WR(ctx, 0x007e0/4, 0x00000000);
++ INSTANCE_WR(ctx, 0x007f0/4, 0x00000000);
++ INSTANCE_WR(ctx, 0x00804/4, 0x00000000);
++ INSTANCE_WR(ctx, 0x00814/4, 0x00000000);
++ INSTANCE_WR(ctx, 0x00828/4, 0x00000000);
++}
++
++static void
++nv84_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++ struct drm_nouveau_private *dev_priv = dev->dev_private;
++ struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++ INSTANCE_WR(ctx, 0x0010c/4, 0x00000030);
++ INSTANCE_WR(ctx, 0x00130/4, 0x00000002);
++ INSTANCE_WR(ctx, 0x001d4/4, 0x00000003);
++ INSTANCE_WR(ctx, 0x001d8/4, 0x00001000);
++ INSTANCE_WR(ctx, 0x00218/4, 0x0000fe0c);
++ INSTANCE_WR(ctx, 0x0022c/4, 0x00001000);
++ INSTANCE_WR(ctx, 0x00258/4, 0x00000187);
++ INSTANCE_WR(ctx, 0x0026c/4, 0x00001018);
++ INSTANCE_WR(ctx, 0x00270/4, 0x000000ff);
++ INSTANCE_WR(ctx, 0x002ac/4, 0x00000004);
++ INSTANCE_WR(ctx, 0x002b0/4, 0x044d00df);
++ INSTANCE_WR(ctx, 0x002b8/4, 0x00000600);
++ INSTANCE_WR(ctx, 0x002d0/4, 0x01000000);
++ INSTANCE_WR(ctx, 0x002d4/4, 0x000000ff);
++ INSTANCE_WR(ctx, 0x002dc/4, 0x00000400);
++ INSTANCE_WR(ctx, 0x002f4/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x002f8/4, 0x000e0080);
++ INSTANCE_WR(ctx, 0x002fc/4, 0x00000004);
++ INSTANCE_WR(ctx, 0x00318/4, 0x00000002);
++ INSTANCE_WR(ctx, 0x0031c/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x00328/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x0032c/4, 0x00000100);
++ INSTANCE_WR(ctx, 0x00344/4, 0x00000002);
++ INSTANCE_WR(ctx, 0x00348/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x0034c/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x0035c/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x00360/4, 0x003fffff);
++ INSTANCE_WR(ctx, 0x00364/4, 0x00001fff);
++ INSTANCE_WR(ctx, 0x0036c/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x00370/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x00378/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x0037c/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x00380/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x00384/4, 0x00000004);
++ INSTANCE_WR(ctx, 0x00388/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x0038c/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x00390/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x00394/4, 0x00000007);
++ INSTANCE_WR(ctx, 0x00398/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x0039c/4, 0x00000007);
++ INSTANCE_WR(ctx, 0x003a0/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x003a4/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x003a8/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x003c0/4, 0x00000100);
++ INSTANCE_WR(ctx, 0x003c8/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x003d4/4, 0x00000100);
++ INSTANCE_WR(ctx, 0x003d8/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x003dc/4, 0x00000100);
++ INSTANCE_WR(ctx, 0x003e4/4, 0x00000001);
++ INSTANCE_WR(ctx, 0x003f0/4, 0x00000100);
++ INSTANCE_WR(ctx, 0x00404/4, 0x00000004);
++ INSTANCE_WR(ctx, 0x00408/4, 0x00000070);
++ INSTANCE_WR(ctx, 0x0040c/4, 0x00000080);
++ INSTANCE_WR(ctx, 0x00420/4, 0x0000000c);
++ INSTANCE_WR(ctx, 0x00428/4, 0x00000008);
++ INSTANCE_WR(ctx, 0x0042c/4, 0x00000014);
++
INSTANCE_WR(ctx, 0x00434/4, 0x00000029); ++ INSTANCE_WR(ctx, 0x00438/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x0043c/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x00440/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x00444/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00448/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x00454/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0045c/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00460/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00464/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00468/4, 0x00000006); ++ INSTANCE_WR(ctx, 0x0046c/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x00470/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004b4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x004e4/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x004e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x004ec/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x004f0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x004f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00500/4, 0x00000012); ++ INSTANCE_WR(ctx, 0x00504/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00508/4, 0x0000000c); ++ INSTANCE_WR(ctx, 0x0050c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0051c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00520/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00524/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00530/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x00534/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00560/4, 0x00000014); ++ INSTANCE_WR(ctx, 0x00564/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00570/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0057c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00588/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x0058c/4, 0x00000e00); ++ INSTANCE_WR(ctx, 0x00590/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00594/4, 0x00001e00); ++ INSTANCE_WR(ctx, 0x0059c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005a4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005a8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x00000200); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005c8/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x005cc/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x005d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000070); ++ INSTANCE_WR(ctx, 0x005e0/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x005f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005f4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x005fc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0060c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00614/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0061c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00624/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0062c/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00630/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00634/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0063c/4, 0x00000f80); ++ INSTANCE_WR(ctx, 0x00684/4, 0x007f0080); ++ INSTANCE_WR(ctx, 0x006c0/4, 0x007f0080); ++ ++ INSTANCE_WR(ctx, 0x006e4/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x006e8/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x006f0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x006f4/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x006f8/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x006fc/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x00700/4, 0xb7892080); ++ ++ INSTANCE_WR(ctx, 0x0070c/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x00710/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00718/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x0071c/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x00720/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x00724/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x00728/4, 0xb7892080); ++ ++ INSTANCE_WR(ctx, 0x00734/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x00738/4, 0x89058001); ++ INSTANCE_WR(ctx, 
0x00740/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x00744/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x00748/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x0074c/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x00750/4, 0xb7892080); ++ ++ INSTANCE_WR(ctx, 0x0075c/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x00760/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x00768/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x0076c/4, 0x0000001f); ++ INSTANCE_WR(ctx, 0x00770/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x00774/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x00778/4, 0xb7892080); ++ ++ INSTANCE_WR(ctx, 0x00784/4, 0x00010040); ++ INSTANCE_WR(ctx, 0x0078c/4, 0x00000022); ++ INSTANCE_WR(ctx, 0x00798/4, 0x00010040); ++ INSTANCE_WR(ctx, 0x0079c/4, 0x00000022); ++ ++ INSTANCE_WR(ctx, 0x007b4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x007b8/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x007bc/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x007cc/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x007d0/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x007f4/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x007fc/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00804/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x0080c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00810/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00834/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00838/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x0083c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x0084c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00850/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00874/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x0087c/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00884/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x0088c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00890/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x008b8/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x008c4/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x008dc/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x008e8/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00904/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00908/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x0090c/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00910/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00914/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00918/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00924/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00928/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x0092c/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00930/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00934/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x0093c/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00940/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00950/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00954/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00958/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00968/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x0096c/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00990/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00998/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x009a8/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x009ac/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x009d0/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x009d4/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x009d8/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x009e8/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x009ec/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00a10/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00a18/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00a20/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00a28/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00a2c/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00a54/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x00a60/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x00a78/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00a7c/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00a80/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x00a84/4, 
0x00030201); ++ INSTANCE_WR(ctx, 0x00aa0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00aa4/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00aa8/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00aac/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00ab0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00ab4/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00ac0/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00ac4/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00ac8/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00acc/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00ad0/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00ad8/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00adc/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00aec/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00af0/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00af4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00b04/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00b08/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00b2c/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00b34/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00b3c/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00b44/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00b48/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00b6c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00b70/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00b74/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00b84/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00b88/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00bac/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00bb4/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00bbc/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00bc4/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00bc8/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00bf0/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x00c14/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00c18/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00c1c/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x00c20/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00c3c/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00c40/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00c44/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00c48/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00c4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00c50/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00c5c/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00c60/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00c64/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00c68/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00c6c/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00c74/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00c78/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00c88/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00c8c/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00c90/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00ca0/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00ca4/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00cc8/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00cd0/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00cd8/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00ce0/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00ce4/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00d08/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00d0c/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00d10/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00d20/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00d24/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00d48/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00d50/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00d58/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00d60/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00d64/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00d8c/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x00d98/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x00db0/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00db4/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00db8/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x00dbc/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00dd8/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00ddc/4, 0x0d0c0b0a); ++ 
INSTANCE_WR(ctx, 0x00de0/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00de4/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00de8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00dec/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00df8/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00dfc/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00e00/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00e04/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00e08/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00e10/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00e14/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00e24/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00e28/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00e2c/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00e3c/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00e40/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00e64/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00e6c/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00e74/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00e7c/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00e80/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00ea4/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00ea8/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00eac/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00ebc/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00ec0/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x00ee4/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00eec/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00ef4/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00efc/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00f00/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00f28/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x00f34/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x00f4c/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00f50/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00f54/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x00f58/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x00f74/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x00f78/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x00f7c/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x00f80/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x00f84/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00f88/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00f94/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x00f98/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x00f9c/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x00fa0/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x00fa4/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00fac/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x00fb0/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x00fc0/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00fc4/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00fc8/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00fd8/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00fdc/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x01000/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x01008/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x01010/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x01018/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x0101c/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x01040/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x01044/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x01048/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x01058/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x0105c/4, 0x00880000); ++ INSTANCE_WR(ctx, 0x01080/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x01088/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x01090/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x01098/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x0109c/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x010c4/4, 0x00027070); ++ INSTANCE_WR(ctx, 0x010d0/4, 0x03ffffff); ++ INSTANCE_WR(ctx, 0x010e8/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x010ec/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x010f0/4, 0x05100202); ++ INSTANCE_WR(ctx, 0x010f4/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x01110/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x01114/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x01118/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x0111c/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x01120/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x01124/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01130/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x01134/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x01138/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x0113c/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x01140/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x01148/4, 0x0077f005); ++ INSTANCE_WR(ctx, 0x0114c/4, 0x003f7fff); ++ ++ INSTANCE_WR(ctx, 0x01230/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01284/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x0130c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x01324/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x0134c/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x014ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x014f0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01504/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x0150c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01510/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01530/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x0156c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x015d0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01630/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0164c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x01650/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01670/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01690/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x016c4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x016e4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01724/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01744/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x0176c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01784/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x0178c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x017cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01924/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x01a4c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01b30/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b50/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01b70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01b90/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x01bb0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01bd0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01c6c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01c70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01c8c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01c90/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01cac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01ccc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01cec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d0c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d10/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01d2c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d4c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d6c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01d8c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01dac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01dcc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01dec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01e0c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01e2c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01e4c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0218c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x021cc/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x022ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x022ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0232c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x024cc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x025cc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x026cc/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x027ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x027cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x027ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0280c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0282c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0284c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0286c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x028ac/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x028ec/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x02bac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02bcc/4, 0x00000002); ++ 
INSTANCE_WR(ctx, 0x02bec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02c0c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02c2c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x02c4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02c6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02cec/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x02d0c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0398c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x039cc/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x03b6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03b8c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x03bec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03ccc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x03dec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03e04/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x03e0c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x03e44/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03e4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x040cc/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x042ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0430c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0432c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0434c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0436c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0438c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x043ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x043cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x043ec/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0440c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0442c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0444c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0446c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0448c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x044ac/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x044cc/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x0480c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0484c/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x0492c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0496c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x049a4/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x049ac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04b4c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x04c4c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x04d4c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x04e2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04e4c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04e6c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04e8c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x04eac/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04ecc/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x04eec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04f2c/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x04f6c/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x0522c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0524c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x0526c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0528c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x052ac/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x052cc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x052ec/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0536c/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x0538c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x083a0/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x083c0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x083e0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x08400/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x08420/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x08440/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x084a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x084c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x084e0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x08500/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x08520/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x11e40/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x11e60/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x15044/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x152e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15304/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x15324/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x15344/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x15384/4, 
0x00000008); ++ INSTANCE_WR(ctx, 0x15444/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15484/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x154a4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x154c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x154e4/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x15504/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x155e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15624/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15644/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15664/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15704/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x15744/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x15764/4, 0x00000015); ++ INSTANCE_WR(ctx, 0x157e4/4, 0x04444480); ++ INSTANCE_WR(ctx, 0x15f64/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x16004/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x16064/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x160a4/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x160c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x160e4/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x16104/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16124/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16144/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x161b0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x161c8/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x161d0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x16228/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x16408/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x16410/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x164e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16508/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x16568/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16590/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x165b0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x165d0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x165f0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16610/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x16730/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x167b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x167c8/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x16870/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x168a8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x169a8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x169c8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x16a10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16a30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16a50/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16a70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16a90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ab0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16ad0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16b10/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x16bc8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16c10/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x16c68/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16c70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x16c88/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x16ca8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x16cf0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x16d10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16d28/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x16d48/4, 0x00000052); ++ INSTANCE_WR(ctx, 0x16d50/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x16d70/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x16d90/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x16de8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ef0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f30/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16f50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16f90/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x16fb0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x16ff0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17008/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17010/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17028/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17030/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17048/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17050/4, 0x00000001); ++ INSTANCE_WR(ctx, 
0x17068/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17070/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17088/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17090/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x170a8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x170b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x170c8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x170d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x170e8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x170f0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17108/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17128/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17148/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17168/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x17188/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x171a8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x171c8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x171e8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x171f0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17208/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x17210/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x17310/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x17370/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17390/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17410/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x174d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17570/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17670/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x176e8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x176f0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17708/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x17710/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17750/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17768/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17790/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x177a8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x177c8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x177d0/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x177e8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x17808/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x17810/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17828/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x17850/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x17bc4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x17be4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x17c28/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x17c48/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x17c84/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17c88/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x17db0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17dd0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17df0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e04/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x17e10/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e24/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x17e30/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e50/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e70/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17e90/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x17eb0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x17fb0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x17fd0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x17ff0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18010/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18030/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18050/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18070/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x18090/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x180b0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x180d0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x180f0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18110/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18130/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18150/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18168/4, 0x00000102); ++ INSTANCE_WR(ctx, 0x18170/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x18190/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x181a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x181b0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x181c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x181d0/4, 0x00000001); ++ 
INSTANCE_WR(ctx, 0x181e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x181f0/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x18208/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18228/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18248/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18288/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x182c8/4, 0x00000102); ++ INSTANCE_WR(ctx, 0x182f0/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x18310/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18330/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x18370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x183d0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x183f0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x18408/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18428/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18430/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x18448/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18468/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x184d0/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x18550/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x18570/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x186b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18750/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18790/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x187b0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x187d0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x187f0/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x18870/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18970/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x18990/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x18aa8/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x18b08/4, 0x00000804); ++ INSTANCE_WR(ctx, 0x18b48/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18b68/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18b88/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x18bc8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18be8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18c28/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x18c90/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x18cc8/4, 0x00000804); ++ INSTANCE_WR(ctx, 0x18ce8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18d08/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x18d10/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x18d28/4, 0x0000007f); ++ INSTANCE_WR(ctx, 0x18d68/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18d70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18d88/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x18db0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18dc8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x18dd0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18de8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18e08/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x18e48/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x18e50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18ec8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18ee8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x18ef0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18f30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x18fb0/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x18fc8/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x18fe8/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x18ff0/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x19010/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x19030/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x19050/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x19070/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x192d0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x192f0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x19310/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19350/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x19370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x193d0/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x194f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19530/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x19550/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x19570/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x19590/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x195b0/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x195f0/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x19630/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19708/4, 
0x00000001); ++ INSTANCE_WR(ctx, 0x19768/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x198f0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19910/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19930/4, 0x00608080); ++ INSTANCE_WR(ctx, 0x199d0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19a30/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19a50/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19a70/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x19a90/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19e88/4, 0x00000088); ++ INSTANCE_WR(ctx, 0x19ea8/4, 0x00000088); ++ INSTANCE_WR(ctx, 0x19f08/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19f30/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19f50/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x19f70/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x19f90/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x19fb0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x19fd0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a070/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a090/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1a110/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a1e8/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x1a248/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1a2c8/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x1a2e8/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x1a808/4, 0x00000052); ++ INSTANCE_WR(ctx, 0x1a848/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x1a888/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a8a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a8e8/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x1a948/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x1a988/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a9a8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1a9e8/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x1aa08/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1aa28/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x1aa68/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x2d2c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2d2e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2d328/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x2d348/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2d368/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2d3a8/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x2d3e8/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x2d468/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d488/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d4a8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d4c8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d4e8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d508/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d528/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d548/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d568/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d588/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d5a8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d5c8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d5e8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d608/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d628/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2d648/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x2dae8/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x2db08/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x2db68/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x2e5b0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2e5d0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x2e810/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2e990/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2e9b0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2e9d0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2e9f0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2ea10/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x2eb30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ebb0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ec70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ee10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ee30/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ee50/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2ee70/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ee90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2eeb0/4, 0x00000002); ++ INSTANCE_WR(ctx, 
0x2eed0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2ef10/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f010/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2f070/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2f0f0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f110/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f150/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f170/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f190/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x2f2f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f310/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f330/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f350/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f370/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f390/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x2f3b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f3f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f410/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f430/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f450/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f470/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f490/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f4b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f4d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f4f0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f5f0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2f610/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x2f710/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x2f770/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2f790/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f810/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x2f8d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2f970/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fa70/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x2faf0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x2fb10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fb50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fb90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fbd0/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x2fc10/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x2fc50/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x301b0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x301d0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x301f0/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30210/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30230/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30250/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30270/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x30290/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x302b0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x303b0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x303d0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x303f0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30410/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30430/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30450/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30470/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30490/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x304b0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x304d0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x304f0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30510/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30530/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30550/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30570/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30590/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x305b0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x305d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x305f0/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x306f0/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x30710/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30730/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30770/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x307d0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x307f0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x30830/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x308d0/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x30950/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x30970/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30ab0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30b50/4, 0x00000004); ++ 
INSTANCE_WR(ctx, 0x30b90/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x30bb0/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x30bd0/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x30bf0/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x30c70/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x30d70/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x30d90/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x31090/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x31110/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x31170/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x311b0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x311d0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31250/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x312f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31330/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x313b0/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x313f0/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x31410/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x31430/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x31450/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x31470/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x316d0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x316f0/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x31710/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31750/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x31770/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x317d0/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x318f0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31930/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x31950/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31970/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31990/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x319b0/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x319f0/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x4a7e0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4a800/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x4a820/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4a840/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x4a880/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4a8c0/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x4a8e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x4a900/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x4a960/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x4a980/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x4a9e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x52220/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x52500/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x526a0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x526c0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x52700/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x52780/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x527c0/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x52920/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x52940/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x52960/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x52a80/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x52b00/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x52d40/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x52d60/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x52d80/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x52da0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x52dc0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x52de0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53200/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53220/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53240/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53260/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53280/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x532a0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x532c0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x532e0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53300/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53320/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53340/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53360/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53380/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x533a0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x533c0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x533e0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x53400/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x53460/4, 
0x00000003); ++ INSTANCE_WR(ctx, 0x53500/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53524/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x53540/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x53544/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x53560/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x53564/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x53580/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x53584/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x535a0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x535e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53600/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53644/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53660/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53684/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x536a0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x536a4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x536c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x53824/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x53840/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x53844/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x53860/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x53864/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x53880/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x53884/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x538a0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x538e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53900/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53944/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53960/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53984/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x539a0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x539a4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x539c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x53b04/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53b20/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53be4/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c00/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c04/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c20/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c24/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c40/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c44/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c60/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x53c64/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53c80/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53c84/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x53ca0/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x53ca4/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x53cc0/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x53cc4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53ce0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x53d04/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x53d20/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x53dc4/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53de0/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x53de4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x53e00/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x53e24/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x53e40/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x53e44/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x53e60/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x53f64/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x53f80/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x54004/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x54020/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x54144/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x54160/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x54164/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54180/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54184/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x541a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x541a4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x541c0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x541c4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x541e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x541e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54200/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54204/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54220/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x54244/4, 0x00000004); ++ INSTANCE_WR(ctx, 
0x54260/4, 0x00000004);
++	INSTANCE_WR(ctx, 0x5b6a4/4, 0x00000011);
++	INSTANCE_WR(ctx, 0x5b6c0/4, 0x00000011);
++	INSTANCE_WR(ctx, 0x5b6e4/4, 0x00000001);
++	INSTANCE_WR(ctx, 0x5b700/4, 0x00000001);
++}
++
++static void
++nv86_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++	INSTANCE_WR(ctx, 0x10C/4, 0x30);
++	INSTANCE_WR(ctx, 0x1D4/4, 0x3);
++	INSTANCE_WR(ctx, 0x1D8/4, 0x1000);
++	INSTANCE_WR(ctx, 0x218/4, 0xFE0C);
++	INSTANCE_WR(ctx, 0x22C/4, 0x1000);
++	INSTANCE_WR(ctx, 0x258/4, 0x187);
++	INSTANCE_WR(ctx, 0x26C/4, 0x1018);
++	INSTANCE_WR(ctx, 0x270/4, 0xFF);
++	INSTANCE_WR(ctx, 0x2AC/4, 0x4);
++	INSTANCE_WR(ctx, 0x2B0/4, 0x44D00DF);
++	INSTANCE_WR(ctx, 0x2B8/4, 0x600);
++	INSTANCE_WR(ctx, 0x2D0/4, 0x1000000);
++	INSTANCE_WR(ctx, 0x2D4/4, 0xFF);
++	INSTANCE_WR(ctx, 0x2DC/4, 0x400);
++	INSTANCE_WR(ctx, 0x2F4/4, 0x1);
++	INSTANCE_WR(ctx, 0x2F8/4, 0x80);
++	INSTANCE_WR(ctx, 0x2FC/4, 0x4);
++	INSTANCE_WR(ctx, 0x318/4, 0x2);
++	INSTANCE_WR(ctx, 0x31C/4, 0x1);
++	INSTANCE_WR(ctx, 0x328/4, 0x1);
++	INSTANCE_WR(ctx, 0x32C/4, 0x100);
++	INSTANCE_WR(ctx, 0x344/4, 0x2);
++	INSTANCE_WR(ctx, 0x348/4, 0x1);
++	INSTANCE_WR(ctx, 0x34C/4, 0x1);
++	INSTANCE_WR(ctx, 0x35C/4, 0x1);
++	INSTANCE_WR(ctx, 0x360/4, 0x3FFFFF);
++	INSTANCE_WR(ctx, 0x364/4, 0x1FFF);
++	INSTANCE_WR(ctx, 0x36C/4, 0x1);
++	INSTANCE_WR(ctx, 0x370/4, 0x1);
++	INSTANCE_WR(ctx, 0x378/4, 0x1);
++	INSTANCE_WR(ctx, 0x37C/4, 0x1);
++	INSTANCE_WR(ctx, 0x380/4, 0x1);
++	INSTANCE_WR(ctx, 0x384/4, 0x4);
++	INSTANCE_WR(ctx, 0x388/4, 0x1);
++	INSTANCE_WR(ctx, 0x38C/4, 0x1);
++	INSTANCE_WR(ctx, 0x390/4, 0x1);
++	INSTANCE_WR(ctx, 0x394/4, 0x7);
++	INSTANCE_WR(ctx, 0x398/4, 0x1);
++	INSTANCE_WR(ctx, 0x39C/4, 0x7);
++	INSTANCE_WR(ctx, 0x3A0/4, 0x1);
++	INSTANCE_WR(ctx, 0x3A4/4, 0x1);
++	INSTANCE_WR(ctx, 0x3A8/4, 0x1);
++	INSTANCE_WR(ctx, 0x3BC/4, 0x1);
++	INSTANCE_WR(ctx, 0x3C0/4, 0x100);
++	INSTANCE_WR(ctx, 0x3C8/4, 0x1);
++	INSTANCE_WR(ctx, 0x3D4/4, 0x100);
++	INSTANCE_WR(ctx, 0x3D8/4, 0x1);
++	INSTANCE_WR(ctx, 0x3DC/4, 0x100);
++	INSTANCE_WR(ctx, 0x3E4/4, 0x1);
++	INSTANCE_WR(ctx, 0x3F0/4, 0x100);
++	INSTANCE_WR(ctx, 0x404/4, 0x4);
++	INSTANCE_WR(ctx, 0x408/4, 0x70);
++	INSTANCE_WR(ctx, 0x40C/4, 0x80);
++	INSTANCE_WR(ctx, 0x420/4, 0xC);
++	INSTANCE_WR(ctx, 0x428/4, 0x8);
++	INSTANCE_WR(ctx, 0x42C/4, 0x14);
++	INSTANCE_WR(ctx, 0x434/4, 0x29);
++	INSTANCE_WR(ctx, 0x438/4, 0x27);
++	INSTANCE_WR(ctx, 0x43C/4, 0x26);
++	INSTANCE_WR(ctx, 0x440/4, 0x8);
++	INSTANCE_WR(ctx, 0x444/4, 0x4);
++	INSTANCE_WR(ctx, 0x448/4, 0x27);
++	INSTANCE_WR(ctx, 0x454/4, 0x1);
++	INSTANCE_WR(ctx, 0x458/4, 0x2);
++	INSTANCE_WR(ctx, 0x45C/4, 0x3);
++	INSTANCE_WR(ctx, 0x460/4, 0x4);
++	INSTANCE_WR(ctx, 0x464/4, 0x5);
++	INSTANCE_WR(ctx, 0x468/4, 0x6);
++	INSTANCE_WR(ctx, 0x46C/4, 0x7);
++	INSTANCE_WR(ctx, 0x470/4, 0x1);
++	INSTANCE_WR(ctx, 0x4B4/4, 0xCF);
++	INSTANCE_WR(ctx, 0x4E4/4, 0x80);
++	INSTANCE_WR(ctx, 0x4E8/4, 0x4);
++	INSTANCE_WR(ctx, 0x4EC/4, 0x4);
++	INSTANCE_WR(ctx, 0x4F0/4, 0x3);
++	INSTANCE_WR(ctx, 0x4F4/4, 0x1);
++	INSTANCE_WR(ctx, 0x500/4, 0x12);
++	INSTANCE_WR(ctx, 0x504/4, 0x10);
++	INSTANCE_WR(ctx, 0x508/4, 0xC);
++	INSTANCE_WR(ctx, 0x50C/4, 0x1);
++	INSTANCE_WR(ctx, 0x51C/4, 0x4);
++	INSTANCE_WR(ctx, 0x520/4, 0x2);
++	INSTANCE_WR(ctx, 0x524/4, 0x4);
++	INSTANCE_WR(ctx, 0x530/4, 0x3FFFFF);
++	INSTANCE_WR(ctx, 0x534/4, 0x1FFF);
++	INSTANCE_WR(ctx, 0x55C/4, 0x4);
++	INSTANCE_WR(ctx, 0x560/4, 0x14);
++	INSTANCE_WR(ctx,
0x564/4, 0x1); ++ INSTANCE_WR(ctx, 0x570/4, 0x2); ++ INSTANCE_WR(ctx, 0x57C/4, 0x1); ++ INSTANCE_WR(ctx, 0x584/4, 0x2); ++ INSTANCE_WR(ctx, 0x588/4, 0x1000); ++ INSTANCE_WR(ctx, 0x58C/4, 0xE00); ++ INSTANCE_WR(ctx, 0x590/4, 0x1000); ++ INSTANCE_WR(ctx, 0x594/4, 0x1E00); ++ INSTANCE_WR(ctx, 0x59C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5BC/4, 0x200); ++ INSTANCE_WR(ctx, 0x5C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5C8/4, 0x70); ++ INSTANCE_WR(ctx, 0x5CC/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC/4, 0x70); ++ INSTANCE_WR(ctx, 0x5E0/4, 0x80); ++ INSTANCE_WR(ctx, 0x5F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5F4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x5FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x60C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x614/4, 0x2); ++ INSTANCE_WR(ctx, 0x61C/4, 0x1); ++ INSTANCE_WR(ctx, 0x624/4, 0x1); ++ INSTANCE_WR(ctx, 0x62C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x630/4, 0xCF); ++ INSTANCE_WR(ctx, 0x634/4, 0x1); ++ INSTANCE_WR(ctx, 0x63C/4, 0xF80); ++ INSTANCE_WR(ctx, 0x684/4, 0x7F0080); ++ INSTANCE_WR(ctx, 0x6C0/4, 0x7F0080); ++ INSTANCE_WR(ctx, 0x6E4/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x6E8/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x6F0/4, 0x1000); ++ INSTANCE_WR(ctx, 0x6F4/4, 0x1F); ++ INSTANCE_WR(ctx, 0x6F8/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x6FC/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x700/4, 0xB7892080); ++ INSTANCE_WR(ctx, 0x70C/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x710/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x718/4, 0x1000); ++ INSTANCE_WR(ctx, 0x71C/4, 0x1F); ++ INSTANCE_WR(ctx, 0x720/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x724/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x728/4, 0xB7892080); ++ INSTANCE_WR(ctx, 0x734/4, 0x10040); ++ INSTANCE_WR(ctx, 0x73C/4, 0x22); ++ INSTANCE_WR(ctx, 0x748/4, 0x10040); ++ INSTANCE_WR(ctx, 0x74C/4, 0x22); ++ INSTANCE_WR(ctx, 0x764/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x768/4, 0x160000); ++ INSTANCE_WR(ctx, 0x76C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x77C/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x780/4, 0x8C0000); ++ INSTANCE_WR(ctx, 0x7A4/4, 0x10401); ++ INSTANCE_WR(ctx, 0x7AC/4, 0x78); ++ INSTANCE_WR(ctx, 0x7B4/4, 0xBF); ++ INSTANCE_WR(ctx, 0x7BC/4, 0x1210); ++ INSTANCE_WR(ctx, 0x7C0/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x7E4/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x7E8/4, 0x160000); ++ INSTANCE_WR(ctx, 0x7EC/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x7FC/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x800/4, 0x8C0000); ++ INSTANCE_WR(ctx, 0x824/4, 0x10401); ++ INSTANCE_WR(ctx, 0x82C/4, 0x78); ++ INSTANCE_WR(ctx, 0x834/4, 0xBF); ++ INSTANCE_WR(ctx, 0x83C/4, 0x1210); ++ INSTANCE_WR(ctx, 0x840/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x868/4, 0x27070); ++ INSTANCE_WR(ctx, 0x874/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x88C/4, 0x120407); ++ INSTANCE_WR(ctx, 0x890/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x894/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x898/4, 0x30201); ++ INSTANCE_WR(ctx, 0x8B4/4, 0x40); ++ INSTANCE_WR(ctx, 0x8B8/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0x8BC/4, 0x141210); ++ INSTANCE_WR(ctx, 0x8C0/4, 0x1F0); ++ INSTANCE_WR(ctx, 0x8C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x8C8/4, 0x3); ++ INSTANCE_WR(ctx, 0x8D4/4, 0x39E00); ++ INSTANCE_WR(ctx, 0x8D8/4, 0x100); ++ INSTANCE_WR(ctx, 0x8DC/4, 0x3800); ++ INSTANCE_WR(ctx, 0x8E0/4, 0x404040); ++ INSTANCE_WR(ctx, 0x8E4/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0x8EC/4, 0x77F005); ++ INSTANCE_WR(ctx, 0x8F0/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0x7BA0/4, 0x21); ++ INSTANCE_WR(ctx, 0x7BC0/4, 0x1); ++ INSTANCE_WR(ctx, 0x7BE0/4, 0x2); ++ INSTANCE_WR(ctx, 0x7C00/4, 0x100); ++ 
INSTANCE_WR(ctx, 0x7C20/4, 0x100); ++ INSTANCE_WR(ctx, 0x7C40/4, 0x1); ++ INSTANCE_WR(ctx, 0x7CA0/4, 0x1); ++ INSTANCE_WR(ctx, 0x7CC0/4, 0x2); ++ INSTANCE_WR(ctx, 0x7CE0/4, 0x100); ++ INSTANCE_WR(ctx, 0x7D00/4, 0x100); ++ INSTANCE_WR(ctx, 0x7D20/4, 0x1); ++ INSTANCE_WR(ctx, 0x11640/4, 0x4); ++ INSTANCE_WR(ctx, 0x11660/4, 0x4); ++ INSTANCE_WR(ctx, 0x49FE0/4, 0x4); ++ INSTANCE_WR(ctx, 0x4A000/4, 0x4); ++ INSTANCE_WR(ctx, 0x4A020/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x4A040/4, 0x3); ++ INSTANCE_WR(ctx, 0x4A080/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x4A0C0/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x4A0E0/4, 0x1); ++ INSTANCE_WR(ctx, 0x4A100/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x4A160/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x4A180/4, 0x27); ++ INSTANCE_WR(ctx, 0x4A1E0/4, 0x1); ++ INSTANCE_WR(ctx, 0x51A20/4, 0x1); ++ INSTANCE_WR(ctx, 0x51D00/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x51EA0/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x51EC0/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x51F00/4, 0x80); ++ INSTANCE_WR(ctx, 0x51F80/4, 0x80); ++ INSTANCE_WR(ctx, 0x51FC0/4, 0x3F); ++ INSTANCE_WR(ctx, 0x52120/4, 0x2); ++ INSTANCE_WR(ctx, 0x52140/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x52160/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x52280/4, 0x4); ++ INSTANCE_WR(ctx, 0x52300/4, 0x4); ++ INSTANCE_WR(ctx, 0x52540/4, 0x1); ++ INSTANCE_WR(ctx, 0x52560/4, 0x1001); ++ INSTANCE_WR(ctx, 0x52580/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x525A0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x525C0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x525E0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x52A00/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52A20/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52A40/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52A60/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52A80/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52AA0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52AC0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52AE0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B00/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B20/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B40/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B60/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52B80/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52BA0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52BC0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52BE0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x52C00/4, 0x10); ++ INSTANCE_WR(ctx, 0x52C60/4, 0x3); ++ INSTANCE_WR(ctx, 0xA84/4, 0xF); ++ INSTANCE_WR(ctx, 0xB24/4, 0x20); ++ INSTANCE_WR(ctx, 0xD04/4, 0x1A); ++ INSTANCE_WR(ctx, 0xEC4/4, 0x4); ++ INSTANCE_WR(ctx, 0xEE4/4, 0x4); ++ INSTANCE_WR(ctx, 0xF24/4, 0x4); ++ INSTANCE_WR(ctx, 0xF44/4, 0x8); ++ INSTANCE_WR(ctx, 0xF84/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x1124/4, 0xF); ++ INSTANCE_WR(ctx, 0x3604/4, 0xF); ++ INSTANCE_WR(ctx, 0x3644/4, 0x1); ++ INSTANCE_WR(ctx, 0x41A4/4, 0xF); ++ INSTANCE_WR(ctx, 0x14844/4, 0xF); ++ INSTANCE_WR(ctx, 0x14AE4/4, 0x1); ++ INSTANCE_WR(ctx, 0x14B04/4, 0x100); ++ INSTANCE_WR(ctx, 0x14B24/4, 0x100); ++ INSTANCE_WR(ctx, 0x14B44/4, 0x11); ++ INSTANCE_WR(ctx, 0x14B84/4, 0x8); ++ INSTANCE_WR(ctx, 0x14C44/4, 0x1); ++ INSTANCE_WR(ctx, 0x14C84/4, 0x1); ++ INSTANCE_WR(ctx, 0x14CA4/4, 0x1); ++ INSTANCE_WR(ctx, 0x14CC4/4, 0x1); ++ INSTANCE_WR(ctx, 0x14CE4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x14D04/4, 0x2); ++ INSTANCE_WR(ctx, 0x14DE4/4, 0x1); ++ INSTANCE_WR(ctx, 0x14E24/4, 0x1); ++ INSTANCE_WR(ctx, 0x14E44/4, 0x1); ++ INSTANCE_WR(ctx, 0x14E64/4, 0x1); ++ INSTANCE_WR(ctx, 0x14F04/4, 0x4); ++ INSTANCE_WR(ctx, 0x14F44/4, 0x1); ++ INSTANCE_WR(ctx, 0x14F64/4, 0x15); ++ INSTANCE_WR(ctx, 0x14FE4/4, 0x4444480); ++ INSTANCE_WR(ctx, 0x15764/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x15804/4, 0x100); ++ INSTANCE_WR(ctx, 0x15864/4, 
0x10001); ++ INSTANCE_WR(ctx, 0x158A4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x158C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x158E4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x15904/4, 0x1); ++ INSTANCE_WR(ctx, 0x15924/4, 0x4); ++ INSTANCE_WR(ctx, 0x15944/4, 0x2); ++ INSTANCE_WR(ctx, 0x166C4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x166E4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x16784/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16904/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x16924/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x15948/4, 0x3FFFFF); ++ INSTANCE_WR(ctx, 0x159A8/4, 0x1FFF); ++ INSTANCE_WR(ctx, 0x15B88/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x15C68/4, 0x4); ++ INSTANCE_WR(ctx, 0x15C88/4, 0x1A); ++ INSTANCE_WR(ctx, 0x15CE8/4, 0x1); ++ INSTANCE_WR(ctx, 0x15F48/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x16028/4, 0xF); ++ INSTANCE_WR(ctx, 0x16128/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16148/4, 0x11); ++ INSTANCE_WR(ctx, 0x16348/4, 0x4); ++ INSTANCE_WR(ctx, 0x163E8/4, 0x2); ++ INSTANCE_WR(ctx, 0x16408/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x16428/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x164A8/4, 0x5); ++ INSTANCE_WR(ctx, 0x164C8/4, 0x52); ++ INSTANCE_WR(ctx, 0x16568/4, 0x1); ++ INSTANCE_WR(ctx, 0x16788/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x167A8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x167C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x167E8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16808/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16828/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16848/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16868/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16888/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x168A8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x168C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x168E8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16908/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16928/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16948/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16968/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16988/4, 0x10); ++ INSTANCE_WR(ctx, 0x16E68/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x16E88/4, 0x5); ++ INSTANCE_WR(ctx, 0x16EE8/4, 0x1); ++ INSTANCE_WR(ctx, 0x16F28/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16F48/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16F68/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16F88/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16FA8/4, 0x3); ++ INSTANCE_WR(ctx, 0x173A8/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x173C8/4, 0x1A); ++ INSTANCE_WR(ctx, 0x17408/4, 0x3); ++ INSTANCE_WR(ctx, 0x178E8/4, 0x102); ++ INSTANCE_WR(ctx, 0x17928/4, 0x4); ++ INSTANCE_WR(ctx, 0x17948/4, 0x4); ++ INSTANCE_WR(ctx, 0x17968/4, 0x4); ++ INSTANCE_WR(ctx, 0x17988/4, 0x4); ++ INSTANCE_WR(ctx, 0x179A8/4, 0x4); ++ INSTANCE_WR(ctx, 0x179C8/4, 0x4); ++ INSTANCE_WR(ctx, 0x17A08/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17A48/4, 0x102); ++ INSTANCE_WR(ctx, 0x17B88/4, 0x4); ++ INSTANCE_WR(ctx, 0x17BA8/4, 0x4); ++ INSTANCE_WR(ctx, 0x17BC8/4, 0x4); ++ INSTANCE_WR(ctx, 0x17BE8/4, 0x4); ++ INSTANCE_WR(ctx, 0x18228/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x18288/4, 0x804); ++ INSTANCE_WR(ctx, 0x182C8/4, 0x4); ++ INSTANCE_WR(ctx, 0x182E8/4, 0x4); ++ INSTANCE_WR(ctx, 0x18308/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x18348/4, 0x4); ++ INSTANCE_WR(ctx, 0x18368/4, 0x4); ++ INSTANCE_WR(ctx, 0x183A8/4, 0x10); ++ INSTANCE_WR(ctx, 0x18448/4, 0x804); ++ INSTANCE_WR(ctx, 0x18468/4, 0x1); ++ INSTANCE_WR(ctx, 0x18488/4, 0x1A); ++ INSTANCE_WR(ctx, 0x184A8/4, 0x7F); ++ INSTANCE_WR(ctx, 0x184E8/4, 0x1); ++ INSTANCE_WR(ctx, 0x18508/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x18548/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x18568/4, 0x4); ++ INSTANCE_WR(ctx, 0x18588/4, 0x4); ++ INSTANCE_WR(ctx, 0x185C8/4, 0x10); ++ INSTANCE_WR(ctx, 0x18648/4, 0x1); ++ INSTANCE_WR(ctx, 0x18668/4, 
0x8100C12); ++ INSTANCE_WR(ctx, 0x18748/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x18768/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x18E88/4, 0x1); ++ INSTANCE_WR(ctx, 0x18EE8/4, 0x10); ++ INSTANCE_WR(ctx, 0x19608/4, 0x88); ++ INSTANCE_WR(ctx, 0x19628/4, 0x88); ++ INSTANCE_WR(ctx, 0x19688/4, 0x4); ++ INSTANCE_WR(ctx, 0x19968/4, 0x26); ++ INSTANCE_WR(ctx, 0x199C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x19A48/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19A68/4, 0x10); ++ INSTANCE_WR(ctx, 0x19F88/4, 0x52); ++ INSTANCE_WR(ctx, 0x19FC8/4, 0x26); ++ INSTANCE_WR(ctx, 0x1A008/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A028/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A068/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1A0C8/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x1A108/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A128/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A168/4, 0x80); ++ INSTANCE_WR(ctx, 0x1A188/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A1A8/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x1A1E8/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x24A48/4, 0x4); ++ INSTANCE_WR(ctx, 0x24A68/4, 0x4); ++ INSTANCE_WR(ctx, 0x24AA8/4, 0x80); ++ INSTANCE_WR(ctx, 0x24AC8/4, 0x4); ++ INSTANCE_WR(ctx, 0x24AE8/4, 0x1); ++ INSTANCE_WR(ctx, 0x24B28/4, 0x27); ++ INSTANCE_WR(ctx, 0x24B68/4, 0x26); ++ INSTANCE_WR(ctx, 0x24BE8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C08/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C28/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C48/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C68/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24C88/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24CA8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24CC8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24CE8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D08/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D28/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D48/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D68/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24D88/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24DA8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x24DC8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x25268/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x25288/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x252E8/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0xB0C/4, 0x2); ++ INSTANCE_WR(ctx, 0xB4C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0xCEC/4, 0x1); ++ INSTANCE_WR(ctx, 0xD0C/4, 0x10); ++ INSTANCE_WR(ctx, 0xD6C/4, 0x1); ++ INSTANCE_WR(ctx, 0xE0C/4, 0x4); ++ INSTANCE_WR(ctx, 0xE2C/4, 0x400); ++ INSTANCE_WR(ctx, 0xE4C/4, 0x300); ++ INSTANCE_WR(ctx, 0xE6C/4, 0x1001); ++ INSTANCE_WR(ctx, 0xE8C/4, 0x15); ++ INSTANCE_WR(ctx, 0xF4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x106C/4, 0x1); ++ INSTANCE_WR(ctx, 0x108C/4, 0x10); ++ INSTANCE_WR(ctx, 0x10CC/4, 0x1); ++ INSTANCE_WR(ctx, 0x134C/4, 0x10); ++ INSTANCE_WR(ctx, 0x156C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x158C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x15AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x15CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x15EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x160C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x162C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x164C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x166C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x168C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x170C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x172C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x174C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x1A8C/4, 0x10); ++ INSTANCE_WR(ctx, 0x1ACC/4, 0x3F); ++ INSTANCE_WR(ctx, 0x1BAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1BEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1C2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1DCC/4, 0x11); ++ INSTANCE_WR(ctx, 0x1ECC/4, 0xF); ++ INSTANCE_WR(ctx, 0x1FCC/4, 0x11); ++ INSTANCE_WR(ctx, 0x20AC/4, 0x1); ++ INSTANCE_WR(ctx, 
0x20CC/4, 0x1); ++ INSTANCE_WR(ctx, 0x20EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x210C/4, 0x2); ++ INSTANCE_WR(ctx, 0x212C/4, 0x1); ++ INSTANCE_WR(ctx, 0x214C/4, 0x2); ++ INSTANCE_WR(ctx, 0x216C/4, 0x1); ++ INSTANCE_WR(ctx, 0x21AC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x21EC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x24AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x24CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x24EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x250C/4, 0x1); ++ INSTANCE_WR(ctx, 0x252C/4, 0x2); ++ INSTANCE_WR(ctx, 0x254C/4, 0x1); ++ INSTANCE_WR(ctx, 0x256C/4, 0x1); ++ INSTANCE_WR(ctx, 0x25EC/4, 0x11); ++ INSTANCE_WR(ctx, 0x260C/4, 0x1); ++ INSTANCE_WR(ctx, 0x328C/4, 0x2); ++ INSTANCE_WR(ctx, 0x32CC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x346C/4, 0x1); ++ INSTANCE_WR(ctx, 0x348C/4, 0x10); ++ INSTANCE_WR(ctx, 0x34EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x358C/4, 0x4); ++ INSTANCE_WR(ctx, 0x35AC/4, 0x400); ++ INSTANCE_WR(ctx, 0x35CC/4, 0x300); ++ INSTANCE_WR(ctx, 0x35EC/4, 0x1001); ++ INSTANCE_WR(ctx, 0x360C/4, 0x15); ++ INSTANCE_WR(ctx, 0x36CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x37EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x380C/4, 0x10); ++ INSTANCE_WR(ctx, 0x384C/4, 0x1); ++ INSTANCE_WR(ctx, 0x3ACC/4, 0x10); ++ INSTANCE_WR(ctx, 0x3CEC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D0C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D2C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D4C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D6C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3D8C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3DAC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3DCC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3DEC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E0C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E2C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E4C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E6C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3E8C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3EAC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x3ECC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x420C/4, 0x10); ++ INSTANCE_WR(ctx, 0x424C/4, 0x3F); ++ INSTANCE_WR(ctx, 0x432C/4, 0x1); ++ INSTANCE_WR(ctx, 0x436C/4, 0x1); ++ INSTANCE_WR(ctx, 0x43AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x454C/4, 0x11); ++ INSTANCE_WR(ctx, 0x464C/4, 0xF); ++ INSTANCE_WR(ctx, 0x474C/4, 0x11); ++ INSTANCE_WR(ctx, 0x482C/4, 0x1); ++ INSTANCE_WR(ctx, 0x484C/4, 0x1); ++ INSTANCE_WR(ctx, 0x486C/4, 0x1); ++ INSTANCE_WR(ctx, 0x488C/4, 0x2); ++ INSTANCE_WR(ctx, 0x48AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x48CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x48EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x492C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x496C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x4C2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4C4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x4C6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4C8C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4CAC/4, 0x2); ++ INSTANCE_WR(ctx, 0x4CCC/4, 0x1); ++ INSTANCE_WR(ctx, 0x4CEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x4D6C/4, 0x11); ++ INSTANCE_WR(ctx, 0x4D8C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA30/4, 0x4); ++ INSTANCE_WR(ctx, 0xCF0/4, 0x4); ++ INSTANCE_WR(ctx, 0xD10/4, 0x4); ++ INSTANCE_WR(ctx, 0xD30/4, 0x608080); ++ INSTANCE_WR(ctx, 0xDD0/4, 0x4); ++ INSTANCE_WR(ctx, 0xE30/4, 0x4); ++ INSTANCE_WR(ctx, 0xE50/4, 0x4); ++ INSTANCE_WR(ctx, 0xE70/4, 0x80); ++ INSTANCE_WR(ctx, 0xE90/4, 0x1E00); ++ INSTANCE_WR(ctx, 0xEB0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1350/4, 0x4); ++ INSTANCE_WR(ctx, 0x1370/4, 0x80); ++ INSTANCE_WR(ctx, 0x1390/4, 0x4); ++ INSTANCE_WR(ctx, 0x13B0/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x13D0/4, 0x3); ++ INSTANCE_WR(ctx, 0x13F0/4, 0x1E00); ++ INSTANCE_WR(ctx, 0x1410/4, 0x4); ++ INSTANCE_WR(ctx, 0x14B0/4, 0x4); ++ INSTANCE_WR(ctx, 0x14D0/4, 0x3); ++ INSTANCE_WR(ctx, 0x1550/4, 0x4); ++ INSTANCE_WR(ctx, 
0x159F0/4, 0x4); ++ INSTANCE_WR(ctx, 0x15A10/4, 0x3); ++ INSTANCE_WR(ctx, 0x15C50/4, 0xF); ++ INSTANCE_WR(ctx, 0x15DD0/4, 0x4); ++ INSTANCE_WR(ctx, 0x15DF0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x15E10/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x15E30/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x15E50/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x15F70/4, 0x1); ++ INSTANCE_WR(ctx, 0x15FF0/4, 0x1); ++ INSTANCE_WR(ctx, 0x160B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16250/4, 0x1); ++ INSTANCE_WR(ctx, 0x16270/4, 0x1); ++ INSTANCE_WR(ctx, 0x16290/4, 0x2); ++ INSTANCE_WR(ctx, 0x162B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x162D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x162F0/4, 0x2); ++ INSTANCE_WR(ctx, 0x16310/4, 0x1); ++ INSTANCE_WR(ctx, 0x16350/4, 0x11); ++ INSTANCE_WR(ctx, 0x16450/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x164B0/4, 0x4); ++ INSTANCE_WR(ctx, 0x16530/4, 0x11); ++ INSTANCE_WR(ctx, 0x16550/4, 0x1); ++ INSTANCE_WR(ctx, 0x16590/4, 0xCF); ++ INSTANCE_WR(ctx, 0x165B0/4, 0xCF); ++ INSTANCE_WR(ctx, 0x165D0/4, 0xCF); ++ INSTANCE_WR(ctx, 0x16730/4, 0x1); ++ INSTANCE_WR(ctx, 0x16750/4, 0x1); ++ INSTANCE_WR(ctx, 0x16770/4, 0x2); ++ INSTANCE_WR(ctx, 0x16790/4, 0x1); ++ INSTANCE_WR(ctx, 0x167B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x167D0/4, 0x2); ++ INSTANCE_WR(ctx, 0x167F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16830/4, 0x1); ++ INSTANCE_WR(ctx, 0x16850/4, 0x1); ++ INSTANCE_WR(ctx, 0x16870/4, 0x1); ++ INSTANCE_WR(ctx, 0x16890/4, 0x1); ++ INSTANCE_WR(ctx, 0x168B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x168D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x168F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16910/4, 0x1); ++ INSTANCE_WR(ctx, 0x16930/4, 0x11); ++ INSTANCE_WR(ctx, 0x16A30/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16A50/4, 0xF); ++ INSTANCE_WR(ctx, 0x16B50/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x16BB0/4, 0x11); ++ INSTANCE_WR(ctx, 0x16BD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16C50/4, 0x4); ++ INSTANCE_WR(ctx, 0x16D10/4, 0x1); ++ INSTANCE_WR(ctx, 0x16DB0/4, 0x11); ++ INSTANCE_WR(ctx, 0x16EB0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16F30/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F50/4, 0x1); ++ INSTANCE_WR(ctx, 0x16F90/4, 0x1); ++ INSTANCE_WR(ctx, 0x16FD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17010/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17050/4, 0x1); ++ INSTANCE_WR(ctx, 0x17090/4, 0x1); ++ INSTANCE_WR(ctx, 0x175F0/4, 0x8); ++ INSTANCE_WR(ctx, 0x17610/4, 0x8); ++ INSTANCE_WR(ctx, 0x17630/4, 0x8); ++ INSTANCE_WR(ctx, 0x17650/4, 0x8); ++ INSTANCE_WR(ctx, 0x17670/4, 0x8); ++ INSTANCE_WR(ctx, 0x17690/4, 0x8); ++ INSTANCE_WR(ctx, 0x176B0/4, 0x8); ++ INSTANCE_WR(ctx, 0x176D0/4, 0x8); ++ INSTANCE_WR(ctx, 0x176F0/4, 0x11); ++ INSTANCE_WR(ctx, 0x177F0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x17810/4, 0x400); ++ INSTANCE_WR(ctx, 0x17830/4, 0x400); ++ INSTANCE_WR(ctx, 0x17850/4, 0x400); ++ INSTANCE_WR(ctx, 0x17870/4, 0x400); ++ INSTANCE_WR(ctx, 0x17890/4, 0x400); ++ INSTANCE_WR(ctx, 0x178B0/4, 0x400); ++ INSTANCE_WR(ctx, 0x178D0/4, 0x400); ++ INSTANCE_WR(ctx, 0x178F0/4, 0x400); ++ INSTANCE_WR(ctx, 0x17910/4, 0x300); ++ INSTANCE_WR(ctx, 0x17930/4, 0x300); ++ INSTANCE_WR(ctx, 0x17950/4, 0x300); ++ INSTANCE_WR(ctx, 0x17970/4, 0x300); ++ INSTANCE_WR(ctx, 0x17990/4, 0x300); ++ INSTANCE_WR(ctx, 0x179B0/4, 0x300); ++ INSTANCE_WR(ctx, 0x179D0/4, 0x300); ++ INSTANCE_WR(ctx, 0x179F0/4, 0x300); ++ INSTANCE_WR(ctx, 0x17A10/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A30/4, 0xF); ++ INSTANCE_WR(ctx, 0x17B30/4, 0x20); ++ INSTANCE_WR(ctx, 0x17B50/4, 0x11); ++ INSTANCE_WR(ctx, 0x17B70/4, 0x100); ++ INSTANCE_WR(ctx, 0x17BB0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17C10/4, 0x40); ++ INSTANCE_WR(ctx, 0x17C30/4, 0x100); ++ INSTANCE_WR(ctx, 0x17C70/4, 0x3); ++ INSTANCE_WR(ctx, 0x17D10/4, 0x1FFE67); 
++	INSTANCE_WR(ctx, 0x17D90/4, 0x2);
++	INSTANCE_WR(ctx, 0x17DB0/4, 0xFAC6881);
++	INSTANCE_WR(ctx, 0x17EF0/4, 0x1);
++	INSTANCE_WR(ctx, 0x17F90/4, 0x4);
++	INSTANCE_WR(ctx, 0x17FD0/4, 0x1);
++	INSTANCE_WR(ctx, 0x17FF0/4, 0x400);
++	INSTANCE_WR(ctx, 0x18010/4, 0x300);
++	INSTANCE_WR(ctx, 0x18030/4, 0x1001);
++	INSTANCE_WR(ctx, 0x180B0/4, 0x11);
++	INSTANCE_WR(ctx, 0x181B0/4, 0xFAC6881);
++	INSTANCE_WR(ctx, 0x181D0/4, 0xF);
++	INSTANCE_WR(ctx, 0x184D0/4, 0x1FFE67);
++	INSTANCE_WR(ctx, 0x18550/4, 0x11);
++	INSTANCE_WR(ctx, 0x185B0/4, 0x4);
++	INSTANCE_WR(ctx, 0x185F0/4, 0x1);
++	INSTANCE_WR(ctx, 0x18610/4, 0x1);
++	INSTANCE_WR(ctx, 0x18690/4, 0x1);
++	INSTANCE_WR(ctx, 0x18730/4, 0x1);
++	INSTANCE_WR(ctx, 0x18770/4, 0x1);
++	INSTANCE_WR(ctx, 0x187F0/4, 0x2A712488);
++	INSTANCE_WR(ctx, 0x18830/4, 0x4085C000);
++	INSTANCE_WR(ctx, 0x18850/4, 0x40);
++	INSTANCE_WR(ctx, 0x18870/4, 0x100);
++	INSTANCE_WR(ctx, 0x18890/4, 0x10100);
++	INSTANCE_WR(ctx, 0x188B0/4, 0x2800000);
++	INSTANCE_WR(ctx, 0x18B10/4, 0x4E3BFDF);
++	INSTANCE_WR(ctx, 0x18B30/4, 0x4E3BFDF);
++	INSTANCE_WR(ctx, 0x18B50/4, 0x1);
++	INSTANCE_WR(ctx, 0x18B90/4, 0xFFFF00);
++	INSTANCE_WR(ctx, 0x18BB0/4, 0x1);
++	INSTANCE_WR(ctx, 0x18C10/4, 0xFFFF00);
++	INSTANCE_WR(ctx, 0x18D30/4, 0x1);
++	INSTANCE_WR(ctx, 0x18D70/4, 0x1);
++	INSTANCE_WR(ctx, 0x18D90/4, 0x30201000);
++	INSTANCE_WR(ctx, 0x18DB0/4, 0x70605040);
++	INSTANCE_WR(ctx, 0x18DD0/4, 0xB8A89888);
++	INSTANCE_WR(ctx, 0x18DF0/4, 0xF8E8D8C8);
++	INSTANCE_WR(ctx, 0x18E30/4, 0x1A);
++}
++
++static void
++nv92_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++	INSTANCE_WR(ctx, 0x10C/4, 0x30);
++	INSTANCE_WR(ctx, 0x1D4/4, 0x3);
++	INSTANCE_WR(ctx, 0x1D8/4, 0x1000);
++	INSTANCE_WR(ctx, 0x218/4, 0xFE0C);
++	INSTANCE_WR(ctx, 0x22C/4, 0x1000);
++	INSTANCE_WR(ctx, 0x258/4, 0x187);
++	INSTANCE_WR(ctx, 0x26C/4, 0x1018);
++	INSTANCE_WR(ctx, 0x270/4, 0xFF);
++	INSTANCE_WR(ctx, 0x2AC/4, 0x4);
++	INSTANCE_WR(ctx, 0x2B0/4, 0x42500DF);
++	INSTANCE_WR(ctx, 0x2B8/4, 0x600);
++	INSTANCE_WR(ctx, 0x2D0/4, 0x1000000);
++	INSTANCE_WR(ctx, 0x2D4/4, 0xFF);
++	INSTANCE_WR(ctx, 0x2DC/4, 0x400);
++	INSTANCE_WR(ctx, 0x2F4/4, 0x1);
++	INSTANCE_WR(ctx, 0x2F8/4, 0x80);
++	INSTANCE_WR(ctx, 0x2FC/4, 0x4);
++	INSTANCE_WR(ctx, 0x318/4, 0x2);
++	INSTANCE_WR(ctx, 0x31C/4, 0x1);
++	INSTANCE_WR(ctx, 0x328/4, 0x1);
++	INSTANCE_WR(ctx, 0x32C/4, 0x100);
++	INSTANCE_WR(ctx, 0x344/4, 0x2);
++	INSTANCE_WR(ctx, 0x348/4, 0x1);
++	INSTANCE_WR(ctx, 0x34C/4, 0x1);
++	INSTANCE_WR(ctx, 0x35C/4, 0x1);
++	INSTANCE_WR(ctx, 0x360/4, 0x3FFFFF);
++	INSTANCE_WR(ctx, 0x364/4, 0x1FFF);
++	INSTANCE_WR(ctx, 0x36C/4, 0x1);
++	INSTANCE_WR(ctx, 0x370/4, 0x1);
++	INSTANCE_WR(ctx, 0x378/4, 0x1);
++	INSTANCE_WR(ctx, 0x37C/4, 0x1);
++	INSTANCE_WR(ctx, 0x380/4, 0x1);
++	INSTANCE_WR(ctx, 0x384/4, 0x4);
++	INSTANCE_WR(ctx, 0x388/4, 0x1);
++	INSTANCE_WR(ctx, 0x38C/4, 0x1);
++	INSTANCE_WR(ctx, 0x390/4, 0x1);
++	INSTANCE_WR(ctx, 0x394/4, 0x7);
++	INSTANCE_WR(ctx, 0x398/4, 0x1);
++	INSTANCE_WR(ctx, 0x39C/4, 0x7);
++	INSTANCE_WR(ctx, 0x3A0/4, 0x1);
++	INSTANCE_WR(ctx, 0x3A4/4, 0x1);
++	INSTANCE_WR(ctx, 0x3A8/4, 0x1);
++	INSTANCE_WR(ctx, 0x3BC/4, 0x1);
++	INSTANCE_WR(ctx, 0x3C0/4, 0x100);
++	INSTANCE_WR(ctx, 0x3C8/4, 0x1);
++	INSTANCE_WR(ctx, 0x3D4/4, 0x100);
++	INSTANCE_WR(ctx, 0x3D8/4, 0x1);
++	INSTANCE_WR(ctx, 0x3DC/4, 0x100);
++	INSTANCE_WR(ctx, 0x3E4/4, 0x1);
++	INSTANCE_WR(ctx, 0x3F0/4, 0x100);
++	INSTANCE_WR(ctx,
0x404/4, 0x4); ++ INSTANCE_WR(ctx, 0x408/4, 0x70); ++ INSTANCE_WR(ctx, 0x40C/4, 0x80); ++ INSTANCE_WR(ctx, 0x420/4, 0xC); ++ INSTANCE_WR(ctx, 0x428/4, 0x8); ++ INSTANCE_WR(ctx, 0x42C/4, 0x14); ++ INSTANCE_WR(ctx, 0x434/4, 0x29); ++ INSTANCE_WR(ctx, 0x438/4, 0x27); ++ INSTANCE_WR(ctx, 0x43C/4, 0x26); ++ INSTANCE_WR(ctx, 0x440/4, 0x8); ++ INSTANCE_WR(ctx, 0x444/4, 0x4); ++ INSTANCE_WR(ctx, 0x448/4, 0x27); ++ INSTANCE_WR(ctx, 0x454/4, 0x1); ++ INSTANCE_WR(ctx, 0x458/4, 0x2); ++ INSTANCE_WR(ctx, 0x45C/4, 0x3); ++ INSTANCE_WR(ctx, 0x460/4, 0x4); ++ INSTANCE_WR(ctx, 0x464/4, 0x5); ++ INSTANCE_WR(ctx, 0x468/4, 0x6); ++ INSTANCE_WR(ctx, 0x46C/4, 0x7); ++ INSTANCE_WR(ctx, 0x470/4, 0x1); ++ INSTANCE_WR(ctx, 0x4B4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x4E4/4, 0x80); ++ INSTANCE_WR(ctx, 0x4E8/4, 0x4); ++ INSTANCE_WR(ctx, 0x4EC/4, 0x4); ++ INSTANCE_WR(ctx, 0x4F0/4, 0x3); ++ INSTANCE_WR(ctx, 0x4F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x500/4, 0x12); ++ INSTANCE_WR(ctx, 0x504/4, 0x10); ++ INSTANCE_WR(ctx, 0x508/4, 0xC); ++ INSTANCE_WR(ctx, 0x50C/4, 0x1); ++ INSTANCE_WR(ctx, 0x51C/4, 0x4); ++ INSTANCE_WR(ctx, 0x520/4, 0x2); ++ INSTANCE_WR(ctx, 0x524/4, 0x4); ++ INSTANCE_WR(ctx, 0x530/4, 0x3FFFFF); ++ INSTANCE_WR(ctx, 0x534/4, 0x1FFF); ++ INSTANCE_WR(ctx, 0x55C/4, 0x4); ++ INSTANCE_WR(ctx, 0x560/4, 0x14); ++ INSTANCE_WR(ctx, 0x564/4, 0x1); ++ INSTANCE_WR(ctx, 0x570/4, 0x2); ++ INSTANCE_WR(ctx, 0x57C/4, 0x1); ++ INSTANCE_WR(ctx, 0x584/4, 0x2); ++ INSTANCE_WR(ctx, 0x588/4, 0x1000); ++ INSTANCE_WR(ctx, 0x58C/4, 0xE00); ++ INSTANCE_WR(ctx, 0x590/4, 0x1000); ++ INSTANCE_WR(ctx, 0x594/4, 0x1E00); ++ INSTANCE_WR(ctx, 0x59C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5BC/4, 0x200); ++ INSTANCE_WR(ctx, 0x5C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5C8/4, 0x70); ++ INSTANCE_WR(ctx, 0x5CC/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC/4, 0x70); ++ INSTANCE_WR(ctx, 0x5E0/4, 0x80); ++ INSTANCE_WR(ctx, 0x5F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5F4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x5FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x60C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x614/4, 0x2); ++ INSTANCE_WR(ctx, 0x61C/4, 0x1); ++ INSTANCE_WR(ctx, 0x624/4, 0x1); ++ INSTANCE_WR(ctx, 0x62C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x630/4, 0xCF); ++ INSTANCE_WR(ctx, 0x634/4, 0x1); ++ INSTANCE_WR(ctx, 0x63C/4, 0x1F80); ++ INSTANCE_WR(ctx, 0x654/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x658/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x660/4, 0x1000); ++ INSTANCE_WR(ctx, 0x664/4, 0x1F); ++ INSTANCE_WR(ctx, 0x668/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x66C/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x670/4, 0xB7892080); ++ INSTANCE_WR(ctx, 0x67C/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x680/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x688/4, 0x1000); ++ INSTANCE_WR(ctx, 0x68C/4, 0x1F); ++ INSTANCE_WR(ctx, 0x690/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x694/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x698/4, 0xB7892080); ++ INSTANCE_WR(ctx, 0x6A4/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x6A8/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x6B0/4, 0x1000); ++ INSTANCE_WR(ctx, 0x6B4/4, 0x1F); ++ INSTANCE_WR(ctx, 0x6B8/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x6BC/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x6C0/4, 0xB7892080); ++ INSTANCE_WR(ctx, 0x6CC/4, 0x3B74F821); ++ INSTANCE_WR(ctx, 0x6D0/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x6D8/4, 0x1000); ++ INSTANCE_WR(ctx, 0x6DC/4, 0x1F); ++ INSTANCE_WR(ctx, 0x6E0/4, 0x27C10FA); ++ INSTANCE_WR(ctx, 0x6E4/4, 0x400000C0); ++ INSTANCE_WR(ctx, 0x6E8/4, 0xB7892080); ++ 
INSTANCE_WR(ctx, 0x6F4/4, 0x390040); ++ INSTANCE_WR(ctx, 0x6FC/4, 0x22); ++ INSTANCE_WR(ctx, 0x708/4, 0x390040); ++ INSTANCE_WR(ctx, 0x70C/4, 0x22); ++ INSTANCE_WR(ctx, 0x724/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x728/4, 0x160000); ++ INSTANCE_WR(ctx, 0x72C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x73C/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x740/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x764/4, 0x10401); ++ INSTANCE_WR(ctx, 0x76C/4, 0x78); ++ INSTANCE_WR(ctx, 0x774/4, 0xBF); ++ INSTANCE_WR(ctx, 0x77C/4, 0x1210); ++ INSTANCE_WR(ctx, 0x780/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x7A4/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x7A8/4, 0x160000); ++ INSTANCE_WR(ctx, 0x7AC/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x7BC/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x7C0/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x7E4/4, 0x10401); ++ INSTANCE_WR(ctx, 0x7EC/4, 0x78); ++ INSTANCE_WR(ctx, 0x7F4/4, 0xBF); ++ INSTANCE_WR(ctx, 0x7FC/4, 0x1210); ++ INSTANCE_WR(ctx, 0x800/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x828/4, 0x27070); ++ INSTANCE_WR(ctx, 0x834/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x84C/4, 0x120407); ++ INSTANCE_WR(ctx, 0x850/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x854/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x858/4, 0x30201); ++ INSTANCE_WR(ctx, 0x874/4, 0x40); ++ INSTANCE_WR(ctx, 0x878/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0x87C/4, 0x141210); ++ INSTANCE_WR(ctx, 0x880/4, 0x1F0); ++ INSTANCE_WR(ctx, 0x884/4, 0x1); ++ INSTANCE_WR(ctx, 0x888/4, 0x3); ++ INSTANCE_WR(ctx, 0x894/4, 0x39E00); ++ INSTANCE_WR(ctx, 0x898/4, 0x100); ++ INSTANCE_WR(ctx, 0x89C/4, 0x3800); ++ INSTANCE_WR(ctx, 0x8A0/4, 0x404040); ++ INSTANCE_WR(ctx, 0x8A4/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0x8AC/4, 0x77F005); ++ INSTANCE_WR(ctx, 0x8B0/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0x8C0/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x8C4/4, 0x160000); ++ INSTANCE_WR(ctx, 0x8C8/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x8D8/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x8DC/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x900/4, 0x10401); ++ INSTANCE_WR(ctx, 0x908/4, 0x78); ++ INSTANCE_WR(ctx, 0x910/4, 0xBF); ++ INSTANCE_WR(ctx, 0x918/4, 0x1210); ++ INSTANCE_WR(ctx, 0x91C/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x940/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x944/4, 0x160000); ++ INSTANCE_WR(ctx, 0x948/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x958/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x95C/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x980/4, 0x10401); ++ INSTANCE_WR(ctx, 0x988/4, 0x78); ++ INSTANCE_WR(ctx, 0x990/4, 0xBF); ++ INSTANCE_WR(ctx, 0x998/4, 0x1210); ++ INSTANCE_WR(ctx, 0x99C/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x9C4/4, 0x27070); ++ INSTANCE_WR(ctx, 0x9D0/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x9E8/4, 0x120407); ++ INSTANCE_WR(ctx, 0x9EC/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x9F0/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x9F4/4, 0x30201); ++ INSTANCE_WR(ctx, 0xA10/4, 0x40); ++ INSTANCE_WR(ctx, 0xA14/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0xA18/4, 0x141210); ++ INSTANCE_WR(ctx, 0xA1C/4, 0x1F0); ++ INSTANCE_WR(ctx, 0xA20/4, 0x1); ++ INSTANCE_WR(ctx, 0xA24/4, 0x3); ++ INSTANCE_WR(ctx, 0xA30/4, 0x39E00); ++ INSTANCE_WR(ctx, 0xA34/4, 0x100); ++ INSTANCE_WR(ctx, 0xA38/4, 0x3800); ++ INSTANCE_WR(ctx, 0xA3C/4, 0x404040); ++ INSTANCE_WR(ctx, 0xA40/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0xA48/4, 0x77F005); ++ INSTANCE_WR(ctx, 0xA4C/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0xA5C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xA60/4, 0x160000); ++ INSTANCE_WR(ctx, 0xA64/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xA74/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xA78/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xA9C/4, 0x10401); ++ INSTANCE_WR(ctx, 0xAA4/4, 0x78); ++ INSTANCE_WR(ctx, 0xAAC/4, 0xBF); ++ INSTANCE_WR(ctx, 0xAB4/4, 0x1210); ++ 
INSTANCE_WR(ctx, 0xAB8/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xADC/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xAE0/4, 0x160000); ++ INSTANCE_WR(ctx, 0xAE4/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xAF4/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xAF8/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xB1C/4, 0x10401); ++ INSTANCE_WR(ctx, 0xB24/4, 0x78); ++ INSTANCE_WR(ctx, 0xB2C/4, 0xBF); ++ INSTANCE_WR(ctx, 0xB34/4, 0x1210); ++ INSTANCE_WR(ctx, 0xB38/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xB60/4, 0x27070); ++ INSTANCE_WR(ctx, 0xB6C/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0xB84/4, 0x120407); ++ INSTANCE_WR(ctx, 0xB88/4, 0x5091507); ++ INSTANCE_WR(ctx, 0xB8C/4, 0x5010202); ++ INSTANCE_WR(ctx, 0xB90/4, 0x30201); ++ INSTANCE_WR(ctx, 0xBAC/4, 0x40); ++ INSTANCE_WR(ctx, 0xBB0/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0xBB4/4, 0x141210); ++ INSTANCE_WR(ctx, 0xBB8/4, 0x1F0); ++ INSTANCE_WR(ctx, 0xBBC/4, 0x1); ++ INSTANCE_WR(ctx, 0xBC0/4, 0x3); ++ INSTANCE_WR(ctx, 0xBCC/4, 0x39E00); ++ INSTANCE_WR(ctx, 0xBD0/4, 0x100); ++ INSTANCE_WR(ctx, 0xBD4/4, 0x3800); ++ INSTANCE_WR(ctx, 0xBD8/4, 0x404040); ++ INSTANCE_WR(ctx, 0xBDC/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0xBE4/4, 0x77F005); ++ INSTANCE_WR(ctx, 0xBE8/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0xBF8/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xBFC/4, 0x160000); ++ INSTANCE_WR(ctx, 0xC00/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xC10/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xC14/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xC38/4, 0x10401); ++ INSTANCE_WR(ctx, 0xC40/4, 0x78); ++ INSTANCE_WR(ctx, 0xC48/4, 0xBF); ++ INSTANCE_WR(ctx, 0xC50/4, 0x1210); ++ INSTANCE_WR(ctx, 0xC54/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xC78/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xC7C/4, 0x160000); ++ INSTANCE_WR(ctx, 0xC80/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xC90/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xC94/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xCB8/4, 0x10401); ++ INSTANCE_WR(ctx, 0xCC0/4, 0x78); ++ INSTANCE_WR(ctx, 0xCC8/4, 0xBF); ++ INSTANCE_WR(ctx, 0xCD0/4, 0x1210); ++ INSTANCE_WR(ctx, 0xCD4/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xCFC/4, 0x27070); ++ INSTANCE_WR(ctx, 0xD08/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0xD20/4, 0x120407); ++ INSTANCE_WR(ctx, 0xD24/4, 0x5091507); ++ INSTANCE_WR(ctx, 0xD28/4, 0x5010202); ++ INSTANCE_WR(ctx, 0xD2C/4, 0x30201); ++ INSTANCE_WR(ctx, 0xD48/4, 0x40); ++ INSTANCE_WR(ctx, 0xD4C/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0xD50/4, 0x141210); ++ INSTANCE_WR(ctx, 0xD54/4, 0x1F0); ++ INSTANCE_WR(ctx, 0xD58/4, 0x1); ++ INSTANCE_WR(ctx, 0xD5C/4, 0x3); ++ INSTANCE_WR(ctx, 0xD68/4, 0x39E00); ++ INSTANCE_WR(ctx, 0xD6C/4, 0x100); ++ INSTANCE_WR(ctx, 0xD70/4, 0x3800); ++ INSTANCE_WR(ctx, 0xD74/4, 0x404040); ++ INSTANCE_WR(ctx, 0xD78/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0xD80/4, 0x77F005); ++ INSTANCE_WR(ctx, 0xD84/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0xD94/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xD98/4, 0x160000); ++ INSTANCE_WR(ctx, 0xD9C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xDAC/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xDB0/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xDD4/4, 0x10401); ++ INSTANCE_WR(ctx, 0xDDC/4, 0x78); ++ INSTANCE_WR(ctx, 0xDE4/4, 0xBF); ++ INSTANCE_WR(ctx, 0xDEC/4, 0x1210); ++ INSTANCE_WR(ctx, 0xDF0/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xE14/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xE18/4, 0x160000); ++ INSTANCE_WR(ctx, 0xE1C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xE2C/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xE30/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xE54/4, 0x10401); ++ INSTANCE_WR(ctx, 0xE5C/4, 0x78); ++ INSTANCE_WR(ctx, 0xE64/4, 0xBF); ++ INSTANCE_WR(ctx, 0xE6C/4, 0x1210); ++ INSTANCE_WR(ctx, 0xE70/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xE98/4, 0x27070); ++ INSTANCE_WR(ctx, 0xEA4/4, 
0x3FFFFFF); ++ INSTANCE_WR(ctx, 0xEBC/4, 0x120407); ++ INSTANCE_WR(ctx, 0xEC0/4, 0x5091507); ++ INSTANCE_WR(ctx, 0xEC4/4, 0x5010202); ++ INSTANCE_WR(ctx, 0xEC8/4, 0x30201); ++ INSTANCE_WR(ctx, 0xEE4/4, 0x40); ++ INSTANCE_WR(ctx, 0xEE8/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0xEEC/4, 0x141210); ++ INSTANCE_WR(ctx, 0xEF0/4, 0x1F0); ++ INSTANCE_WR(ctx, 0xEF4/4, 0x1); ++ INSTANCE_WR(ctx, 0xEF8/4, 0x3); ++ INSTANCE_WR(ctx, 0xF04/4, 0x39E00); ++ INSTANCE_WR(ctx, 0xF08/4, 0x100); ++ INSTANCE_WR(ctx, 0xF0C/4, 0x3800); ++ INSTANCE_WR(ctx, 0xF10/4, 0x404040); ++ INSTANCE_WR(ctx, 0xF14/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0xF1C/4, 0x77F005); ++ INSTANCE_WR(ctx, 0xF20/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0xF30/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xF34/4, 0x160000); ++ INSTANCE_WR(ctx, 0xF38/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xF48/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xF4C/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xF70/4, 0x10401); ++ INSTANCE_WR(ctx, 0xF78/4, 0x78); ++ INSTANCE_WR(ctx, 0xF80/4, 0xBF); ++ INSTANCE_WR(ctx, 0xF88/4, 0x1210); ++ INSTANCE_WR(ctx, 0xF8C/4, 0x8000080); ++ INSTANCE_WR(ctx, 0xFB0/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xFB4/4, 0x160000); ++ INSTANCE_WR(ctx, 0xFB8/4, 0x1800000); ++ INSTANCE_WR(ctx, 0xFC8/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0xFCC/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0xFF0/4, 0x10401); ++ INSTANCE_WR(ctx, 0xFF8/4, 0x78); ++ INSTANCE_WR(ctx, 0x1000/4, 0xBF); ++ INSTANCE_WR(ctx, 0x1008/4, 0x1210); ++ INSTANCE_WR(ctx, 0x100C/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x1034/4, 0x27070); ++ INSTANCE_WR(ctx, 0x1040/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x1058/4, 0x120407); ++ INSTANCE_WR(ctx, 0x105C/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x1060/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x1064/4, 0x30201); ++ INSTANCE_WR(ctx, 0x1080/4, 0x40); ++ INSTANCE_WR(ctx, 0x1084/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0x1088/4, 0x141210); ++ INSTANCE_WR(ctx, 0x108C/4, 0x1F0); ++ INSTANCE_WR(ctx, 0x1090/4, 0x1); ++ INSTANCE_WR(ctx, 0x1094/4, 0x3); ++ INSTANCE_WR(ctx, 0x10A0/4, 0x39E00); ++ INSTANCE_WR(ctx, 0x10A4/4, 0x100); ++ INSTANCE_WR(ctx, 0x10A8/4, 0x3800); ++ INSTANCE_WR(ctx, 0x10AC/4, 0x404040); ++ INSTANCE_WR(ctx, 0x10B0/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0x10B8/4, 0x77F005); ++ INSTANCE_WR(ctx, 0x10BC/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0x10CC/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x10D0/4, 0x160000); ++ INSTANCE_WR(ctx, 0x10D4/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x10E4/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x10E8/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x110C/4, 0x10401); ++ INSTANCE_WR(ctx, 0x1114/4, 0x78); ++ INSTANCE_WR(ctx, 0x111C/4, 0xBF); ++ INSTANCE_WR(ctx, 0x1124/4, 0x1210); ++ INSTANCE_WR(ctx, 0x1128/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x114C/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x1150/4, 0x160000); ++ INSTANCE_WR(ctx, 0x1154/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x1164/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x1168/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x118C/4, 0x10401); ++ INSTANCE_WR(ctx, 0x1194/4, 0x78); ++ INSTANCE_WR(ctx, 0x119C/4, 0xBF); ++ INSTANCE_WR(ctx, 0x11A4/4, 0x1210); ++ INSTANCE_WR(ctx, 0x11A8/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x11D0/4, 0x27070); ++ INSTANCE_WR(ctx, 0x11DC/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x11F4/4, 0x120407); ++ INSTANCE_WR(ctx, 0x11F8/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x11FC/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x1200/4, 0x30201); ++ INSTANCE_WR(ctx, 0x121C/4, 0x40); ++ INSTANCE_WR(ctx, 0x1220/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0x1224/4, 0x141210); ++ INSTANCE_WR(ctx, 0x1228/4, 0x1F0); ++ INSTANCE_WR(ctx, 0x122C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1230/4, 0x3); ++ INSTANCE_WR(ctx, 0x123C/4, 0x39E00); ++ 
INSTANCE_WR(ctx, 0x1240/4, 0x100); ++ INSTANCE_WR(ctx, 0x1244/4, 0x3800); ++ INSTANCE_WR(ctx, 0x1248/4, 0x404040); ++ INSTANCE_WR(ctx, 0x124C/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0x1254/4, 0x77F005); ++ INSTANCE_WR(ctx, 0x1258/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0x1268/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x126C/4, 0x160000); ++ INSTANCE_WR(ctx, 0x1270/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x1280/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x1284/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x12A8/4, 0x10401); ++ INSTANCE_WR(ctx, 0x12B0/4, 0x78); ++ INSTANCE_WR(ctx, 0x12B8/4, 0xBF); ++ INSTANCE_WR(ctx, 0x12C0/4, 0x1210); ++ INSTANCE_WR(ctx, 0x12C4/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x12E8/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x12EC/4, 0x160000); ++ INSTANCE_WR(ctx, 0x12F0/4, 0x1800000); ++ INSTANCE_WR(ctx, 0x1300/4, 0x3FFFF); ++ INSTANCE_WR(ctx, 0x1304/4, 0x118C0000); ++ INSTANCE_WR(ctx, 0x1328/4, 0x10401); ++ INSTANCE_WR(ctx, 0x1330/4, 0x78); ++ INSTANCE_WR(ctx, 0x1338/4, 0xBF); ++ INSTANCE_WR(ctx, 0x1340/4, 0x1210); ++ INSTANCE_WR(ctx, 0x1344/4, 0x8000080); ++ INSTANCE_WR(ctx, 0x136C/4, 0x27070); ++ INSTANCE_WR(ctx, 0x1378/4, 0x3FFFFFF); ++ INSTANCE_WR(ctx, 0x1390/4, 0x120407); ++ INSTANCE_WR(ctx, 0x1394/4, 0x5091507); ++ INSTANCE_WR(ctx, 0x1398/4, 0x5010202); ++ INSTANCE_WR(ctx, 0x139C/4, 0x30201); ++ INSTANCE_WR(ctx, 0x13B8/4, 0x40); ++ INSTANCE_WR(ctx, 0x13BC/4, 0xD0C0B0A); ++ INSTANCE_WR(ctx, 0x13C0/4, 0x141210); ++ INSTANCE_WR(ctx, 0x13C4/4, 0x1F0); ++ INSTANCE_WR(ctx, 0x13C8/4, 0x1); ++ INSTANCE_WR(ctx, 0x13CC/4, 0x3); ++ INSTANCE_WR(ctx, 0x13D8/4, 0x39E00); ++ INSTANCE_WR(ctx, 0x13DC/4, 0x100); ++ INSTANCE_WR(ctx, 0x13E0/4, 0x3800); ++ INSTANCE_WR(ctx, 0x13E4/4, 0x404040); ++ INSTANCE_WR(ctx, 0x13E8/4, 0xFF0A); ++ INSTANCE_WR(ctx, 0x13F0/4, 0x77F005); ++ INSTANCE_WR(ctx, 0x13F4/4, 0x3F7FFF); ++ INSTANCE_WR(ctx, 0x8620/4, 0x21); ++ INSTANCE_WR(ctx, 0x8640/4, 0x1); ++ INSTANCE_WR(ctx, 0x8660/4, 0x2); ++ INSTANCE_WR(ctx, 0x8680/4, 0x100); ++ INSTANCE_WR(ctx, 0x86A0/4, 0x100); ++ INSTANCE_WR(ctx, 0x86C0/4, 0x1); ++ INSTANCE_WR(ctx, 0x8720/4, 0x1); ++ INSTANCE_WR(ctx, 0x8740/4, 0x2); ++ INSTANCE_WR(ctx, 0x8760/4, 0x100); ++ INSTANCE_WR(ctx, 0x8780/4, 0x100); ++ INSTANCE_WR(ctx, 0x87A0/4, 0x1); ++ INSTANCE_WR(ctx, 0x1B8C0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1B8E0/4, 0x4); ++ INSTANCE_WR(ctx, 0x54260/4, 0x4); ++ INSTANCE_WR(ctx, 0x54280/4, 0x4); ++ INSTANCE_WR(ctx, 0x542A0/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x542C0/4, 0x3); ++ INSTANCE_WR(ctx, 0x54300/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x54340/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x54360/4, 0x1); ++ INSTANCE_WR(ctx, 0x54380/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x543E0/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x54400/4, 0x27); ++ INSTANCE_WR(ctx, 0x54460/4, 0x1); ++ INSTANCE_WR(ctx, 0x5BCA0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5BF80/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5C120/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x5C140/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x5C180/4, 0x80); ++ INSTANCE_WR(ctx, 0x5C200/4, 0x80); ++ INSTANCE_WR(ctx, 0x5C240/4, 0x3F); ++ INSTANCE_WR(ctx, 0x5C3A0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5C3C0/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x5C3E0/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x5C500/4, 0x4); ++ INSTANCE_WR(ctx, 0x5C580/4, 0x4); ++ INSTANCE_WR(ctx, 0x5C7C0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5C7E0/4, 0x1001); ++ INSTANCE_WR(ctx, 0x5C800/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5C820/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5C840/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5C860/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5CC80/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CCA0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CCC0/4, 
0x3F800000); ++ INSTANCE_WR(ctx, 0x5CCE0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CD00/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CD20/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CD40/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CD60/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CD80/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CDA0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CDC0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CDE0/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CE00/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CE20/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CE40/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CE60/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x5CE80/4, 0x10); ++ INSTANCE_WR(ctx, 0x5CEE0/4, 0x3); ++ INSTANCE_WR(ctx, 0x1584/4, 0xF); ++ INSTANCE_WR(ctx, 0x1624/4, 0x20); ++ INSTANCE_WR(ctx, 0x1804/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19C4/4, 0x4); ++ INSTANCE_WR(ctx, 0x19E4/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A24/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A44/4, 0x8); ++ INSTANCE_WR(ctx, 0x1A84/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x1C24/4, 0xF); ++ INSTANCE_WR(ctx, 0x4104/4, 0xF); ++ INSTANCE_WR(ctx, 0x4144/4, 0x1); ++ INSTANCE_WR(ctx, 0x4CA4/4, 0xF); ++ INSTANCE_WR(ctx, 0x15344/4, 0xF); ++ INSTANCE_WR(ctx, 0x155E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x15604/4, 0x100); ++ INSTANCE_WR(ctx, 0x15624/4, 0x100); ++ INSTANCE_WR(ctx, 0x15644/4, 0x11); ++ INSTANCE_WR(ctx, 0x15684/4, 0x8); ++ INSTANCE_WR(ctx, 0x15744/4, 0x1); ++ INSTANCE_WR(ctx, 0x15784/4, 0x1); ++ INSTANCE_WR(ctx, 0x157A4/4, 0x1); ++ INSTANCE_WR(ctx, 0x157C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x157E4/4, 0xCF); ++ INSTANCE_WR(ctx, 0x15804/4, 0x2); ++ INSTANCE_WR(ctx, 0x158E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x15924/4, 0x1); ++ INSTANCE_WR(ctx, 0x15944/4, 0x1); ++ INSTANCE_WR(ctx, 0x15964/4, 0x1); ++ INSTANCE_WR(ctx, 0x15A04/4, 0x4); ++ INSTANCE_WR(ctx, 0x15A44/4, 0x1); ++ INSTANCE_WR(ctx, 0x15A64/4, 0x15); ++ INSTANCE_WR(ctx, 0x15AE4/4, 0x4444480); ++ INSTANCE_WR(ctx, 0x16264/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x16304/4, 0x100); ++ INSTANCE_WR(ctx, 0x16364/4, 0x10001); ++ INSTANCE_WR(ctx, 0x163A4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x163C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x163E4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x16404/4, 0x1); ++ INSTANCE_WR(ctx, 0x16424/4, 0x4); ++ INSTANCE_WR(ctx, 0x16444/4, 0x2); ++ INSTANCE_WR(ctx, 0x183C4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x183E4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x18484/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x18604/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x18624/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x16508/4, 0x3FFFFF); ++ INSTANCE_WR(ctx, 0x16568/4, 0x1FFF); ++ INSTANCE_WR(ctx, 0x16748/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x16828/4, 0x4); ++ INSTANCE_WR(ctx, 0x16848/4, 0x1A); ++ INSTANCE_WR(ctx, 0x168A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x16B08/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x16BE8/4, 0xF); ++ INSTANCE_WR(ctx, 0x16CE8/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16D08/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F08/4, 0x4); ++ INSTANCE_WR(ctx, 0x16FA8/4, 0x2); ++ INSTANCE_WR(ctx, 0x16FC8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x16FE8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x17068/4, 0x5); ++ INSTANCE_WR(ctx, 0x17088/4, 0x52); ++ INSTANCE_WR(ctx, 0x17128/4, 0x1); ++ INSTANCE_WR(ctx, 0x17348/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17368/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17388/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x173A8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x173C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x173E8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17408/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17428/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17448/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17468/4, 0x3F800000); ++ INSTANCE_WR(ctx, 
0x17488/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x174A8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x174C8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x174E8/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17508/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17528/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x17548/4, 0x10); ++ INSTANCE_WR(ctx, 0x17A28/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x17A48/4, 0x5); ++ INSTANCE_WR(ctx, 0x17AA8/4, 0x1); ++ INSTANCE_WR(ctx, 0x17AE8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x17B08/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x17B28/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x17B48/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x17B68/4, 0x3); ++ INSTANCE_WR(ctx, 0x17F68/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x17F88/4, 0x1A); ++ INSTANCE_WR(ctx, 0x17FC8/4, 0x3); ++ INSTANCE_WR(ctx, 0x184A8/4, 0x102); ++ INSTANCE_WR(ctx, 0x184E8/4, 0x4); ++ INSTANCE_WR(ctx, 0x18508/4, 0x4); ++ INSTANCE_WR(ctx, 0x18528/4, 0x4); ++ INSTANCE_WR(ctx, 0x18548/4, 0x4); ++ INSTANCE_WR(ctx, 0x18568/4, 0x4); ++ INSTANCE_WR(ctx, 0x18588/4, 0x4); ++ INSTANCE_WR(ctx, 0x185C8/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x18608/4, 0x102); ++ INSTANCE_WR(ctx, 0x18748/4, 0x4); ++ INSTANCE_WR(ctx, 0x18768/4, 0x4); ++ INSTANCE_WR(ctx, 0x18788/4, 0x4); ++ INSTANCE_WR(ctx, 0x187A8/4, 0x4); ++ INSTANCE_WR(ctx, 0x18DE8/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x18E48/4, 0x804); ++ INSTANCE_WR(ctx, 0x18E88/4, 0x4); ++ INSTANCE_WR(ctx, 0x18EA8/4, 0x4); ++ INSTANCE_WR(ctx, 0x18EC8/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x18F08/4, 0x4); ++ INSTANCE_WR(ctx, 0x18F28/4, 0x4); ++ INSTANCE_WR(ctx, 0x18F68/4, 0x10); ++ INSTANCE_WR(ctx, 0x19008/4, 0x804); ++ INSTANCE_WR(ctx, 0x19028/4, 0x1); ++ INSTANCE_WR(ctx, 0x19048/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19068/4, 0x7F); ++ INSTANCE_WR(ctx, 0x190A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x190C8/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x19108/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x19128/4, 0x4); ++ INSTANCE_WR(ctx, 0x19148/4, 0x4); ++ INSTANCE_WR(ctx, 0x19188/4, 0x10); ++ INSTANCE_WR(ctx, 0x19208/4, 0x1); ++ INSTANCE_WR(ctx, 0x19228/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x19308/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x19328/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x19A48/4, 0x1); ++ INSTANCE_WR(ctx, 0x19AA8/4, 0x10); ++ INSTANCE_WR(ctx, 0x1A1C8/4, 0x88); ++ INSTANCE_WR(ctx, 0x1A1E8/4, 0x88); ++ INSTANCE_WR(ctx, 0x1A248/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A528/4, 0x26); ++ INSTANCE_WR(ctx, 0x1A588/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x1A608/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1A628/4, 0x10); ++ INSTANCE_WR(ctx, 0x1AB48/4, 0x52); ++ INSTANCE_WR(ctx, 0x1AB88/4, 0x26); ++ INSTANCE_WR(ctx, 0x1ABC8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1ABE8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1AC28/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1AC88/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x1ACC8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1ACE8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1AD28/4, 0x80); ++ INSTANCE_WR(ctx, 0x1AD48/4, 0x4); ++ INSTANCE_WR(ctx, 0x1AD68/4, 0x80C14); ++ INSTANCE_WR(ctx, 0x1ADA8/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x2D608/4, 0x4); ++ INSTANCE_WR(ctx, 0x2D628/4, 0x4); ++ INSTANCE_WR(ctx, 0x2D668/4, 0x80); ++ INSTANCE_WR(ctx, 0x2D688/4, 0x4); ++ INSTANCE_WR(ctx, 0x2D6A8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2D6E8/4, 0x27); ++ INSTANCE_WR(ctx, 0x2D728/4, 0x26); ++ INSTANCE_WR(ctx, 0x2D7A8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D7C8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D7E8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D808/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D828/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D848/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D868/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D888/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D8A8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D8C8/4, 0x4000000); ++ 
INSTANCE_WR(ctx, 0x2D8E8/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D908/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D928/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D948/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D968/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2D988/4, 0x4000000); ++ INSTANCE_WR(ctx, 0x2DE28/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x2DE48/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x2DEA8/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x160C/4, 0x2); ++ INSTANCE_WR(ctx, 0x164C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x17EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x180C/4, 0x10); ++ INSTANCE_WR(ctx, 0x186C/4, 0x1); ++ INSTANCE_WR(ctx, 0x190C/4, 0x4); ++ INSTANCE_WR(ctx, 0x192C/4, 0x400); ++ INSTANCE_WR(ctx, 0x194C/4, 0x300); ++ INSTANCE_WR(ctx, 0x196C/4, 0x1001); ++ INSTANCE_WR(ctx, 0x198C/4, 0x15); ++ INSTANCE_WR(ctx, 0x1A4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x1B6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1B8C/4, 0x10); ++ INSTANCE_WR(ctx, 0x1BCC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1E4C/4, 0x10); ++ INSTANCE_WR(ctx, 0x206C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x208C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x20AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x20CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x20EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x210C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x212C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x214C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x216C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x218C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x21AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x21CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x21EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x220C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x222C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x224C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x258C/4, 0x10); ++ INSTANCE_WR(ctx, 0x25CC/4, 0x3F); ++ INSTANCE_WR(ctx, 0x26AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x26EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x272C/4, 0x1); ++ INSTANCE_WR(ctx, 0x28CC/4, 0x11); ++ INSTANCE_WR(ctx, 0x29CC/4, 0xF); ++ INSTANCE_WR(ctx, 0x2ACC/4, 0x11); ++ INSTANCE_WR(ctx, 0x2BAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2BCC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2BEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2C0C/4, 0x2); ++ INSTANCE_WR(ctx, 0x2C2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2C4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x2C6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2CAC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x2CEC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2FAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FCC/4, 0x2); ++ INSTANCE_WR(ctx, 0x2FEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x300C/4, 0x1); ++ INSTANCE_WR(ctx, 0x302C/4, 0x2); ++ INSTANCE_WR(ctx, 0x304C/4, 0x1); ++ INSTANCE_WR(ctx, 0x306C/4, 0x1); ++ INSTANCE_WR(ctx, 0x30EC/4, 0x11); ++ INSTANCE_WR(ctx, 0x310C/4, 0x1); ++ INSTANCE_WR(ctx, 0x3D8C/4, 0x2); ++ INSTANCE_WR(ctx, 0x3DCC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x3F6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x3F8C/4, 0x10); ++ INSTANCE_WR(ctx, 0x3FEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x408C/4, 0x4); ++ INSTANCE_WR(ctx, 0x40AC/4, 0x400); ++ INSTANCE_WR(ctx, 0x40CC/4, 0x300); ++ INSTANCE_WR(ctx, 0x40EC/4, 0x1001); ++ INSTANCE_WR(ctx, 0x410C/4, 0x15); ++ INSTANCE_WR(ctx, 0x41CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x42EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x430C/4, 0x10); ++ INSTANCE_WR(ctx, 0x434C/4, 0x1); ++ INSTANCE_WR(ctx, 0x45CC/4, 0x10); ++ INSTANCE_WR(ctx, 0x47EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x480C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x482C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x484C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x486C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x488C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x48AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x48CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x48EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x490C/4, 
0x3F800000); ++ INSTANCE_WR(ctx, 0x492C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x494C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x496C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x498C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x49AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x49CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x4D0C/4, 0x10); ++ INSTANCE_WR(ctx, 0x4D4C/4, 0x3F); ++ INSTANCE_WR(ctx, 0x4E2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4E6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x4EAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x504C/4, 0x11); ++ INSTANCE_WR(ctx, 0x514C/4, 0xF); ++ INSTANCE_WR(ctx, 0x524C/4, 0x11); ++ INSTANCE_WR(ctx, 0x532C/4, 0x1); ++ INSTANCE_WR(ctx, 0x534C/4, 0x1); ++ INSTANCE_WR(ctx, 0x536C/4, 0x1); ++ INSTANCE_WR(ctx, 0x538C/4, 0x2); ++ INSTANCE_WR(ctx, 0x53AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x53CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x53EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x542C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x546C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x572C/4, 0x1); ++ INSTANCE_WR(ctx, 0x574C/4, 0x2); ++ INSTANCE_WR(ctx, 0x576C/4, 0x1); ++ INSTANCE_WR(ctx, 0x578C/4, 0x1); ++ INSTANCE_WR(ctx, 0x57AC/4, 0x2); ++ INSTANCE_WR(ctx, 0x57CC/4, 0x1); ++ INSTANCE_WR(ctx, 0x57EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x586C/4, 0x11); ++ INSTANCE_WR(ctx, 0x588C/4, 0x1); ++ INSTANCE_WR(ctx, 0x650C/4, 0x2); ++ INSTANCE_WR(ctx, 0x654C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x66EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x670C/4, 0x10); ++ INSTANCE_WR(ctx, 0x676C/4, 0x1); ++ INSTANCE_WR(ctx, 0x680C/4, 0x4); ++ INSTANCE_WR(ctx, 0x682C/4, 0x400); ++ INSTANCE_WR(ctx, 0x684C/4, 0x300); ++ INSTANCE_WR(ctx, 0x686C/4, 0x1001); ++ INSTANCE_WR(ctx, 0x688C/4, 0x15); ++ INSTANCE_WR(ctx, 0x694C/4, 0x2); ++ INSTANCE_WR(ctx, 0x6A6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x6A8C/4, 0x10); ++ INSTANCE_WR(ctx, 0x6ACC/4, 0x1); ++ INSTANCE_WR(ctx, 0x6D4C/4, 0x10); ++ INSTANCE_WR(ctx, 0x6F6C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x6F8C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x6FAC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x6FCC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x6FEC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x700C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x702C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x704C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x706C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x708C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x70AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x70CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x70EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x710C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x712C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x714C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x748C/4, 0x10); ++ INSTANCE_WR(ctx, 0x74CC/4, 0x3F); ++ INSTANCE_WR(ctx, 0x75AC/4, 0x1); ++ INSTANCE_WR(ctx, 0x75EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x762C/4, 0x1); ++ INSTANCE_WR(ctx, 0x77CC/4, 0x11); ++ INSTANCE_WR(ctx, 0x78CC/4, 0xF); ++ INSTANCE_WR(ctx, 0x79CC/4, 0x11); ++ INSTANCE_WR(ctx, 0x7AAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x7ACC/4, 0x1); ++ INSTANCE_WR(ctx, 0x7AEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x7B0C/4, 0x2); ++ INSTANCE_WR(ctx, 0x7B2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x7B4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x7B6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x7BAC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x7BEC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x7EAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x7ECC/4, 0x2); ++ INSTANCE_WR(ctx, 0x7EEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x7F0C/4, 0x1); ++ INSTANCE_WR(ctx, 0x7F2C/4, 0x2); ++ INSTANCE_WR(ctx, 0x7F4C/4, 0x1); ++ INSTANCE_WR(ctx, 0x7F6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x7FEC/4, 0x11); ++ INSTANCE_WR(ctx, 0x800C/4, 0x1); ++ INSTANCE_WR(ctx, 0x8C8C/4, 0x2); ++ INSTANCE_WR(ctx, 0x8CCC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x8E6C/4, 0x1); ++ INSTANCE_WR(ctx, 
0x8E8C/4, 0x10); ++ INSTANCE_WR(ctx, 0x8EEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x8F8C/4, 0x4); ++ INSTANCE_WR(ctx, 0x8FAC/4, 0x400); ++ INSTANCE_WR(ctx, 0x8FCC/4, 0x300); ++ INSTANCE_WR(ctx, 0x8FEC/4, 0x1001); ++ INSTANCE_WR(ctx, 0x900C/4, 0x15); ++ INSTANCE_WR(ctx, 0x90CC/4, 0x2); ++ INSTANCE_WR(ctx, 0x91EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x920C/4, 0x10); ++ INSTANCE_WR(ctx, 0x924C/4, 0x1); ++ INSTANCE_WR(ctx, 0x94CC/4, 0x10); ++ INSTANCE_WR(ctx, 0x96EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x970C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x972C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x974C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x976C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x978C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x97AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x97CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x97EC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x980C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x982C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x984C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x986C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x988C/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x98AC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x98CC/4, 0x3F800000); ++ INSTANCE_WR(ctx, 0x9C0C/4, 0x10); ++ INSTANCE_WR(ctx, 0x9C4C/4, 0x3F); ++ INSTANCE_WR(ctx, 0x9D2C/4, 0x1); ++ INSTANCE_WR(ctx, 0x9D6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x9DAC/4, 0x1); ++ INSTANCE_WR(ctx, 0x9F4C/4, 0x11); ++ INSTANCE_WR(ctx, 0xA04C/4, 0xF); ++ INSTANCE_WR(ctx, 0xA14C/4, 0x11); ++ INSTANCE_WR(ctx, 0xA22C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA24C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA26C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA28C/4, 0x2); ++ INSTANCE_WR(ctx, 0xA2AC/4, 0x1); ++ INSTANCE_WR(ctx, 0xA2CC/4, 0x2); ++ INSTANCE_WR(ctx, 0xA2EC/4, 0x1); ++ INSTANCE_WR(ctx, 0xA32C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0xA36C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0xA62C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA64C/4, 0x2); ++ INSTANCE_WR(ctx, 0xA66C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA68C/4, 0x1); ++ INSTANCE_WR(ctx, 0xA6AC/4, 0x2); ++ INSTANCE_WR(ctx, 0xA6CC/4, 0x1); ++ INSTANCE_WR(ctx, 0xA6EC/4, 0x1); ++ INSTANCE_WR(ctx, 0xA76C/4, 0x11); ++ INSTANCE_WR(ctx, 0xA78C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1530/4, 0x4); ++ INSTANCE_WR(ctx, 0x17F0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1810/4, 0x4); ++ INSTANCE_WR(ctx, 0x1830/4, 0x608080); ++ INSTANCE_WR(ctx, 0x18D0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1930/4, 0x4); ++ INSTANCE_WR(ctx, 0x1950/4, 0x4); ++ INSTANCE_WR(ctx, 0x1970/4, 0x80); ++ INSTANCE_WR(ctx, 0x1990/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E30/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E50/4, 0x80); ++ INSTANCE_WR(ctx, 0x1E70/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E90/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1EB0/4, 0x3); ++ INSTANCE_WR(ctx, 0x1ED0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F70/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F90/4, 0x3); ++ INSTANCE_WR(ctx, 0x2010/4, 0x4); ++ INSTANCE_WR(ctx, 0x164B0/4, 0x4); ++ INSTANCE_WR(ctx, 0x164D0/4, 0x3); ++ INSTANCE_WR(ctx, 0x16710/4, 0xF); ++ INSTANCE_WR(ctx, 0x16890/4, 0x4); ++ INSTANCE_WR(ctx, 0x168B0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168D0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168F0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16910/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16A30/4, 0x1); ++ INSTANCE_WR(ctx, 0x16AB0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16B70/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D10/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D30/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D50/4, 0x2); ++ INSTANCE_WR(ctx, 0x16D70/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D90/4, 0x1); ++ INSTANCE_WR(ctx, 0x16DB0/4, 0x2); ++ INSTANCE_WR(ctx, 0x16DD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x16E10/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F10/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16F70/4, 0x4); ++ INSTANCE_WR(ctx, 0x16FF0/4, 
0x11); ++ INSTANCE_WR(ctx, 0x17010/4, 0x1); ++ INSTANCE_WR(ctx, 0x17050/4, 0xCF); ++ INSTANCE_WR(ctx, 0x17070/4, 0xCF); ++ INSTANCE_WR(ctx, 0x17090/4, 0xCF); ++ INSTANCE_WR(ctx, 0x171F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17210/4, 0x1); ++ INSTANCE_WR(ctx, 0x17230/4, 0x2); ++ INSTANCE_WR(ctx, 0x17250/4, 0x1); ++ INSTANCE_WR(ctx, 0x17270/4, 0x1); ++ INSTANCE_WR(ctx, 0x17290/4, 0x2); ++ INSTANCE_WR(ctx, 0x172B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x172F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17310/4, 0x1); ++ INSTANCE_WR(ctx, 0x17330/4, 0x1); ++ INSTANCE_WR(ctx, 0x17350/4, 0x1); ++ INSTANCE_WR(ctx, 0x17370/4, 0x1); ++ INSTANCE_WR(ctx, 0x17390/4, 0x1); ++ INSTANCE_WR(ctx, 0x173B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x173D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x173F0/4, 0x11); ++ INSTANCE_WR(ctx, 0x174F0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x17510/4, 0xF); ++ INSTANCE_WR(ctx, 0x17610/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x17670/4, 0x11); ++ INSTANCE_WR(ctx, 0x17690/4, 0x1); ++ INSTANCE_WR(ctx, 0x17710/4, 0x4); ++ INSTANCE_WR(ctx, 0x177D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x17870/4, 0x11); ++ INSTANCE_WR(ctx, 0x17970/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x179F0/4, 0x11); ++ INSTANCE_WR(ctx, 0x17A10/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A50/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A90/4, 0x1); ++ INSTANCE_WR(ctx, 0x17AD0/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17B10/4, 0x1); ++ INSTANCE_WR(ctx, 0x17B50/4, 0x1); ++ INSTANCE_WR(ctx, 0x180B0/4, 0x8); ++ INSTANCE_WR(ctx, 0x180D0/4, 0x8); ++ INSTANCE_WR(ctx, 0x180F0/4, 0x8); ++ INSTANCE_WR(ctx, 0x18110/4, 0x8); ++ INSTANCE_WR(ctx, 0x18130/4, 0x8); ++ INSTANCE_WR(ctx, 0x18150/4, 0x8); ++ INSTANCE_WR(ctx, 0x18170/4, 0x8); ++ INSTANCE_WR(ctx, 0x18190/4, 0x8); ++ INSTANCE_WR(ctx, 0x181B0/4, 0x11); ++ INSTANCE_WR(ctx, 0x182B0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x182D0/4, 0x400); ++ INSTANCE_WR(ctx, 0x182F0/4, 0x400); ++ INSTANCE_WR(ctx, 0x18310/4, 0x400); ++ INSTANCE_WR(ctx, 0x18330/4, 0x400); ++ INSTANCE_WR(ctx, 0x18350/4, 0x400); ++ INSTANCE_WR(ctx, 0x18370/4, 0x400); ++ INSTANCE_WR(ctx, 0x18390/4, 0x400); ++ INSTANCE_WR(ctx, 0x183B0/4, 0x400); ++ INSTANCE_WR(ctx, 0x183D0/4, 0x300); ++ INSTANCE_WR(ctx, 0x183F0/4, 0x300); ++ INSTANCE_WR(ctx, 0x18410/4, 0x300); ++ INSTANCE_WR(ctx, 0x18430/4, 0x300); ++ INSTANCE_WR(ctx, 0x18450/4, 0x300); ++ INSTANCE_WR(ctx, 0x18470/4, 0x300); ++ INSTANCE_WR(ctx, 0x18490/4, 0x300); ++ INSTANCE_WR(ctx, 0x184B0/4, 0x300); ++ INSTANCE_WR(ctx, 0x184D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x184F0/4, 0xF); ++ INSTANCE_WR(ctx, 0x185F0/4, 0x20); ++ INSTANCE_WR(ctx, 0x18610/4, 0x11); ++ INSTANCE_WR(ctx, 0x18630/4, 0x100); ++ INSTANCE_WR(ctx, 0x18670/4, 0x1); ++ INSTANCE_WR(ctx, 0x186D0/4, 0x40); ++ INSTANCE_WR(ctx, 0x186F0/4, 0x100); ++ INSTANCE_WR(ctx, 0x18730/4, 0x3); ++ INSTANCE_WR(ctx, 0x187D0/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x18850/4, 0x2); ++ INSTANCE_WR(ctx, 0x18870/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x189B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x18A50/4, 0x4); ++ INSTANCE_WR(ctx, 0x18A90/4, 0x1); ++ INSTANCE_WR(ctx, 0x18AB0/4, 0x400); ++ INSTANCE_WR(ctx, 0x18AD0/4, 0x300); ++ INSTANCE_WR(ctx, 0x18AF0/4, 0x1001); ++ INSTANCE_WR(ctx, 0x18B70/4, 0x11); ++ INSTANCE_WR(ctx, 0x18C70/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x18C90/4, 0xF); ++ INSTANCE_WR(ctx, 0x18F90/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x19010/4, 0x11); ++ INSTANCE_WR(ctx, 0x19070/4, 0x4); ++ INSTANCE_WR(ctx, 0x190B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x190D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x19150/4, 0x1); ++ INSTANCE_WR(ctx, 0x191F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x19230/4, 0x1); ++ INSTANCE_WR(ctx, 0x192B0/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x192F0/4, 
0x4085C000); ++ INSTANCE_WR(ctx, 0x19310/4, 0x40); ++ INSTANCE_WR(ctx, 0x19330/4, 0x100); ++ INSTANCE_WR(ctx, 0x19350/4, 0x10100); ++ INSTANCE_WR(ctx, 0x19370/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x195D0/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x195F0/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x19610/4, 0x1); ++ INSTANCE_WR(ctx, 0x19650/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x19670/4, 0x1); ++ INSTANCE_WR(ctx, 0x196D0/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x197F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x19830/4, 0x1); ++ INSTANCE_WR(ctx, 0x19850/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x19870/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x19890/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x198B0/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x198F0/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19930/4, 0x4); ++ INSTANCE_WR(ctx, 0x19BF0/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C10/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C30/4, 0x608080); ++ INSTANCE_WR(ctx, 0x19CD0/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D30/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D50/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D70/4, 0x80); ++ INSTANCE_WR(ctx, 0x19D90/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A230/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A250/4, 0x80); ++ INSTANCE_WR(ctx, 0x1A270/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A290/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1A2B0/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A2D0/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A370/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A390/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A410/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8B0/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8D0/4, 0x3); ++ INSTANCE_WR(ctx, 0x2EB10/4, 0xF); ++ INSTANCE_WR(ctx, 0x2EC90/4, 0x4); ++ INSTANCE_WR(ctx, 0x2ECB0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECD0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECF0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ED10/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2EE30/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EEB0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EF70/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F110/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F130/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F150/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F170/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F190/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F1B0/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F1D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F210/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F310/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F370/4, 0x4); ++ INSTANCE_WR(ctx, 0x2F3F0/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F410/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F450/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F470/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F490/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F5F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F610/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F630/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F650/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F670/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F690/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F6B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F6F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F710/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F730/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F750/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F770/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F790/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7F0/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F8F0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F910/4, 0xF); ++ INSTANCE_WR(ctx, 0x2FA10/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x2FA70/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FA90/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FB10/4, 0x4); ++ INSTANCE_WR(ctx, 0x2FBD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FC70/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FD70/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2FDF0/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FE10/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE50/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE90/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FED0/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x2FF10/4, 0x1); ++ 
INSTANCE_WR(ctx, 0x2FF50/4, 0x1); ++ INSTANCE_WR(ctx, 0x304B0/4, 0x8); ++ INSTANCE_WR(ctx, 0x304D0/4, 0x8); ++ INSTANCE_WR(ctx, 0x304F0/4, 0x8); ++ INSTANCE_WR(ctx, 0x30510/4, 0x8); ++ INSTANCE_WR(ctx, 0x30530/4, 0x8); ++ INSTANCE_WR(ctx, 0x30550/4, 0x8); ++ INSTANCE_WR(ctx, 0x30570/4, 0x8); ++ INSTANCE_WR(ctx, 0x30590/4, 0x8); ++ INSTANCE_WR(ctx, 0x305B0/4, 0x11); ++ INSTANCE_WR(ctx, 0x306B0/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x306D0/4, 0x400); ++ INSTANCE_WR(ctx, 0x306F0/4, 0x400); ++ INSTANCE_WR(ctx, 0x30710/4, 0x400); ++ INSTANCE_WR(ctx, 0x30730/4, 0x400); ++ INSTANCE_WR(ctx, 0x30750/4, 0x400); ++ INSTANCE_WR(ctx, 0x30770/4, 0x400); ++ INSTANCE_WR(ctx, 0x30790/4, 0x400); ++ INSTANCE_WR(ctx, 0x307B0/4, 0x400); ++ INSTANCE_WR(ctx, 0x307D0/4, 0x300); ++ INSTANCE_WR(ctx, 0x307F0/4, 0x300); ++ INSTANCE_WR(ctx, 0x30810/4, 0x300); ++ INSTANCE_WR(ctx, 0x30830/4, 0x300); ++ INSTANCE_WR(ctx, 0x30850/4, 0x300); ++ INSTANCE_WR(ctx, 0x30870/4, 0x300); ++ INSTANCE_WR(ctx, 0x30890/4, 0x300); ++ INSTANCE_WR(ctx, 0x308B0/4, 0x300); ++ INSTANCE_WR(ctx, 0x308D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x308F0/4, 0xF); ++ INSTANCE_WR(ctx, 0x309F0/4, 0x20); ++ INSTANCE_WR(ctx, 0x30A10/4, 0x11); ++ INSTANCE_WR(ctx, 0x30A30/4, 0x100); ++ INSTANCE_WR(ctx, 0x30A70/4, 0x1); ++ INSTANCE_WR(ctx, 0x30AD0/4, 0x40); ++ INSTANCE_WR(ctx, 0x30AF0/4, 0x100); ++ INSTANCE_WR(ctx, 0x30B30/4, 0x3); ++ INSTANCE_WR(ctx, 0x30BD0/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x30C50/4, 0x2); ++ INSTANCE_WR(ctx, 0x30C70/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x30DB0/4, 0x1); ++ INSTANCE_WR(ctx, 0x30E50/4, 0x4); ++ INSTANCE_WR(ctx, 0x30E90/4, 0x1); ++ INSTANCE_WR(ctx, 0x30EB0/4, 0x400); ++ INSTANCE_WR(ctx, 0x30ED0/4, 0x300); ++ INSTANCE_WR(ctx, 0x30EF0/4, 0x1001); ++ INSTANCE_WR(ctx, 0x30F70/4, 0x11); ++ INSTANCE_WR(ctx, 0x31070/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x31090/4, 0xF); ++ INSTANCE_WR(ctx, 0x31390/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x31410/4, 0x11); ++ INSTANCE_WR(ctx, 0x31470/4, 0x4); ++ INSTANCE_WR(ctx, 0x314B0/4, 0x1); ++ INSTANCE_WR(ctx, 0x314D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x31550/4, 0x1); ++ INSTANCE_WR(ctx, 0x315F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x31630/4, 0x1); ++ INSTANCE_WR(ctx, 0x316B0/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x316F0/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x31710/4, 0x40); ++ INSTANCE_WR(ctx, 0x31730/4, 0x100); ++ INSTANCE_WR(ctx, 0x31750/4, 0x10100); ++ INSTANCE_WR(ctx, 0x31770/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x319D0/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x319F0/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x31A10/4, 0x1); ++ INSTANCE_WR(ctx, 0x31A50/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31A70/4, 0x1); ++ INSTANCE_WR(ctx, 0x31AD0/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31BF0/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C30/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C50/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31C70/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31C90/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x31CB0/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x31CF0/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1534/4, 0x4); ++ INSTANCE_WR(ctx, 0x17F4/4, 0x4); ++ INSTANCE_WR(ctx, 0x1814/4, 0x4); ++ INSTANCE_WR(ctx, 0x1834/4, 0x608080); ++ INSTANCE_WR(ctx, 0x18D4/4, 0x4); ++ INSTANCE_WR(ctx, 0x1934/4, 0x4); ++ INSTANCE_WR(ctx, 0x1954/4, 0x4); ++ INSTANCE_WR(ctx, 0x1974/4, 0x80); ++ INSTANCE_WR(ctx, 0x1994/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E34/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E54/4, 0x80); ++ INSTANCE_WR(ctx, 0x1E74/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E94/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1EB4/4, 0x3); ++ INSTANCE_WR(ctx, 0x1ED4/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F74/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F94/4, 0x3); ++ 
INSTANCE_WR(ctx, 0x2014/4, 0x4); ++ INSTANCE_WR(ctx, 0x164B4/4, 0x4); ++ INSTANCE_WR(ctx, 0x164D4/4, 0x3); ++ INSTANCE_WR(ctx, 0x16714/4, 0xF); ++ INSTANCE_WR(ctx, 0x16894/4, 0x4); ++ INSTANCE_WR(ctx, 0x168B4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168D4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168F4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16914/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16A34/4, 0x1); ++ INSTANCE_WR(ctx, 0x16AB4/4, 0x1); ++ INSTANCE_WR(ctx, 0x16B74/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D14/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D34/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D54/4, 0x2); ++ INSTANCE_WR(ctx, 0x16D74/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D94/4, 0x1); ++ INSTANCE_WR(ctx, 0x16DB4/4, 0x2); ++ INSTANCE_WR(ctx, 0x16DD4/4, 0x1); ++ INSTANCE_WR(ctx, 0x16E14/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F14/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16F74/4, 0x4); ++ INSTANCE_WR(ctx, 0x16FF4/4, 0x11); ++ INSTANCE_WR(ctx, 0x17014/4, 0x1); ++ INSTANCE_WR(ctx, 0x17054/4, 0xCF); ++ INSTANCE_WR(ctx, 0x17074/4, 0xCF); ++ INSTANCE_WR(ctx, 0x17094/4, 0xCF); ++ INSTANCE_WR(ctx, 0x171F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x17214/4, 0x1); ++ INSTANCE_WR(ctx, 0x17234/4, 0x2); ++ INSTANCE_WR(ctx, 0x17254/4, 0x1); ++ INSTANCE_WR(ctx, 0x17274/4, 0x1); ++ INSTANCE_WR(ctx, 0x17294/4, 0x2); ++ INSTANCE_WR(ctx, 0x172B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x172F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x17314/4, 0x1); ++ INSTANCE_WR(ctx, 0x17334/4, 0x1); ++ INSTANCE_WR(ctx, 0x17354/4, 0x1); ++ INSTANCE_WR(ctx, 0x17374/4, 0x1); ++ INSTANCE_WR(ctx, 0x17394/4, 0x1); ++ INSTANCE_WR(ctx, 0x173B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x173D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x173F4/4, 0x11); ++ INSTANCE_WR(ctx, 0x174F4/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x17514/4, 0xF); ++ INSTANCE_WR(ctx, 0x17614/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x17674/4, 0x11); ++ INSTANCE_WR(ctx, 0x17694/4, 0x1); ++ INSTANCE_WR(ctx, 0x17714/4, 0x4); ++ INSTANCE_WR(ctx, 0x177D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x17874/4, 0x11); ++ INSTANCE_WR(ctx, 0x17974/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x179F4/4, 0x11); ++ INSTANCE_WR(ctx, 0x17A14/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A54/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A94/4, 0x1); ++ INSTANCE_WR(ctx, 0x17AD4/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17B14/4, 0x1); ++ INSTANCE_WR(ctx, 0x17B54/4, 0x1); ++ INSTANCE_WR(ctx, 0x180B4/4, 0x8); ++ INSTANCE_WR(ctx, 0x180D4/4, 0x8); ++ INSTANCE_WR(ctx, 0x180F4/4, 0x8); ++ INSTANCE_WR(ctx, 0x18114/4, 0x8); ++ INSTANCE_WR(ctx, 0x18134/4, 0x8); ++ INSTANCE_WR(ctx, 0x18154/4, 0x8); ++ INSTANCE_WR(ctx, 0x18174/4, 0x8); ++ INSTANCE_WR(ctx, 0x18194/4, 0x8); ++ INSTANCE_WR(ctx, 0x181B4/4, 0x11); ++ INSTANCE_WR(ctx, 0x182B4/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x182D4/4, 0x400); ++ INSTANCE_WR(ctx, 0x182F4/4, 0x400); ++ INSTANCE_WR(ctx, 0x18314/4, 0x400); ++ INSTANCE_WR(ctx, 0x18334/4, 0x400); ++ INSTANCE_WR(ctx, 0x18354/4, 0x400); ++ INSTANCE_WR(ctx, 0x18374/4, 0x400); ++ INSTANCE_WR(ctx, 0x18394/4, 0x400); ++ INSTANCE_WR(ctx, 0x183B4/4, 0x400); ++ INSTANCE_WR(ctx, 0x183D4/4, 0x300); ++ INSTANCE_WR(ctx, 0x183F4/4, 0x300); ++ INSTANCE_WR(ctx, 0x18414/4, 0x300); ++ INSTANCE_WR(ctx, 0x18434/4, 0x300); ++ INSTANCE_WR(ctx, 0x18454/4, 0x300); ++ INSTANCE_WR(ctx, 0x18474/4, 0x300); ++ INSTANCE_WR(ctx, 0x18494/4, 0x300); ++ INSTANCE_WR(ctx, 0x184B4/4, 0x300); ++ INSTANCE_WR(ctx, 0x184D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x184F4/4, 0xF); ++ INSTANCE_WR(ctx, 0x185F4/4, 0x20); ++ INSTANCE_WR(ctx, 0x18614/4, 0x11); ++ INSTANCE_WR(ctx, 0x18634/4, 0x100); ++ INSTANCE_WR(ctx, 0x18674/4, 0x1); ++ INSTANCE_WR(ctx, 0x186D4/4, 0x40); ++ INSTANCE_WR(ctx, 0x186F4/4, 0x100); ++ INSTANCE_WR(ctx, 
0x18734/4, 0x3); ++ INSTANCE_WR(ctx, 0x187D4/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x18854/4, 0x2); ++ INSTANCE_WR(ctx, 0x18874/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x189B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x18A54/4, 0x4); ++ INSTANCE_WR(ctx, 0x18A94/4, 0x1); ++ INSTANCE_WR(ctx, 0x18AB4/4, 0x400); ++ INSTANCE_WR(ctx, 0x18AD4/4, 0x300); ++ INSTANCE_WR(ctx, 0x18AF4/4, 0x1001); ++ INSTANCE_WR(ctx, 0x18B74/4, 0x11); ++ INSTANCE_WR(ctx, 0x18C74/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x18C94/4, 0xF); ++ INSTANCE_WR(ctx, 0x18F94/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x19014/4, 0x11); ++ INSTANCE_WR(ctx, 0x19074/4, 0x4); ++ INSTANCE_WR(ctx, 0x190B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x190D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x19154/4, 0x1); ++ INSTANCE_WR(ctx, 0x191F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x19234/4, 0x1); ++ INSTANCE_WR(ctx, 0x192B4/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x192F4/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x19314/4, 0x40); ++ INSTANCE_WR(ctx, 0x19334/4, 0x100); ++ INSTANCE_WR(ctx, 0x19354/4, 0x10100); ++ INSTANCE_WR(ctx, 0x19374/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x195D4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x195F4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x19614/4, 0x1); ++ INSTANCE_WR(ctx, 0x19654/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x19674/4, 0x1); ++ INSTANCE_WR(ctx, 0x196D4/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x197F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x19834/4, 0x1); ++ INSTANCE_WR(ctx, 0x19854/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x19874/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x19894/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x198B4/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x198F4/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19934/4, 0x4); ++ INSTANCE_WR(ctx, 0x19BF4/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C14/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C34/4, 0x608080); ++ INSTANCE_WR(ctx, 0x19CD4/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D34/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D54/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D74/4, 0x80); ++ INSTANCE_WR(ctx, 0x19D94/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A234/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A254/4, 0x80); ++ INSTANCE_WR(ctx, 0x1A274/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A294/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1A2B4/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A2D4/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A374/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A394/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A414/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8B4/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8D4/4, 0x3); ++ INSTANCE_WR(ctx, 0x2EB14/4, 0xF); ++ INSTANCE_WR(ctx, 0x2EC94/4, 0x4); ++ INSTANCE_WR(ctx, 0x2ECB4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECD4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECF4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ED14/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2EE34/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EEB4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EF74/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F114/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F134/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F154/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F174/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F194/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F1B4/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F1D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F214/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F314/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F374/4, 0x4); ++ INSTANCE_WR(ctx, 0x2F3F4/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F414/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F454/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F474/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F494/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F5F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F614/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F634/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F654/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F674/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F694/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F6B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F6F4/4, 0x1); ++ INSTANCE_WR(ctx, 
0x2F714/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F734/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F754/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F774/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F794/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7F4/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F8F4/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F914/4, 0xF); ++ INSTANCE_WR(ctx, 0x2FA14/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x2FA74/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FA94/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FB14/4, 0x4); ++ INSTANCE_WR(ctx, 0x2FBD4/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FC74/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FD74/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2FDF4/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FE14/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE54/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE94/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FED4/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x2FF14/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FF54/4, 0x1); ++ INSTANCE_WR(ctx, 0x304B4/4, 0x8); ++ INSTANCE_WR(ctx, 0x304D4/4, 0x8); ++ INSTANCE_WR(ctx, 0x304F4/4, 0x8); ++ INSTANCE_WR(ctx, 0x30514/4, 0x8); ++ INSTANCE_WR(ctx, 0x30534/4, 0x8); ++ INSTANCE_WR(ctx, 0x30554/4, 0x8); ++ INSTANCE_WR(ctx, 0x30574/4, 0x8); ++ INSTANCE_WR(ctx, 0x30594/4, 0x8); ++ INSTANCE_WR(ctx, 0x305B4/4, 0x11); ++ INSTANCE_WR(ctx, 0x306B4/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x306D4/4, 0x400); ++ INSTANCE_WR(ctx, 0x306F4/4, 0x400); ++ INSTANCE_WR(ctx, 0x30714/4, 0x400); ++ INSTANCE_WR(ctx, 0x30734/4, 0x400); ++ INSTANCE_WR(ctx, 0x30754/4, 0x400); ++ INSTANCE_WR(ctx, 0x30774/4, 0x400); ++ INSTANCE_WR(ctx, 0x30794/4, 0x400); ++ INSTANCE_WR(ctx, 0x307B4/4, 0x400); ++ INSTANCE_WR(ctx, 0x307D4/4, 0x300); ++ INSTANCE_WR(ctx, 0x307F4/4, 0x300); ++ INSTANCE_WR(ctx, 0x30814/4, 0x300); ++ INSTANCE_WR(ctx, 0x30834/4, 0x300); ++ INSTANCE_WR(ctx, 0x30854/4, 0x300); ++ INSTANCE_WR(ctx, 0x30874/4, 0x300); ++ INSTANCE_WR(ctx, 0x30894/4, 0x300); ++ INSTANCE_WR(ctx, 0x308B4/4, 0x300); ++ INSTANCE_WR(ctx, 0x308D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x308F4/4, 0xF); ++ INSTANCE_WR(ctx, 0x309F4/4, 0x20); ++ INSTANCE_WR(ctx, 0x30A14/4, 0x11); ++ INSTANCE_WR(ctx, 0x30A34/4, 0x100); ++ INSTANCE_WR(ctx, 0x30A74/4, 0x1); ++ INSTANCE_WR(ctx, 0x30AD4/4, 0x40); ++ INSTANCE_WR(ctx, 0x30AF4/4, 0x100); ++ INSTANCE_WR(ctx, 0x30B34/4, 0x3); ++ INSTANCE_WR(ctx, 0x30BD4/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x30C54/4, 0x2); ++ INSTANCE_WR(ctx, 0x30C74/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x30DB4/4, 0x1); ++ INSTANCE_WR(ctx, 0x30E54/4, 0x4); ++ INSTANCE_WR(ctx, 0x30E94/4, 0x1); ++ INSTANCE_WR(ctx, 0x30EB4/4, 0x400); ++ INSTANCE_WR(ctx, 0x30ED4/4, 0x300); ++ INSTANCE_WR(ctx, 0x30EF4/4, 0x1001); ++ INSTANCE_WR(ctx, 0x30F74/4, 0x11); ++ INSTANCE_WR(ctx, 0x31074/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x31094/4, 0xF); ++ INSTANCE_WR(ctx, 0x31394/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x31414/4, 0x11); ++ INSTANCE_WR(ctx, 0x31474/4, 0x4); ++ INSTANCE_WR(ctx, 0x314B4/4, 0x1); ++ INSTANCE_WR(ctx, 0x314D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x31554/4, 0x1); ++ INSTANCE_WR(ctx, 0x315F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x31634/4, 0x1); ++ INSTANCE_WR(ctx, 0x316B4/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x316F4/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x31714/4, 0x40); ++ INSTANCE_WR(ctx, 0x31734/4, 0x100); ++ INSTANCE_WR(ctx, 0x31754/4, 0x10100); ++ INSTANCE_WR(ctx, 0x31774/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x319D4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x319F4/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x31A14/4, 0x1); ++ INSTANCE_WR(ctx, 0x31A54/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31A74/4, 0x1); ++ INSTANCE_WR(ctx, 0x31AD4/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31BF4/4, 0x1); ++ INSTANCE_WR(ctx, 
0x31C34/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C54/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31C74/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31C94/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x31CB4/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x31CF4/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1538/4, 0x4); ++ INSTANCE_WR(ctx, 0x17F8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1818/4, 0x4); ++ INSTANCE_WR(ctx, 0x1838/4, 0x608080); ++ INSTANCE_WR(ctx, 0x18D8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1938/4, 0x4); ++ INSTANCE_WR(ctx, 0x1958/4, 0x4); ++ INSTANCE_WR(ctx, 0x1978/4, 0x80); ++ INSTANCE_WR(ctx, 0x1998/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E38/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E58/4, 0x80); ++ INSTANCE_WR(ctx, 0x1E78/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E98/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1EB8/4, 0x3); ++ INSTANCE_WR(ctx, 0x1ED8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F78/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F98/4, 0x3); ++ INSTANCE_WR(ctx, 0x2018/4, 0x4); ++ INSTANCE_WR(ctx, 0x164B8/4, 0x4); ++ INSTANCE_WR(ctx, 0x164D8/4, 0x3); ++ INSTANCE_WR(ctx, 0x16718/4, 0xF); ++ INSTANCE_WR(ctx, 0x16898/4, 0x4); ++ INSTANCE_WR(ctx, 0x168B8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168D8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168F8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16918/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16A38/4, 0x1); ++ INSTANCE_WR(ctx, 0x16AB8/4, 0x1); ++ INSTANCE_WR(ctx, 0x16B78/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D18/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D38/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D58/4, 0x2); ++ INSTANCE_WR(ctx, 0x16D78/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D98/4, 0x1); ++ INSTANCE_WR(ctx, 0x16DB8/4, 0x2); ++ INSTANCE_WR(ctx, 0x16DD8/4, 0x1); ++ INSTANCE_WR(ctx, 0x16E18/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F18/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16F78/4, 0x4); ++ INSTANCE_WR(ctx, 0x16FF8/4, 0x11); ++ INSTANCE_WR(ctx, 0x17018/4, 0x1); ++ INSTANCE_WR(ctx, 0x17058/4, 0xCF); ++ INSTANCE_WR(ctx, 0x17078/4, 0xCF); ++ INSTANCE_WR(ctx, 0x17098/4, 0xCF); ++ INSTANCE_WR(ctx, 0x171F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x17218/4, 0x1); ++ INSTANCE_WR(ctx, 0x17238/4, 0x2); ++ INSTANCE_WR(ctx, 0x17258/4, 0x1); ++ INSTANCE_WR(ctx, 0x17278/4, 0x1); ++ INSTANCE_WR(ctx, 0x17298/4, 0x2); ++ INSTANCE_WR(ctx, 0x172B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x172F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x17318/4, 0x1); ++ INSTANCE_WR(ctx, 0x17338/4, 0x1); ++ INSTANCE_WR(ctx, 0x17358/4, 0x1); ++ INSTANCE_WR(ctx, 0x17378/4, 0x1); ++ INSTANCE_WR(ctx, 0x17398/4, 0x1); ++ INSTANCE_WR(ctx, 0x173B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x173D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x173F8/4, 0x11); ++ INSTANCE_WR(ctx, 0x174F8/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x17518/4, 0xF); ++ INSTANCE_WR(ctx, 0x17618/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x17678/4, 0x11); ++ INSTANCE_WR(ctx, 0x17698/4, 0x1); ++ INSTANCE_WR(ctx, 0x17718/4, 0x4); ++ INSTANCE_WR(ctx, 0x177D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x17878/4, 0x11); ++ INSTANCE_WR(ctx, 0x17978/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x179F8/4, 0x11); ++ INSTANCE_WR(ctx, 0x17A18/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A58/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A98/4, 0x1); ++ INSTANCE_WR(ctx, 0x17AD8/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17B18/4, 0x1); ++ INSTANCE_WR(ctx, 0x17B58/4, 0x1); ++ INSTANCE_WR(ctx, 0x180B8/4, 0x8); ++ INSTANCE_WR(ctx, 0x180D8/4, 0x8); ++ INSTANCE_WR(ctx, 0x180F8/4, 0x8); ++ INSTANCE_WR(ctx, 0x18118/4, 0x8); ++ INSTANCE_WR(ctx, 0x18138/4, 0x8); ++ INSTANCE_WR(ctx, 0x18158/4, 0x8); ++ INSTANCE_WR(ctx, 0x18178/4, 0x8); ++ INSTANCE_WR(ctx, 0x18198/4, 0x8); ++ INSTANCE_WR(ctx, 0x181B8/4, 0x11); ++ INSTANCE_WR(ctx, 0x182B8/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x182D8/4, 0x400); ++ INSTANCE_WR(ctx, 0x182F8/4, 0x400); ++ 
INSTANCE_WR(ctx, 0x18318/4, 0x400); ++ INSTANCE_WR(ctx, 0x18338/4, 0x400); ++ INSTANCE_WR(ctx, 0x18358/4, 0x400); ++ INSTANCE_WR(ctx, 0x18378/4, 0x400); ++ INSTANCE_WR(ctx, 0x18398/4, 0x400); ++ INSTANCE_WR(ctx, 0x183B8/4, 0x400); ++ INSTANCE_WR(ctx, 0x183D8/4, 0x300); ++ INSTANCE_WR(ctx, 0x183F8/4, 0x300); ++ INSTANCE_WR(ctx, 0x18418/4, 0x300); ++ INSTANCE_WR(ctx, 0x18438/4, 0x300); ++ INSTANCE_WR(ctx, 0x18458/4, 0x300); ++ INSTANCE_WR(ctx, 0x18478/4, 0x300); ++ INSTANCE_WR(ctx, 0x18498/4, 0x300); ++ INSTANCE_WR(ctx, 0x184B8/4, 0x300); ++ INSTANCE_WR(ctx, 0x184D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x184F8/4, 0xF); ++ INSTANCE_WR(ctx, 0x185F8/4, 0x20); ++ INSTANCE_WR(ctx, 0x18618/4, 0x11); ++ INSTANCE_WR(ctx, 0x18638/4, 0x100); ++ INSTANCE_WR(ctx, 0x18678/4, 0x1); ++ INSTANCE_WR(ctx, 0x186D8/4, 0x40); ++ INSTANCE_WR(ctx, 0x186F8/4, 0x100); ++ INSTANCE_WR(ctx, 0x18738/4, 0x3); ++ INSTANCE_WR(ctx, 0x187D8/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x18858/4, 0x2); ++ INSTANCE_WR(ctx, 0x18878/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x189B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x18A58/4, 0x4); ++ INSTANCE_WR(ctx, 0x18A98/4, 0x1); ++ INSTANCE_WR(ctx, 0x18AB8/4, 0x400); ++ INSTANCE_WR(ctx, 0x18AD8/4, 0x300); ++ INSTANCE_WR(ctx, 0x18AF8/4, 0x1001); ++ INSTANCE_WR(ctx, 0x18B78/4, 0x11); ++ INSTANCE_WR(ctx, 0x18C78/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x18C98/4, 0xF); ++ INSTANCE_WR(ctx, 0x18F98/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x19018/4, 0x11); ++ INSTANCE_WR(ctx, 0x19078/4, 0x4); ++ INSTANCE_WR(ctx, 0x190B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x190D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x19158/4, 0x1); ++ INSTANCE_WR(ctx, 0x191F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x19238/4, 0x1); ++ INSTANCE_WR(ctx, 0x192B8/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x192F8/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x19318/4, 0x40); ++ INSTANCE_WR(ctx, 0x19338/4, 0x100); ++ INSTANCE_WR(ctx, 0x19358/4, 0x10100); ++ INSTANCE_WR(ctx, 0x19378/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x195D8/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x195F8/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x19618/4, 0x1); ++ INSTANCE_WR(ctx, 0x19658/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x19678/4, 0x1); ++ INSTANCE_WR(ctx, 0x196D8/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x197F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x19838/4, 0x1); ++ INSTANCE_WR(ctx, 0x19858/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x19878/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x19898/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x198B8/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x198F8/4, 0x1A); ++ INSTANCE_WR(ctx, 0x19938/4, 0x4); ++ INSTANCE_WR(ctx, 0x19BF8/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C18/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C38/4, 0x608080); ++ INSTANCE_WR(ctx, 0x19CD8/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D38/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D58/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D78/4, 0x80); ++ INSTANCE_WR(ctx, 0x19D98/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A238/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A258/4, 0x80); ++ INSTANCE_WR(ctx, 0x1A278/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A298/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1A2B8/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A2D8/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A378/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A398/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A418/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8B8/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8D8/4, 0x3); ++ INSTANCE_WR(ctx, 0x2EB18/4, 0xF); ++ INSTANCE_WR(ctx, 0x2EC98/4, 0x4); ++ INSTANCE_WR(ctx, 0x2ECB8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECD8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECF8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ED18/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2EE38/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EEB8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EF78/4, 0x1); ++ INSTANCE_WR(ctx, 
0x2F118/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F138/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F158/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F178/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F198/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F1B8/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F1D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F218/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F318/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F378/4, 0x4); ++ INSTANCE_WR(ctx, 0x2F3F8/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F418/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F458/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F478/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F498/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F5F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F618/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F638/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F658/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F678/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F698/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F6B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F6F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F718/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F738/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F758/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F778/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F798/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7F8/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F8F8/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F918/4, 0xF); ++ INSTANCE_WR(ctx, 0x2FA18/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x2FA78/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FA98/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FB18/4, 0x4); ++ INSTANCE_WR(ctx, 0x2FBD8/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FC78/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FD78/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2FDF8/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FE18/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE58/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE98/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FED8/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x2FF18/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FF58/4, 0x1); ++ INSTANCE_WR(ctx, 0x304B8/4, 0x8); ++ INSTANCE_WR(ctx, 0x304D8/4, 0x8); ++ INSTANCE_WR(ctx, 0x304F8/4, 0x8); ++ INSTANCE_WR(ctx, 0x30518/4, 0x8); ++ INSTANCE_WR(ctx, 0x30538/4, 0x8); ++ INSTANCE_WR(ctx, 0x30558/4, 0x8); ++ INSTANCE_WR(ctx, 0x30578/4, 0x8); ++ INSTANCE_WR(ctx, 0x30598/4, 0x8); ++ INSTANCE_WR(ctx, 0x305B8/4, 0x11); ++ INSTANCE_WR(ctx, 0x306B8/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x306D8/4, 0x400); ++ INSTANCE_WR(ctx, 0x306F8/4, 0x400); ++ INSTANCE_WR(ctx, 0x30718/4, 0x400); ++ INSTANCE_WR(ctx, 0x30738/4, 0x400); ++ INSTANCE_WR(ctx, 0x30758/4, 0x400); ++ INSTANCE_WR(ctx, 0x30778/4, 0x400); ++ INSTANCE_WR(ctx, 0x30798/4, 0x400); ++ INSTANCE_WR(ctx, 0x307B8/4, 0x400); ++ INSTANCE_WR(ctx, 0x307D8/4, 0x300); ++ INSTANCE_WR(ctx, 0x307F8/4, 0x300); ++ INSTANCE_WR(ctx, 0x30818/4, 0x300); ++ INSTANCE_WR(ctx, 0x30838/4, 0x300); ++ INSTANCE_WR(ctx, 0x30858/4, 0x300); ++ INSTANCE_WR(ctx, 0x30878/4, 0x300); ++ INSTANCE_WR(ctx, 0x30898/4, 0x300); ++ INSTANCE_WR(ctx, 0x308B8/4, 0x300); ++ INSTANCE_WR(ctx, 0x308D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x308F8/4, 0xF); ++ INSTANCE_WR(ctx, 0x309F8/4, 0x20); ++ INSTANCE_WR(ctx, 0x30A18/4, 0x11); ++ INSTANCE_WR(ctx, 0x30A38/4, 0x100); ++ INSTANCE_WR(ctx, 0x30A78/4, 0x1); ++ INSTANCE_WR(ctx, 0x30AD8/4, 0x40); ++ INSTANCE_WR(ctx, 0x30AF8/4, 0x100); ++ INSTANCE_WR(ctx, 0x30B38/4, 0x3); ++ INSTANCE_WR(ctx, 0x30BD8/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x30C58/4, 0x2); ++ INSTANCE_WR(ctx, 0x30C78/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x30DB8/4, 0x1); ++ INSTANCE_WR(ctx, 0x30E58/4, 0x4); ++ INSTANCE_WR(ctx, 0x30E98/4, 0x1); ++ INSTANCE_WR(ctx, 0x30EB8/4, 0x400); ++ INSTANCE_WR(ctx, 0x30ED8/4, 0x300); ++ INSTANCE_WR(ctx, 0x30EF8/4, 0x1001); ++ INSTANCE_WR(ctx, 0x30F78/4, 0x11); ++ INSTANCE_WR(ctx, 0x31078/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 
0x31098/4, 0xF); ++ INSTANCE_WR(ctx, 0x31398/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x31418/4, 0x11); ++ INSTANCE_WR(ctx, 0x31478/4, 0x4); ++ INSTANCE_WR(ctx, 0x314B8/4, 0x1); ++ INSTANCE_WR(ctx, 0x314D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x31558/4, 0x1); ++ INSTANCE_WR(ctx, 0x315F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x31638/4, 0x1); ++ INSTANCE_WR(ctx, 0x316B8/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x316F8/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x31718/4, 0x40); ++ INSTANCE_WR(ctx, 0x31738/4, 0x100); ++ INSTANCE_WR(ctx, 0x31758/4, 0x10100); ++ INSTANCE_WR(ctx, 0x31778/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x319D8/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x319F8/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x31A18/4, 0x1); ++ INSTANCE_WR(ctx, 0x31A58/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31A78/4, 0x1); ++ INSTANCE_WR(ctx, 0x31AD8/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31BF8/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C38/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C58/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31C78/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31C98/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x31CB8/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x31CF8/4, 0x1A); ++ INSTANCE_WR(ctx, 0x153C/4, 0x4); ++ INSTANCE_WR(ctx, 0x17FC/4, 0x4); ++ INSTANCE_WR(ctx, 0x181C/4, 0x4); ++ INSTANCE_WR(ctx, 0x183C/4, 0x608080); ++ INSTANCE_WR(ctx, 0x18DC/4, 0x4); ++ INSTANCE_WR(ctx, 0x193C/4, 0x4); ++ INSTANCE_WR(ctx, 0x195C/4, 0x4); ++ INSTANCE_WR(ctx, 0x197C/4, 0x80); ++ INSTANCE_WR(ctx, 0x199C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E3C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E5C/4, 0x80); ++ INSTANCE_WR(ctx, 0x1E7C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1E9C/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1EBC/4, 0x3); ++ INSTANCE_WR(ctx, 0x1EDC/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F7C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1F9C/4, 0x3); ++ INSTANCE_WR(ctx, 0x201C/4, 0x4); ++ INSTANCE_WR(ctx, 0x164BC/4, 0x4); ++ INSTANCE_WR(ctx, 0x164DC/4, 0x3); ++ INSTANCE_WR(ctx, 0x1671C/4, 0xF); ++ INSTANCE_WR(ctx, 0x1689C/4, 0x4); ++ INSTANCE_WR(ctx, 0x168BC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168DC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x168FC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x1691C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x16A3C/4, 0x1); ++ INSTANCE_WR(ctx, 0x16ABC/4, 0x1); ++ INSTANCE_WR(ctx, 0x16B7C/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D3C/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D5C/4, 0x2); ++ INSTANCE_WR(ctx, 0x16D7C/4, 0x1); ++ INSTANCE_WR(ctx, 0x16D9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x16DBC/4, 0x2); ++ INSTANCE_WR(ctx, 0x16DDC/4, 0x1); ++ INSTANCE_WR(ctx, 0x16E1C/4, 0x11); ++ INSTANCE_WR(ctx, 0x16F1C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x16F7C/4, 0x4); ++ INSTANCE_WR(ctx, 0x16FFC/4, 0x11); ++ INSTANCE_WR(ctx, 0x1701C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1705C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x1707C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x1709C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x171FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1721C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1723C/4, 0x2); ++ INSTANCE_WR(ctx, 0x1725C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1727C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1729C/4, 0x2); ++ INSTANCE_WR(ctx, 0x172BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x172FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1731C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1733C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1735C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1737C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1739C/4, 0x1); ++ INSTANCE_WR(ctx, 0x173BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x173DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x173FC/4, 0x11); ++ INSTANCE_WR(ctx, 0x174FC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x1751C/4, 0xF); ++ INSTANCE_WR(ctx, 0x1761C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x1767C/4, 0x11); ++ INSTANCE_WR(ctx, 0x1769C/4, 0x1); ++ 
INSTANCE_WR(ctx, 0x1771C/4, 0x4); ++ INSTANCE_WR(ctx, 0x177DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1787C/4, 0x11); ++ INSTANCE_WR(ctx, 0x1797C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x179FC/4, 0x11); ++ INSTANCE_WR(ctx, 0x17A1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A5C/4, 0x1); ++ INSTANCE_WR(ctx, 0x17A9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x17ADC/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x17B1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x17B5C/4, 0x1); ++ INSTANCE_WR(ctx, 0x180BC/4, 0x8); ++ INSTANCE_WR(ctx, 0x180DC/4, 0x8); ++ INSTANCE_WR(ctx, 0x180FC/4, 0x8); ++ INSTANCE_WR(ctx, 0x1811C/4, 0x8); ++ INSTANCE_WR(ctx, 0x1813C/4, 0x8); ++ INSTANCE_WR(ctx, 0x1815C/4, 0x8); ++ INSTANCE_WR(ctx, 0x1817C/4, 0x8); ++ INSTANCE_WR(ctx, 0x1819C/4, 0x8); ++ INSTANCE_WR(ctx, 0x181BC/4, 0x11); ++ INSTANCE_WR(ctx, 0x182BC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x182DC/4, 0x400); ++ INSTANCE_WR(ctx, 0x182FC/4, 0x400); ++ INSTANCE_WR(ctx, 0x1831C/4, 0x400); ++ INSTANCE_WR(ctx, 0x1833C/4, 0x400); ++ INSTANCE_WR(ctx, 0x1835C/4, 0x400); ++ INSTANCE_WR(ctx, 0x1837C/4, 0x400); ++ INSTANCE_WR(ctx, 0x1839C/4, 0x400); ++ INSTANCE_WR(ctx, 0x183BC/4, 0x400); ++ INSTANCE_WR(ctx, 0x183DC/4, 0x300); ++ INSTANCE_WR(ctx, 0x183FC/4, 0x300); ++ INSTANCE_WR(ctx, 0x1841C/4, 0x300); ++ INSTANCE_WR(ctx, 0x1843C/4, 0x300); ++ INSTANCE_WR(ctx, 0x1845C/4, 0x300); ++ INSTANCE_WR(ctx, 0x1847C/4, 0x300); ++ INSTANCE_WR(ctx, 0x1849C/4, 0x300); ++ INSTANCE_WR(ctx, 0x184BC/4, 0x300); ++ INSTANCE_WR(ctx, 0x184DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x184FC/4, 0xF); ++ INSTANCE_WR(ctx, 0x185FC/4, 0x20); ++ INSTANCE_WR(ctx, 0x1861C/4, 0x11); ++ INSTANCE_WR(ctx, 0x1863C/4, 0x100); ++ INSTANCE_WR(ctx, 0x1867C/4, 0x1); ++ INSTANCE_WR(ctx, 0x186DC/4, 0x40); ++ INSTANCE_WR(ctx, 0x186FC/4, 0x100); ++ INSTANCE_WR(ctx, 0x1873C/4, 0x3); ++ INSTANCE_WR(ctx, 0x187DC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x1885C/4, 0x2); ++ INSTANCE_WR(ctx, 0x1887C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x189BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x18A5C/4, 0x4); ++ INSTANCE_WR(ctx, 0x18A9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x18ABC/4, 0x400); ++ INSTANCE_WR(ctx, 0x18ADC/4, 0x300); ++ INSTANCE_WR(ctx, 0x18AFC/4, 0x1001); ++ INSTANCE_WR(ctx, 0x18B7C/4, 0x11); ++ INSTANCE_WR(ctx, 0x18C7C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x18C9C/4, 0xF); ++ INSTANCE_WR(ctx, 0x18F9C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x1901C/4, 0x11); ++ INSTANCE_WR(ctx, 0x1907C/4, 0x4); ++ INSTANCE_WR(ctx, 0x190BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x190DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1915C/4, 0x1); ++ INSTANCE_WR(ctx, 0x191FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1923C/4, 0x1); ++ INSTANCE_WR(ctx, 0x192BC/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x192FC/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x1931C/4, 0x40); ++ INSTANCE_WR(ctx, 0x1933C/4, 0x100); ++ INSTANCE_WR(ctx, 0x1935C/4, 0x10100); ++ INSTANCE_WR(ctx, 0x1937C/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x195DC/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x195FC/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x1961C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1965C/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x1967C/4, 0x1); ++ INSTANCE_WR(ctx, 0x196DC/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x197FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x1983C/4, 0x1); ++ INSTANCE_WR(ctx, 0x1985C/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x1987C/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x1989C/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x198BC/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x198FC/4, 0x1A); ++ INSTANCE_WR(ctx, 0x1993C/4, 0x4); ++ INSTANCE_WR(ctx, 0x19BFC/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C1C/4, 0x4); ++ INSTANCE_WR(ctx, 0x19C3C/4, 0x608080); ++ INSTANCE_WR(ctx, 0x19CDC/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D3C/4, 0x4); ++ INSTANCE_WR(ctx, 
0x19D5C/4, 0x4); ++ INSTANCE_WR(ctx, 0x19D7C/4, 0x80); ++ INSTANCE_WR(ctx, 0x19D9C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A23C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A25C/4, 0x80); ++ INSTANCE_WR(ctx, 0x1A27C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A29C/4, 0x3020100); ++ INSTANCE_WR(ctx, 0x1A2BC/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A2DC/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A37C/4, 0x4); ++ INSTANCE_WR(ctx, 0x1A39C/4, 0x3); ++ INSTANCE_WR(ctx, 0x1A41C/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8BC/4, 0x4); ++ INSTANCE_WR(ctx, 0x2E8DC/4, 0x3); ++ INSTANCE_WR(ctx, 0x2EB1C/4, 0xF); ++ INSTANCE_WR(ctx, 0x2EC9C/4, 0x4); ++ INSTANCE_WR(ctx, 0x2ECBC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECDC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ECFC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2ED1C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x2EE3C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EEBC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2EF7C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F11C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F13C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F15C/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F17C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F19C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F1BC/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F1DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F21C/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F31C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F37C/4, 0x4); ++ INSTANCE_WR(ctx, 0x2F3FC/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F41C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F45C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F47C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F49C/4, 0xCF); ++ INSTANCE_WR(ctx, 0x2F5FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F61C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F63C/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F65C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F67C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F69C/4, 0x2); ++ INSTANCE_WR(ctx, 0x2F6BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F6FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F71C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F73C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F75C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F77C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F79C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2F7FC/4, 0x11); ++ INSTANCE_WR(ctx, 0x2F8FC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2F91C/4, 0xF); ++ INSTANCE_WR(ctx, 0x2FA1C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x2FA7C/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FA9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FB1C/4, 0x4); ++ INSTANCE_WR(ctx, 0x2FBDC/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FC7C/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FD7C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x2FDFC/4, 0x11); ++ INSTANCE_WR(ctx, 0x2FE1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE5C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FE9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FEDC/4, 0x7FF); ++ INSTANCE_WR(ctx, 0x2FF1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x2FF5C/4, 0x1); ++ INSTANCE_WR(ctx, 0x304BC/4, 0x8); ++ INSTANCE_WR(ctx, 0x304DC/4, 0x8); ++ INSTANCE_WR(ctx, 0x304FC/4, 0x8); ++ INSTANCE_WR(ctx, 0x3051C/4, 0x8); ++ INSTANCE_WR(ctx, 0x3053C/4, 0x8); ++ INSTANCE_WR(ctx, 0x3055C/4, 0x8); ++ INSTANCE_WR(ctx, 0x3057C/4, 0x8); ++ INSTANCE_WR(ctx, 0x3059C/4, 0x8); ++ INSTANCE_WR(ctx, 0x305BC/4, 0x11); ++ INSTANCE_WR(ctx, 0x306BC/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x306DC/4, 0x400); ++ INSTANCE_WR(ctx, 0x306FC/4, 0x400); ++ INSTANCE_WR(ctx, 0x3071C/4, 0x400); ++ INSTANCE_WR(ctx, 0x3073C/4, 0x400); ++ INSTANCE_WR(ctx, 0x3075C/4, 0x400); ++ INSTANCE_WR(ctx, 0x3077C/4, 0x400); ++ INSTANCE_WR(ctx, 0x3079C/4, 0x400); ++ INSTANCE_WR(ctx, 0x307BC/4, 0x400); ++ INSTANCE_WR(ctx, 0x307DC/4, 0x300); ++ INSTANCE_WR(ctx, 0x307FC/4, 0x300); ++ INSTANCE_WR(ctx, 0x3081C/4, 0x300); ++ INSTANCE_WR(ctx, 0x3083C/4, 0x300); ++ INSTANCE_WR(ctx, 0x3085C/4, 0x300); ++ INSTANCE_WR(ctx, 0x3087C/4, 0x300); ++ 
INSTANCE_WR(ctx, 0x3089C/4, 0x300); ++ INSTANCE_WR(ctx, 0x308BC/4, 0x300); ++ INSTANCE_WR(ctx, 0x308DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x308FC/4, 0xF); ++ INSTANCE_WR(ctx, 0x309FC/4, 0x20); ++ INSTANCE_WR(ctx, 0x30A1C/4, 0x11); ++ INSTANCE_WR(ctx, 0x30A3C/4, 0x100); ++ INSTANCE_WR(ctx, 0x30A7C/4, 0x1); ++ INSTANCE_WR(ctx, 0x30ADC/4, 0x40); ++ INSTANCE_WR(ctx, 0x30AFC/4, 0x100); ++ INSTANCE_WR(ctx, 0x30B3C/4, 0x3); ++ INSTANCE_WR(ctx, 0x30BDC/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x30C5C/4, 0x2); ++ INSTANCE_WR(ctx, 0x30C7C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x30DBC/4, 0x1); ++ INSTANCE_WR(ctx, 0x30E5C/4, 0x4); ++ INSTANCE_WR(ctx, 0x30E9C/4, 0x1); ++ INSTANCE_WR(ctx, 0x30EBC/4, 0x400); ++ INSTANCE_WR(ctx, 0x30EDC/4, 0x300); ++ INSTANCE_WR(ctx, 0x30EFC/4, 0x1001); ++ INSTANCE_WR(ctx, 0x30F7C/4, 0x11); ++ INSTANCE_WR(ctx, 0x3107C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x3109C/4, 0xF); ++ INSTANCE_WR(ctx, 0x3139C/4, 0x1FFE67); ++ INSTANCE_WR(ctx, 0x3141C/4, 0x11); ++ INSTANCE_WR(ctx, 0x3147C/4, 0x4); ++ INSTANCE_WR(ctx, 0x314BC/4, 0x1); ++ INSTANCE_WR(ctx, 0x314DC/4, 0x1); ++ INSTANCE_WR(ctx, 0x3155C/4, 0x1); ++ INSTANCE_WR(ctx, 0x315FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x3163C/4, 0x1); ++ INSTANCE_WR(ctx, 0x316BC/4, 0x2A712488); ++ INSTANCE_WR(ctx, 0x316FC/4, 0x4085C000); ++ INSTANCE_WR(ctx, 0x3171C/4, 0x40); ++ INSTANCE_WR(ctx, 0x3173C/4, 0x100); ++ INSTANCE_WR(ctx, 0x3175C/4, 0x10100); ++ INSTANCE_WR(ctx, 0x3177C/4, 0x2800000); ++ INSTANCE_WR(ctx, 0x319DC/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x319FC/4, 0x4E3BFDF); ++ INSTANCE_WR(ctx, 0x31A1C/4, 0x1); ++ INSTANCE_WR(ctx, 0x31A5C/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31A7C/4, 0x1); ++ INSTANCE_WR(ctx, 0x31ADC/4, 0xFFFF00); ++ INSTANCE_WR(ctx, 0x31BFC/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C3C/4, 0x1); ++ INSTANCE_WR(ctx, 0x31C5C/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x31C7C/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x31C9C/4, 0xB8A89888); ++ INSTANCE_WR(ctx, 0x31CBC/4, 0xF8E8D8C8); ++ INSTANCE_WR(ctx, 0x31CFC/4, 0x1A); ++ INSTANCE_WR(ctx, 0x5D000/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D040/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D060/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D080/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D0A0/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D100/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D160/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D1A0/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1C0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D340/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D360/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D380/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D3A0/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D400/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D460/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D4A0/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4C0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D620/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D700/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D720/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D740/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D760/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D780/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D7A0/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7C0/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7E0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D820/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8E0/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D900/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D940/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D960/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA80/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB20/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC60/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC80/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCA0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCC0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCE0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD00/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD20/4, 0x1); ++ INSTANCE_WR(ctx, 
0x5DD60/4, 0x4); ++ INSTANCE_WR(ctx, 0x651C0/4, 0x11); ++ INSTANCE_WR(ctx, 0x65200/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D024/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D044/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D064/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D084/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D144/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D184/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1A4/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D324/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D344/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D364/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D384/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D3E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D444/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D484/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4A4/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D604/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6E4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D704/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D724/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D744/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D764/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D784/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7A4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7C4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D804/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8C4/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8E4/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D924/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D944/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA64/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB04/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC44/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC64/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC84/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCA4/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCC4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCE4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD04/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD44/4, 0x4); ++ INSTANCE_WR(ctx, 0x651A4/4, 0x11); ++ INSTANCE_WR(ctx, 0x651E4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D028/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D048/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D068/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D088/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0E8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D148/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D188/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1A8/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D328/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D348/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D368/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D388/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D3E8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D448/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D488/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4A8/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D608/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6E8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D708/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D728/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D748/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D768/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D788/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7A8/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7C8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D808/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8C8/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8E8/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D928/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D948/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA68/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB08/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC48/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC68/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC88/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCA8/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCC8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCE8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD08/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD48/4, 0x4); ++ INSTANCE_WR(ctx, 0x651A8/4, 0x11); ++ INSTANCE_WR(ctx, 0x651E8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D02C/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D04C/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D06C/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D08C/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0EC/4, 
0x1); ++ INSTANCE_WR(ctx, 0x5D14C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D18C/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1AC/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D32C/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D34C/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D36C/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D38C/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D3EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D44C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D48C/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4AC/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D60C/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6EC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D70C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D72C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D74C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D76C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D78C/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7AC/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7CC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D80C/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8CC/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8EC/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D92C/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D94C/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA6C/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB0C/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC4C/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC6C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC8C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCAC/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCCC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCEC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD0C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD4C/4, 0x4); ++ INSTANCE_WR(ctx, 0x651AC/4, 0x11); ++ INSTANCE_WR(ctx, 0x651EC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D030/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D050/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D070/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D090/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D150/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D190/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1B0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D330/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D350/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D370/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D390/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D3F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D450/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D490/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4B0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D610/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6F0/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D710/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D730/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D750/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D770/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D790/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7B0/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7D0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D810/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8D0/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8F0/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D930/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D950/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA70/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB10/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC50/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC70/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC90/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCB0/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCD0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCF0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD10/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD50/4, 0x4); ++ INSTANCE_WR(ctx, 0x651B0/4, 0x11); ++ INSTANCE_WR(ctx, 0x651F0/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D034/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D054/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D074/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D094/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D154/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D194/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1B4/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D334/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D354/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D374/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D394/4, 0x1000); ++ 
INSTANCE_WR(ctx, 0x5D3F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D454/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D494/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4B4/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D614/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6F4/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D714/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D734/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D754/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D774/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D794/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7B4/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7D4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D814/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8D4/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8F4/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D934/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D954/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA74/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB14/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC54/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC74/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC94/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCB4/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCD4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCF4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD14/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD54/4, 0x4); ++ INSTANCE_WR(ctx, 0x651B4/4, 0x11); ++ INSTANCE_WR(ctx, 0x651F4/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D038/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D058/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D078/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D098/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D158/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D198/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1B8/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D338/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D358/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D378/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D398/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D3F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D458/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D498/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4B8/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D618/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6F8/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D718/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D738/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D758/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D778/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D798/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7B8/4, 0x10001); ++ INSTANCE_WR(ctx, 0x5D7D8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D818/4, 0x1FE21); ++ INSTANCE_WR(ctx, 0x5D8D8/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D8F8/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D938/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D958/4, 0x11); ++ INSTANCE_WR(ctx, 0x5DA78/4, 0xFAC6881); ++ INSTANCE_WR(ctx, 0x5DB18/4, 0x4); ++ INSTANCE_WR(ctx, 0x5DC58/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DC78/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DC98/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCB8/4, 0x2); ++ INSTANCE_WR(ctx, 0x5DCD8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DCF8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD18/4, 0x1); ++ INSTANCE_WR(ctx, 0x5DD58/4, 0x4); ++ INSTANCE_WR(ctx, 0x651B8/4, 0x11); ++ INSTANCE_WR(ctx, 0x651F8/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D03C/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D05C/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D07C/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D09C/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D0FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D15C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D19C/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D1BC/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D33C/4, 0x80); ++ INSTANCE_WR(ctx, 0x5D35C/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x5D37C/4, 0x4000400); ++ INSTANCE_WR(ctx, 0x5D39C/4, 0x1000); ++ INSTANCE_WR(ctx, 0x5D3FC/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D45C/4, 0x1); ++ INSTANCE_WR(ctx, 0x5D49C/4, 0x4); ++ INSTANCE_WR(ctx, 0x5D4BC/4, 0x2); ++ INSTANCE_WR(ctx, 0x5D61C/4, 0x8100C12); ++ INSTANCE_WR(ctx, 0x5D6FC/4, 0xFFFF); ++ INSTANCE_WR(ctx, 0x5D71C/4, 0xFFFF); ++ INSTANCE_WR(ctx, 
0x5D73C/4, 0xFFFF);
+	INSTANCE_WR(ctx, 0x5D75C/4, 0xFFFF);
+	INSTANCE_WR(ctx, 0x5D77C/4, 0x1);
+	INSTANCE_WR(ctx, 0x5D79C/4, 0x10001);
+	INSTANCE_WR(ctx, 0x5D7BC/4, 0x10001);
+	INSTANCE_WR(ctx, 0x5D7DC/4, 0x1);
+	INSTANCE_WR(ctx, 0x5D81C/4, 0x1FE21);
+	INSTANCE_WR(ctx, 0x5D8DC/4, 0x8100C12);
+	INSTANCE_WR(ctx, 0x5D8FC/4, 0x4);
+	INSTANCE_WR(ctx, 0x5D93C/4, 0x2);
+	INSTANCE_WR(ctx, 0x5D95C/4, 0x11);
+	INSTANCE_WR(ctx, 0x5DA7C/4, 0xFAC6881);
+	INSTANCE_WR(ctx, 0x5DB1C/4, 0x4);
+	INSTANCE_WR(ctx, 0x5DC5C/4, 0x2);
+	INSTANCE_WR(ctx, 0x5DC7C/4, 0x1);
+	INSTANCE_WR(ctx, 0x5DC9C/4, 0x1);
+	INSTANCE_WR(ctx, 0x5DCBC/4, 0x2);
+	INSTANCE_WR(ctx, 0x5DCDC/4, 0x1);
+	INSTANCE_WR(ctx, 0x5DCFC/4, 0x1);
+	INSTANCE_WR(ctx, 0x5DD1C/4, 0x1);
+	INSTANCE_WR(ctx, 0x5DD5C/4, 0x4);
+	INSTANCE_WR(ctx, 0x651BC/4, 0x11);
+	INSTANCE_WR(ctx, 0x651FC/4, 0x1);
+}
+
+static void
+nvaa_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *ctx = ref->gpuobj;
+
+	INSTANCE_WR(ctx, 0x0010c/4, 0x00000030);
+	INSTANCE_WR(ctx, 0x001d0/4, 0x00000003);
+	INSTANCE_WR(ctx, 0x001d4/4, 0x00001000);
+	INSTANCE_WR(ctx, 0x00220/4, 0x0000fe0c);
+	INSTANCE_WR(ctx, 0x00238/4, 0x00001000);
+	INSTANCE_WR(ctx, 0x00264/4, 0x00000187);
+	INSTANCE_WR(ctx, 0x00278/4, 0x00001018);
+	INSTANCE_WR(ctx, 0x0027c/4, 0x000000ff);
+	INSTANCE_WR(ctx, 0x002c8/4, 0x00000004);
+	INSTANCE_WR(ctx, 0x002cc/4, 0x042500df);
+	INSTANCE_WR(ctx, 0x002d4/4, 0x00000600);
+	INSTANCE_WR(ctx, 0x002ec/4, 0x01000000);
+	INSTANCE_WR(ctx, 0x002f0/4, 0x000000ff);
+	INSTANCE_WR(ctx, 0x002f8/4, 0x00000800);
+	INSTANCE_WR(ctx, 0x00310/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00310/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00310/4, 0x000e0080);
+	INSTANCE_WR(ctx, 0x00310/4, 0x00000004);
+	INSTANCE_WR(ctx, 0x00338/4, 0x00000002);
+	INSTANCE_WR(ctx, 0x0033c/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x0034c/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00350/4, 0x00000100);
+	INSTANCE_WR(ctx, 0x00368/4, 0x00000002);
+	INSTANCE_WR(ctx, 0x0036c/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00370/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00380/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00384/4, 0x003fffff);
+	INSTANCE_WR(ctx, 0x00388/4, 0x00001fff);
+	INSTANCE_WR(ctx, 0x00390/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00394/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x0039c/4, 0x00000004);
+	INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x0039c/4, 0x00000007);
+	INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x003bc/4, 0x00000007);
+	INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x003e0/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x003e4/4, 0x00000100);
+	INSTANCE_WR(ctx, 0x003ec/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x003f8/4, 0x00000100);
+	INSTANCE_WR(ctx, 0x003fc/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00400/4, 0x00000100);
+	INSTANCE_WR(ctx, 0x00408/4, 0x00000001);
+	INSTANCE_WR(ctx, 0x00414/4, 0x00000100);
+	INSTANCE_WR(ctx, 0x00428/4, 0x00000004);
+	INSTANCE_WR(ctx, 0x0042c/4, 0x00000070);
+	INSTANCE_WR(ctx, 0x00430/4, 0x00000080);
+	INSTANCE_WR(ctx, 0x00444/4, 0x0000000c);
+	INSTANCE_WR(ctx, 0x0044c/4, 0x00000008);
+
INSTANCE_WR(ctx, 0x00450/4, 0x00000014); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000029); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00458/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000006); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000007); ++ INSTANCE_WR(ctx, 0x00478/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x004d8/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00508/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00508/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00508/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00508/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00508/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00524/4, 0x00000012); ++ INSTANCE_WR(ctx, 0x00524/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00524/4, 0x0000000c); ++ INSTANCE_WR(ctx, 0x00524/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00540/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00544/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00548/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00558/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x0055c/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x00584/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00588/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0058c/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00594/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00598/4, 0x00000014); ++ INSTANCE_WR(ctx, 0x0059c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005a8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x005bc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00000e00); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x005c4/4, 0x00001e00); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005dc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x005fc/4, 0x00000200); ++ INSTANCE_WR(ctx, 0x00604/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00608/4, 0x000000f0); ++ INSTANCE_WR(ctx, 0x0060c/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x00618/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0061c/4, 0x000000f0); ++ INSTANCE_WR(ctx, 0x00620/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x00628/4, 0x00000009); ++ INSTANCE_WR(ctx, 0x00634/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00638/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00640/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00650/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00658/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00660/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00668/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00670/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00674/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x00678/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00680/4, 0x00001f80); ++ INSTANCE_WR(ctx, 0x00698/4, 0x3b74f821); ++ INSTANCE_WR(ctx, 0x0069c/4, 0x89058001); ++ INSTANCE_WR(ctx, 0x006a4/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x006a8/4, 0x000000ff); ++ INSTANCE_WR(ctx, 0x006b0/4, 0x027c10fa); ++ INSTANCE_WR(ctx, 0x006b4/4, 0x400000c0); ++ INSTANCE_WR(ctx, 0x006b8/4, 0xb7892080); ++ INSTANCE_WR(ctx, 0x006cc/4, 0x003d0040); ++ INSTANCE_WR(ctx, 0x006d4/4, 0x00000022); ++ INSTANCE_WR(ctx, 0x006f4/4, 0x003d0040); ++ INSTANCE_WR(ctx, 0x006f8/4, 0x00000022); ++ INSTANCE_WR(ctx, 0x00740/4, 0x0000ff0a); ++ INSTANCE_WR(ctx, 0x00748/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x0074c/4, 
0x00160000); ++ INSTANCE_WR(ctx, 0x00750/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00760/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00764/4, 0x300c0000); ++ INSTANCE_WR(ctx, 0x00788/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00790/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00798/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x007a0/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x007a4/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x007b0/4, 0x0000003e); ++ INSTANCE_WR(ctx, 0x007c8/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x007cc/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x007d0/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x007e0/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x007e4/4, 0x300c0000); ++ INSTANCE_WR(ctx, 0x00808/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00810/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00818/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00820/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00824/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00830/4, 0x0000003e); ++ INSTANCE_WR(ctx, 0x00848/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x0084c/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x00850/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x00860/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x00864/4, 0x300c0000); ++ INSTANCE_WR(ctx, 0x00888/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00890/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00898/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x008a0/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x008a4/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x008b0/4, 0x0000003e); ++ INSTANCE_WR(ctx, 0x008c8/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x008cc/4, 0x00160000); ++ INSTANCE_WR(ctx, 0x008d0/4, 0x01800000); ++ INSTANCE_WR(ctx, 0x008e0/4, 0x0003ffff); ++ INSTANCE_WR(ctx, 0x008e4/4, 0x300c0000); ++ INSTANCE_WR(ctx, 0x00908/4, 0x00010401); ++ INSTANCE_WR(ctx, 0x00910/4, 0x00000078); ++ INSTANCE_WR(ctx, 0x00918/4, 0x000000bf); ++ INSTANCE_WR(ctx, 0x00920/4, 0x00001210); ++ INSTANCE_WR(ctx, 0x00924/4, 0x08000080); ++ INSTANCE_WR(ctx, 0x00930/4, 0x0000003e); ++ INSTANCE_WR(ctx, 0x0094c/4, 0x01127070); ++ INSTANCE_WR(ctx, 0x0095c/4, 0x07ffffff); ++ INSTANCE_WR(ctx, 0x00978/4, 0x00120407); ++ INSTANCE_WR(ctx, 0x00978/4, 0x05091507); ++ INSTANCE_WR(ctx, 0x00978/4, 0x05010202); ++ INSTANCE_WR(ctx, 0x00978/4, 0x00030201); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x0d0c0b0a); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x00141210); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x000001f0); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x009a0/4, 0x00008000); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x00039e00); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x00003800); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x003fe006); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x003fe000); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x00404040); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x0cf7f007); ++ INSTANCE_WR(ctx, 0x009c0/4, 0x02bf7fff); ++ INSTANCE_WR(ctx, 0x07ba0/4, 0x00000021); ++ INSTANCE_WR(ctx, 0x07bc0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x07be0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x07c00/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x07c20/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x07c40/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x07ca0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x07cc0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x07ce0/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x07d00/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x07d20/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1a7c0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a7e0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a800/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a820/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a840/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a860/4, 0x04000000); ++ INSTANCE_WR(ctx, 
0x1a880/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a8a0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a8c0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a8e0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a900/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a920/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a940/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a960/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a980/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1a9a0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1ae40/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x1ae60/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x1aec0/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x1aee0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x1af80/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x1b020/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x1b080/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x1b0c0/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x1b0e0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1b100/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x1b120/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1b140/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1b160/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x1be20/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1bf00/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1bf20/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x1bf80/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1c1e0/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x1c2c0/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x1c3c0/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x1c3e0/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x1c5e0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1c640/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1c6a0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x1c6c0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1c6e0/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x1c760/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x1c780/4, 0x00000052); ++ INSTANCE_WR(ctx, 0x1c820/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1ca40/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1ca60/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1ca80/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1caa0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cac0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cae0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cb00/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cb20/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cb40/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cb60/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cb80/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cba0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cbc0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cbe0/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cc00/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cc20/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x1cc40/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x1d120/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x1d140/4, 0x00000005); ++ INSTANCE_WR(ctx, 0x1d1a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x1d1e0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x1d200/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x1d220/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x1d240/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x1d260/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1d2e0/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x1d300/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x1d340/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x1dae0/4, 0x00000102); ++ INSTANCE_WR(ctx, 0x1db20/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1db40/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1db60/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1db80/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1dca0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1dcc0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1dd00/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x1dd40/4, 0x00000102); ++ INSTANCE_WR(ctx, 0x1de80/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1dea0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1dec0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x1dee0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00a04/4, 0x00000004); ++ 
INSTANCE_WR(ctx, 0x00a24/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00a64/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x00a84/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00aa4/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x00ae4/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x0b344/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0b364/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0b3a4/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x0b3c4/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0b3e4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0b424/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x0b464/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x010c8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x010e8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x39a68/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x39a88/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x39aa8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x39ac8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x39b08/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x39b48/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x39b68/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x39b88/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x39ba8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x39bc8/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x39c28/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x39c48/4, 0x00000027); ++ INSTANCE_WR(ctx, 0x39ca8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x414e8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x417c8/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x00a2c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00acc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x00b6c/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x00d6c/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x00f2c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00f4c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00f8c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00fac/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x00fec/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x0118c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x0362c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x0366c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x041cc/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x1484c/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x15950/4, 0x003fffff); ++ INSTANCE_WR(ctx, 0x159b0/4, 0x00001fff); ++ INSTANCE_WR(ctx, 0x00a34/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x00bb4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x00bd4/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x00c74/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x00c94/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x00e14/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00e54/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x00ff4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01014/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01074/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01114/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01134/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x01154/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x01174/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x01194/4, 0x00000015); ++ INSTANCE_WR(ctx, 0x01254/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x01374/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01394/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x013d4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01654/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01874/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01894/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x018b4/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x018d4/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x018f4/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01914/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01934/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01954/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01974/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01994/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x019b4/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x019d4/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x019f4/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01a14/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01a34/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01a54/4, 
0x3f800000); ++ INSTANCE_WR(ctx, 0x01d94/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01dd4/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x01eb4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01ef4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01f34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01f94/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x02114/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x02214/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x02314/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x023f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02414/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02434/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02454/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x02474/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02494/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x024b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x024f4/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x02534/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x028b4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x028d4/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x028f4/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02914/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02934/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x02954/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02974/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02a14/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x02a34/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00a18/4, 0x0000003f); ++ INSTANCE_WR(ctx, 0x00b78/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x00b98/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x00bb8/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x00cd8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00d58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00f98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00fb8/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x00fd8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x00ff8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x01018/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x01038/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x01458/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01478/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01498/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x014b8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x014d8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x014f8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01518/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01538/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01558/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01578/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01598/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x015b8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x015d8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x015f8/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01618/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01638/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x01658/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x016b8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x01878/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x01898/4, 0x04000000); ++ INSTANCE_WR(ctx, 0x018d8/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01958/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x01a38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01a58/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x01a78/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x01a98/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x01ad8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x01b98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01bd8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01bf8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01c18/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01c38/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x01c58/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x01d38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01d78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01d98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01db8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01e58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x01e98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x01eb8/4, 0x00000015); ++ INSTANCE_WR(ctx, 0x01f38/4, 0x04444480); ++ INSTANCE_WR(ctx, 
0x02698/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x026d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02758/4, 0x2a712488); ++ INSTANCE_WR(ctx, 0x02798/4, 0x4085c000); ++ INSTANCE_WR(ctx, 0x027b8/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x027d8/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x027f8/4, 0x00010100); ++ INSTANCE_WR(ctx, 0x02818/4, 0x02800000); ++ INSTANCE_WR(ctx, 0x02b58/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x02cd8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x02cf8/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x02d18/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x02d38/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x02d58/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x02e78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02ef8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x02fb8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03018/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03178/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03198/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x031b8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x031d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x031f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03218/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x03238/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03278/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x03378/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x033d8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x03458/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x03478/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x034b8/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x034d8/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x034f8/4, 0x000000cf); ++ INSTANCE_WR(ctx, 0x03658/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03678/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03698/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x036b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x036d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x036f8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x03718/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03758/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03778/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03798/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x037b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x037d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x037f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03818/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03838/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03858/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x03958/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x03978/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x03a78/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x03ad8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x03af8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03b78/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x03c38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03cd8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x03dd8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x03e58/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x03e78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03eb8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03ef8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03f38/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x03f78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x03fb8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04518/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x04538/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x04558/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x04578/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x04598/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x045b8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x045d8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x045f8/4, 0x00000008); ++ INSTANCE_WR(ctx, 0x04618/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x04718/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x04738/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x04758/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x04778/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x04798/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x047b8/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x047d8/4, 0x00000400); ++ 
INSTANCE_WR(ctx, 0x047f8/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x04818/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x04838/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x04858/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x04878/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x04898/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x048b8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x048d8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x048f8/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x04918/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x04938/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04958/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x04a58/4, 0x00000020); ++ INSTANCE_WR(ctx, 0x04a78/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x04a98/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x04ad8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04b38/4, 0x00000040); ++ INSTANCE_WR(ctx, 0x04b58/4, 0x00000100); ++ INSTANCE_WR(ctx, 0x04b98/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x04c38/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x04cb8/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x04cd8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x04e18/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04eb8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x04ef8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x04f18/4, 0x00000400); ++ INSTANCE_WR(ctx, 0x04f38/4, 0x00000300); ++ INSTANCE_WR(ctx, 0x04f58/4, 0x00001001); ++ INSTANCE_WR(ctx, 0x04fd8/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x050d8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x050f8/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x053f8/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x05418/4, 0x001ffe67); ++ INSTANCE_WR(ctx, 0x05498/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x054f8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x05538/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05558/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x055d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05678/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x05718/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x05758/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05778/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x057d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05938/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05958/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05978/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05998/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x059b8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x059d8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x059f8/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05a18/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05a38/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x05b38/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x05b58/4, 0x0000000f); ++ INSTANCE_WR(ctx, 0x05c58/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x05c78/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05df8/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x05e18/4, 0x04e3bfdf); ++ INSTANCE_WR(ctx, 0x05e38/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05e78/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x05e98/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x05ef8/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x06018/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x06058/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x06078/4, 0x30201000); ++ INSTANCE_WR(ctx, 0x06098/4, 0x70605040); ++ INSTANCE_WR(ctx, 0x060b8/4, 0xb8a89888); ++ INSTANCE_WR(ctx, 0x060d8/4, 0xf8e8d8c8); ++ INSTANCE_WR(ctx, 0x06118/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x06158/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x063f8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06418/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06438/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x064d8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06538/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06558/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06578/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x06598/4, 0x00001e00); ++ INSTANCE_WR(ctx, 0x065b8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06a58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06a78/4, 
0x00000080); ++ INSTANCE_WR(ctx, 0x06a98/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06ab8/4, 0x03020100); ++ INSTANCE_WR(ctx, 0x06ad8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x06af8/4, 0x00001e00); ++ INSTANCE_WR(ctx, 0x06b18/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06bb8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x06bd8/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x06c58/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0aef8/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x0af18/4, 0x00000003); ++ INSTANCE_WR(ctx, 0x00abc/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x00b1c/4, 0x00000804); ++ INSTANCE_WR(ctx, 0x00b5c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00b7c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00b9c/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x00bdc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00bfc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00c3c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00cdc/4, 0x00000804); ++ INSTANCE_WR(ctx, 0x00cfc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00d1c/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x00d3c/4, 0x0000007f); ++ INSTANCE_WR(ctx, 0x00d7c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00d9c/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x00ddc/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x00dfc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00e1c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x00e5c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x00edc/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x00efc/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x00fdc/4, 0x000007ff); ++ INSTANCE_WR(ctx, 0x00ffc/4, 0x00080c14); ++ INSTANCE_WR(ctx, 0x0171c/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0177c/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x01e9c/4, 0x00000088); ++ INSTANCE_WR(ctx, 0x01ebc/4, 0x00000088); ++ INSTANCE_WR(ctx, 0x01f1c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x021fc/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x0225c/4, 0x3f800000); ++ INSTANCE_WR(ctx, 0x022dc/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x022fc/4, 0x00000010); ++ INSTANCE_WR(ctx, 0x0281c/4, 0x00000052); ++ INSTANCE_WR(ctx, 0x0285c/4, 0x00000026); ++ INSTANCE_WR(ctx, 0x0289c/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x028bc/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x028fc/4, 0x0000001a); ++ INSTANCE_WR(ctx, 0x0295c/4, 0x00ffff00); ++ INSTANCE_WR(ctx, 0x41800/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x41840/4, 0x00000080); ++ INSTANCE_WR(ctx, 0x41860/4, 0x80007004); ++ INSTANCE_WR(ctx, 0x41880/4, 0x04000400); ++ INSTANCE_WR(ctx, 0x418a0/4, 0x000000c0); ++ INSTANCE_WR(ctx, 0x418c0/4, 0x00001000); ++ INSTANCE_WR(ctx, 0x41920/4, 0x00000e00); ++ INSTANCE_WR(ctx, 0x41940/4, 0x00001e00); ++ INSTANCE_WR(ctx, 0x41960/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x419c0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x41a00/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x41a20/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x41ba0/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x41be0/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x41ca0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x41cc0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x41ce0/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x41d00/4, 0x0000ffff); ++ INSTANCE_WR(ctx, 0x41d20/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x41d40/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x41d60/4, 0x00010001); ++ INSTANCE_WR(ctx, 0x41d80/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x41dc0/4, 0x0001fe21); ++ INSTANCE_WR(ctx, 0x41e80/4, 0x08100c12); ++ INSTANCE_WR(ctx, 0x41ea0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x41ee0/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x41f00/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x42020/4, 0x0fac6881); ++ INSTANCE_WR(ctx, 0x420c0/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x42200/4, 0x00000002); ++ INSTANCE_WR(ctx, 0x42220/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x42240/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x42260/4, 0x00000002); ++ INSTANCE_WR(ctx, 
0x42280/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x422a0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x422c0/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x42300/4, 0x00000004); ++ INSTANCE_WR(ctx, 0x49700/4, 0x00000011); ++ INSTANCE_WR(ctx, 0x49740/4, 0x00000001); ++ INSTANCE_WR(ctx, 0x0012c/4, 0x00000002); ++} ++ ++int ++nv50_graph_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; ++ struct nouveau_engine *engine = &dev_priv->Engine; ++ int grctx_size = 0x70000, hdr; ++ int ret; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000, ++ NVOBJ_FLAG_ZERO_ALLOC | ++ NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); ++ if (ret) ++ return ret; ++ ++ hdr = IS_G80 ? 0x200 : 0x20; ++ INSTANCE_WR(ramin, (hdr + 0x00)/4, 0x00190002); ++ INSTANCE_WR(ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + ++ grctx_size - 1); ++ INSTANCE_WR(ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance); ++ INSTANCE_WR(ramin, (hdr + 0x0c)/4, 0); ++ INSTANCE_WR(ramin, (hdr + 0x10)/4, 0); ++ INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000); ++ ++ INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x00000/4, ++ chan->ramin->instance >> 12); ++ if (dev_priv->chipset == 0xaa) ++ INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x00004/4, 0x00000002); ++ else ++ INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x0011c/4, 0x00000002); ++ ++ switch (dev_priv->chipset) { ++ case 0x50: ++ nv50_graph_init_ctxvals(dev, chan->ramin_grctx); ++ break; ++ case 0x84: ++ nv84_graph_init_ctxvals(dev, chan->ramin_grctx); ++ break; ++ case 0x86: ++ nv86_graph_init_ctxvals(dev, chan->ramin_grctx); ++ break; ++ case 0x92: ++ nv92_graph_init_ctxvals(dev, chan->ramin_grctx); ++ break; ++ case 0xaa: ++ nvaa_graph_init_ctxvals(dev, chan->ramin_grctx); ++ break; ++ default: ++ /* This is complete crack, it accidentally used to make at ++ * least some G8x cards work partially somehow, though there's ++ * no good reason why - and it stopped working as the rest ++ * of the code got off the drugs. ++ */ ++ ret = engine->graph.load_context(chan); ++ if (ret) { ++ DRM_ERROR("Error hacking up context: %d\n", ret); ++ return ret; ++ } ++ break; ++ } ++ ++ return 0; ++} ++ ++void ++nv50_graph_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int i, hdr; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ hdr = IS_G80 ? 0x200 : 0x20; ++ for (i=hdr; i<hdr+24; i+=4) ++ INSTANCE_WR(chan->ramin->gpuobj, i/4, 0); ++ ++ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); ++} ++ ++static int ++nv50_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t old_cp, tv = 20000; ++ int i; ++ ++ DRM_DEBUG("inst=0x%08x, save=%d\n", inst, save); ++ ++ old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER); ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(0x400824, NV_READ(0x400824) | ++ (save ?
NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : ++ NV40_PGRAPH_CTXCTL_0310_XFER_LOAD)); ++ NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX); ++ ++ for (i = 0; i < tv; i++) { ++ if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0) ++ break; ++ } ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); ++ ++ if (i == tv) { ++ DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save); ++ DRM_ERROR("0x40030C = 0x%08x\n", ++ NV_READ(NV40_PGRAPH_CTXCTL_030C)); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++int ++nv50_graph_load_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t inst = chan->ramin->instance >> 12; ++ int ret; (void)ret; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++#if 0 ++ if ((ret = nv50_graph_transfer_context(dev, inst, 0))) ++ return ret; ++#endif ++ ++ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); ++ NV_WRITE(0x400320, 4); ++ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, inst | (1<<31)); ++ ++ return 0; ++} ++ ++int ++nv50_graph_save_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ uint32_t inst = chan->ramin->instance >> 12; ++ ++ DRM_DEBUG("ch%d\n", chan->id); ++ ++ return nv50_graph_transfer_context(dev, inst, 1); ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv50_instmem.c git-nokia/drivers/gpu/drm-tungsten/nv50_instmem.c +--- git/drivers/gpu/drm-tungsten/nv50_instmem.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv50_instmem.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,324 @@ ++/* ++ * Copyright (C) 2007 Ben Skeggs. ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++typedef struct { ++ uint32_t save1700[5]; /* 0x1700->0x1710 */ ++ ++ struct nouveau_gpuobj_ref *pramin_pt; ++ struct nouveau_gpuobj_ref *pramin_bar; ++} nv50_instmem_priv; ++ ++#define NV50_INSTMEM_PAGE_SHIFT 12 ++#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT) ++#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3) ++ ++/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN ++ */ ++#define BAR0_WI32(g,o,v) do { \ ++ uint32_t offset; \ ++ if ((g)->im_backing) { \ ++ offset = (g)->im_backing->start; \ ++ } else { \ ++ offset = chan->ramin->gpuobj->im_backing->start; \ ++ offset += (g)->im_pramin->start; \ ++ } \ ++ offset += (o); \ ++ NV_WRITE(NV_RAMIN + (offset & 0xfffff), (v)); \ ++} while(0) ++ ++int ++nv50_instmem_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan; ++ uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; ++ nv50_instmem_priv *priv; ++ int ret, i; ++ uint32_t v; ++ ++ priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); ++ if (!priv) ++ return -ENOMEM; ++ dev_priv->Engine.instmem.priv = priv; ++ ++ /* Save state, will restore at takedown. */ ++ for (i = 0x1700; i <= 0x1710; i+=4) ++ priv->save1700[(i-0x1700)/4] = NV_READ(i); ++ ++ /* Reserve the last MiB of VRAM, we should probably try to avoid ++ * setting up the below tables over the top of the VBIOS image at ++ * some point. ++ */ ++ dev_priv->ramin_rsvd_vram = 1 << 20; ++ c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; ++ c_size = 128 << 10; ++ c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; ++ c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; ++ c_base = c_vmpd + 0x4000; ++ pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin->size); ++ ++ DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", c_offset); ++ DRM_DEBUG(" VBIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8); ++ DRM_DEBUG(" Aperture size: %d MiB\n", ++ (uint32_t)dev_priv->ramin->size >> 20); ++ DRM_DEBUG(" PT size: %d KiB\n", pt_size >> 10); ++ ++ NV_WRITE(NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16)); ++ ++ /* Create a fake channel, and use it as our "dummy" channels 0/127. ++ * The main reason for creating a channel is so we can use the gpuobj ++ * code. However, it's probably worth noting that NVIDIA also setup ++ * their channels 0/127 with the same values they configure here. ++ * So, there may be some other reason for doing this. ++ * ++ * Have to create the entire channel manually, as the real channel ++ * creation code assumes we have PRAMIN access, and we don't until ++ * we're done here. 
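++ *
++ * Reviewer's note: this chicken-and-egg problem is presumably also why
++ * the BAR0_WI32() macro above computes the VRAM offset by hand -- the
++ * backing block's start, or the fake channel's PRAMIN block plus the
++ * object's offset -- and pokes the value through the 1 MiB BAR0 window
++ * at NV_RAMIN. The usual INSTANCE_WR() path presumes a working PRAMIN
++ * mapping, which is precisely what this function is still constructing.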
++ */ ++ chan = drm_calloc(1, sizeof(*chan), DRM_MEM_DRIVER); ++ if (!chan) ++ return -ENOMEM; ++ chan->id = 0; ++ chan->dev = dev; ++ chan->file_priv = (struct drm_file *)-2; ++ dev_priv->fifos[0] = dev_priv->fifos[127] = chan; ++ ++ /* Channel's PRAMIN object + heap */ ++ if ((ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, 128<<10, 0, ++ NULL, &chan->ramin))) ++ return ret; ++ ++ if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base)) ++ return -ENOMEM; ++ ++ /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ ++ if ((ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc, ++ 0x4000, 0, NULL, &chan->ramfc))) ++ return ret; ++ ++ for (i = 0; i < c_vmpd; i += 4) ++ BAR0_WI32(chan->ramin->gpuobj, i, 0); ++ ++ /* VM page directory */ ++ if ((ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd, ++ 0x4000, 0, &chan->vm_pd, NULL))) ++ return ret; ++ for (i = 0; i < 0x4000; i += 8) { ++ BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000); ++ BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000); ++ } ++ ++ /* PRAMIN page table, cheat and map into VM at 0x0000000000. ++ * We map the entire fake channel into the start of the PRAMIN BAR ++ */ ++ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, ++ 0, &priv->pramin_pt))) ++ return ret; ++ ++ for (i = 0, v = c_offset; i < pt_size; i+=8, v+=0x1000) { ++ if (v < (c_offset + c_size)) ++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); ++ else ++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); ++ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); ++ } ++ ++ BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); ++ BAR0_WI32(chan->vm_pd, 0x04, 0x00000000); ++ ++ /* DMA object for PRAMIN BAR */ ++ if ((ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, ++ &priv->pramin_bar))) ++ return ret; ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin->size - 1); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000); ++ BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000); ++ ++ /* Poke the relevant regs, and pray it works :) */ ++ NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); ++ NV_WRITE(NV50_PUNK_UNK1710, 0); ++ NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | ++ NV50_PUNK_BAR_CFG_BASE_VALID); ++ NV_WRITE(NV50_PUNK_BAR1_CTXDMA, 0); ++ NV_WRITE(NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | ++ NV50_PUNK_BAR3_CTXDMA_VALID); ++ ++ /* Assume that praying isn't enough, check that we can re-read the ++ * entire fake channel back from the PRAMIN BAR */ ++ for (i = 0; i < c_size; i+=4) { ++ if (NV_READ(NV_RAMIN + i) != NV_RI32(i)) { ++ DRM_ERROR("Error reading back PRAMIN at 0x%08x\n", i); ++ return -EINVAL; ++ } ++ } ++ ++ /* Global PRAMIN heap */ ++ if (nouveau_mem_init_heap(&dev_priv->ramin_heap, ++ c_size, dev_priv->ramin->size - c_size)) { ++ dev_priv->ramin_heap = NULL; ++ DRM_ERROR("Failed to init RAMIN heap\n"); ++ } ++ ++ /*XXX: incorrect, but needed to make hash func "work" */ ++ dev_priv->ramht_offset = 0x10000; ++ dev_priv->ramht_bits = 9; ++ dev_priv->ramht_size = (1 << dev_priv->ramht_bits); ++ return 0; ++} ++ ++void ++nv50_instmem_takedown(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; ++ struct nouveau_channel *chan = dev_priv->fifos[0]; ++ int i; 
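++ /* Reviewer's note on the page-table sizing used in nv50_instmem_init()
++  * above: NV50_INSTMEM_PT_SIZE(a) = ((a) >> 12) << 3 is one 8-byte PTE
++  * per 4 KiB PRAMIN page. Worked through for an illustrative 16 MiB
++  * aperture:
++  *
++  *   pages   = (16 << 20) >> 12  = 4096
++  *   pt_size = 4096 * 8 bytes    = 32 KiB
++  *
++  * with each PTE's low word holding phys | 1 for a backed page and
++  * 0x00000009 for an unbacked one, matching the fill loop above and
++  * nv50_instmem_bind()/unbind() below.
++  */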
++ ++ DRM_DEBUG("\n"); ++ ++ if (!priv) ++ return; ++ ++ /* Restore state from before init */ ++ for (i = 0x1700; i <= 0x1710; i+=4) ++ NV_WRITE(i, priv->save1700[(i-0x1700)/4]); ++ ++ nouveau_gpuobj_ref_del(dev, &priv->pramin_bar); ++ nouveau_gpuobj_ref_del(dev, &priv->pramin_pt); ++ ++ /* Destroy dummy channel */ ++ if (chan) { ++ nouveau_gpuobj_del(dev, &chan->vm_pd); ++ nouveau_gpuobj_ref_del(dev, &chan->ramfc); ++ nouveau_gpuobj_ref_del(dev, &chan->ramin); ++ nouveau_mem_takedown(&chan->ramin_heap); ++ ++ dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; ++ drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER); ++ } ++ ++ dev_priv->Engine.instmem.priv = NULL; ++ drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); ++} ++ ++int ++nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) ++{ ++ if (gpuobj->im_backing) ++ return -EINVAL; ++ ++ *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1); ++ if (*sz == 0) ++ return -EINVAL; ++ ++ gpuobj->im_backing = nouveau_mem_alloc(dev, NV50_INSTMEM_PAGE_SIZE, ++ *sz, NOUVEAU_MEM_FB | ++ NOUVEAU_MEM_NOVM, ++ (struct drm_file *)-2); ++ if (!gpuobj->im_backing) { ++ DRM_ERROR("Couldn't allocate vram to back PRAMIN pages\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++void ++nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (gpuobj && gpuobj->im_backing) { ++ if (gpuobj->im_bound) ++ dev_priv->Engine.instmem.unbind(dev, gpuobj); ++ nouveau_mem_free(dev, gpuobj->im_backing); ++ gpuobj->im_backing = NULL; ++ } ++} ++ ++int ++nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; ++ uint32_t pte, pte_end, vram; ++ ++ if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) ++ return -EINVAL; ++ ++ DRM_DEBUG("st=0x%0llx sz=0x%0llx\n", ++ gpuobj->im_pramin->start, gpuobj->im_pramin->size); ++ ++ pte = (gpuobj->im_pramin->start >> 12) << 3; ++ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; ++ vram = gpuobj->im_backing->start; ++ ++ DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n", ++ gpuobj->im_pramin->start, pte, pte_end); ++ DRM_DEBUG("first vram page: 0x%llx\n", ++ gpuobj->im_backing->start); ++ ++ while (pte < pte_end) { ++ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); ++ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); ++ ++ pte += 8; ++ vram += NV50_INSTMEM_PAGE_SIZE; ++ } ++ ++ gpuobj->im_bound = 1; ++ return 0; ++} ++ ++int ++nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; ++ uint32_t pte, pte_end; ++ ++ if (gpuobj->im_bound == 0) ++ return -EINVAL; ++ ++ pte = (gpuobj->im_pramin->start >> 12) << 3; ++ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; ++ while (pte < pte_end) { ++ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); ++ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); ++ pte += 8; ++ } ++ ++ gpuobj->im_bound = 0; ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv50_mc.c git-nokia/drivers/gpu/drm-tungsten/nv50_mc.c +--- git/drivers/gpu/drm-tungsten/nv50_mc.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv50_mc.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,43 @@ ++/* ++ * Copyright (C) 
2007 Ben Skeggs. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "nouveau_drv.h" ++ ++int ++nv50_mc_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); ++ ++ return 0; ++} ++ ++void nv50_mc_takedown(struct drm_device *dev) ++{ ++} +diff -Nurd git/drivers/gpu/drm-tungsten/nv_drv.c git-nokia/drivers/gpu/drm-tungsten/nv_drv.c +--- git/drivers/gpu/drm-tungsten/nv_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv_drv.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,94 @@ ++/* nv_drv.c -- nv driver -*- linux-c -*- ++ * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * Copyright 2005 Lars Knoll ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Rickard E. 
(Rik) Faith ++ * Daryll Strauss ++ * Gareth Hughes ++ * Lars Knoll ++ */ ++ ++#include "drmP.h" ++#include "nv_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ nv_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = DRIVER_USE_MTRR | DRIVER_USE_AGP, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++ ++static int __init nv_init(void) ++{ ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit nv_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(nv_init); ++module_exit(nv_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurd git/drivers/gpu/drm-tungsten/nv_drv.h git-nokia/drivers/gpu/drm-tungsten/nv_drv.h +--- git/drivers/gpu/drm-tungsten/nv_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/nv_drv.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,52 @@ ++/* nv_drv.h -- NV DRM template customization -*- linux-c -*- ++ * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com ++ * ++ * Copyright 2005 Lars Knoll ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Lars Knoll ++ */ ++ ++#ifndef __NV_H__ ++#define __NV_H__ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "Lars Knoll" ++ ++#define DRIVER_NAME "nv" ++#define DRIVER_DESC "NV" ++#define DRIVER_DATE "20051006" ++ ++#define DRIVER_MAJOR 0 ++#define DRIVER_MINOR 0 ++#define DRIVER_PATCHLEVEL 1 ++ ++#define NV04 04 ++#define NV10 10 ++#define NV20 20 ++#define NV30 30 ++#define NV40 40 ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/pvr2d_drm.h git-nokia/drivers/gpu/drm-tungsten/pvr2d_drm.h +--- git/drivers/gpu/drm-tungsten/pvr2d_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/pvr2d_drm.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,42 @@ ++/* pvr2d_drm.h -- Public header for the PVR2D helper module -*- linux-c -*- */ ++ ++#ifndef __PVR2D_DRM_H__ ++#define __PVR2D_DRM_H__ ++ ++ ++/* This wouldn't work with 64 bit userland */ ++struct drm_pvr2d_virt2phys { ++ uint32_t virt; ++ uint32_t length; ++ uint32_t phys_array; ++ uint32_t handle; ++}; ++ ++struct drm_pvr2d_buf_release { ++ uint32_t handle; ++}; ++ ++enum drm_pvr2d_cflush_type { ++ DRM_PVR2D_CFLUSH_FROM_GPU = 1, ++ DRM_PVR2D_CFLUSH_TO_GPU = 2 ++}; ++ ++struct drm_pvr2d_cflush { ++ enum drm_pvr2d_cflush_type type; ++ uint32_t virt; ++ uint32_t length; ++}; ++ ++#define DRM_PVR2D_VIRT2PHYS 0x0 ++#define DRM_PVR2D_BUF_RELEASE 0x1 ++#define DRM_PVR2D_CFLUSH 0x2 ++ ++#define DRM_IOCTL_PVR2D_VIRT2PHYS DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR2D_VIRT2PHYS, \ ++ struct drm_pvr2d_virt2phys) ++#define DRM_IOCTL_PVR2D_BUF_RELEASE DRM_IOW(DRM_COMMAND_BASE + DRM_PVR2D_BUF_RELEASE, \ ++ struct drm_pvr2d_buf_release) ++#define DRM_IOCTL_PVR2D_CFLUSH DRM_IOW(DRM_COMMAND_BASE + DRM_PVR2D_CFLUSH, \ ++ struct drm_pvr2d_cflush) ++ ++ ++#endif /* __PVR2D_DRM_H__ */ +diff -Nurd git/drivers/gpu/drm-tungsten/pvr2d_drv.c git-nokia/drivers/gpu/drm-tungsten/pvr2d_drv.c +--- git/drivers/gpu/drm-tungsten/pvr2d_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/pvr2d_drv.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,537 @@ ++ ++#include "drmP.h" ++#include "drm_pciids.h" ++ ++#include "pvr2d_drm.h" ++#include "pvr2d_drv.h" ++ ++#define PVR2D_SHMEM_HASH_ORDER 12 ++ ++struct pvr2d_dev { ++ rwlock_t hash_lock; ++ struct drm_open_hash shmem_hash; ++}; ++ ++struct pvr2d_buf { ++ struct pvr2d_dev *dev_priv; ++ struct drm_hash_item hash; ++ struct page **pages; ++ struct kref kref; ++ uint32_t num_pages; ++}; ++ ++/* ++ * This pvr2d_ref object is needed strictly because ++ * idr_for_each doesn't exist in 2.6.22. With kernels ++ * supporting this function, we can use it to traverse ++ * the file list of buffers at file release. 
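++ *
++ * Reviewer's note: the resulting bookkeeping is two-level. Globally,
++ * pvr2d_buf objects are deduplicated in shmem_hash, keyed on the
++ * struct page address of the buffer's first page and refcounted with a
++ * kref. Per file, every successful virt2phys call allocates a
++ * pvr2d_ref that lives both in buf_idr (so a userspace handle resolves
++ * to it) and on ref_list (so release can walk all outstanding
++ * references without idr_for_each):
++ *
++ *   handle --idr--> pvr2d_ref --> pvr2d_buf <--hash-- first struct page
++ *
++ * The release-time walk itself, with the locking it needs, is in
++ * pvr2d_release() below.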
++ */ ++ ++struct pvr2d_ref { ++ struct list_head head; ++ struct pvr2d_buf *buf; ++}; ++ ++struct pvr2d_file { ++ spinlock_t lock; ++ struct list_head ref_list; ++ struct idr buf_idr; ++}; ++ ++static inline struct pvr2d_dev *pvr2d_dp(struct drm_device *dev) ++{ ++ return (struct pvr2d_dev *) dev->dev_private; ++} ++ ++static inline struct pvr2d_file *pvr2d_fp(struct drm_file *file_priv) ++{ ++ return (struct pvr2d_file *) file_priv->driver_priv; ++} ++ ++ ++static void ++pvr2d_free_buf(struct pvr2d_buf *buf) ++{ ++ uint32_t i; ++ ++ for (i=0; i<buf->num_pages; ++i) { ++ struct page *page = buf->pages[i]; ++ ++ if (!PageReserved(page)) ++ set_page_dirty_lock(page); ++ ++ put_page(page); ++ } ++ ++ kfree(buf->pages); ++ kfree(buf); ++} ++ ++static void ++pvr2d_release_buf(struct kref *kref) ++{ ++ struct pvr2d_buf *buf = ++ container_of(kref, struct pvr2d_buf, kref); ++ ++ struct pvr2d_dev *dev_priv = buf->dev_priv; ++ ++ drm_ht_remove_item(&dev_priv->shmem_hash, &buf->hash); ++ write_unlock(&dev_priv->hash_lock); ++ pvr2d_free_buf(buf); ++ write_lock(&dev_priv->hash_lock); ++} ++ ++static struct pvr2d_buf * ++pvr2d_alloc_buf(struct pvr2d_dev *dev_priv, uint32_t num_pages) ++{ ++ struct pvr2d_buf *buf = kmalloc(sizeof(*buf), GFP_KERNEL); ++ ++ if (unlikely(!buf)) ++ return NULL; ++ ++ buf->pages = kmalloc(num_pages * sizeof(*buf->pages), GFP_KERNEL); ++ if (unlikely(!buf->pages)) ++ goto out_err0; ++ ++ buf->dev_priv = dev_priv; ++ buf->num_pages = num_pages; ++ ++ ++ DRM_DEBUG("pvr2d_alloc_buf successfully completed.\n"); ++ return buf; ++ ++out_err0: ++ kfree(buf); ++ ++ return NULL; ++} ++ ++ ++static struct pvr2d_buf* ++pvr2d_lookup_buf(struct pvr2d_dev *dev_priv, struct page *first_phys) ++{ ++ struct drm_hash_item *hash; ++ struct pvr2d_buf *buf = NULL; ++ int ret; ++ ++ read_lock(&dev_priv->hash_lock); ++ ret = drm_ht_find_item(&dev_priv->shmem_hash, ++ (unsigned long)first_phys, ++ &hash); ++ ++ if (likely(ret == 0)) { ++ buf = drm_hash_entry(hash, struct pvr2d_buf, hash); ++ kref_get(&buf->kref); ++ } ++ read_unlock(&dev_priv->hash_lock); ++ ++ if (buf != NULL) { ++ DRM_INFO("pvr2d_lookup_buf found already used buffer.\n"); ++ } ++ ++ return buf; ++} ++ ++ ++static int ++pvr2d_virt2phys(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_pvr2d_virt2phys *v2p = data; ++ uint32_t i; ++ unsigned nr_pages = ((v2p->virt & ~PAGE_MASK) + v2p->length + PAGE_SIZE - ++ 1) / PAGE_SIZE; ++ struct page *first_page; ++ struct pvr2d_buf *buf = NULL; ++ struct pvr2d_dev *dev_priv = pvr2d_dp(dev); ++ struct pvr2d_file *pvr2d_fpriv = pvr2d_fp(file_priv); ++ struct pvr2d_ref *ref; ++ int ret; ++ ++ ++ /* ++ * Obtain a global hash key for the pvr2d buffer structure. ++ * We use the address of the struct page of the first ++ * page. ++ */ ++ ++ down_read(&current->mm->mmap_sem); ++ ret = get_user_pages(current, current->mm, v2p->virt & PAGE_MASK, ++ 1, WRITE, 0, &first_page, NULL); ++ up_read(&current->mm->mmap_sem); ++ ++ if (unlikely(ret < 1)) { ++ DRM_ERROR("Failed getting first page: %d\n", ret); ++ return -ENOMEM; ++ } ++ ++ /* ++ * Look up buffer already in the hash table, or create ++ * and insert a new one.
++ */ ++ ++ while (buf == NULL) { ++ buf = pvr2d_lookup_buf(dev_priv, first_page); ++ ++ if (likely(buf != NULL)) ++ break; ++ ++ buf = pvr2d_alloc_buf(dev_priv, nr_pages); ++ if (unlikely(buf == NULL)) { ++ DRM_ERROR("Failed allocating pvr2d buffer.\n"); ++ ret = -ENOMEM; ++ goto out_put; ++ } ++ ++ down_read(&current->mm->mmap_sem); ++ ret = get_user_pages(current, current->mm, v2p->virt & PAGE_MASK, ++ nr_pages, WRITE, 0, buf->pages, NULL); ++ up_read(&current->mm->mmap_sem); ++ ++ if (unlikely(ret < nr_pages)) { ++ DRM_ERROR("Failed getting user pages.\n"); ++ buf->num_pages = ret; ++ ret = -ENOMEM; ++ pvr2d_free_buf(buf); ++ goto out_put; ++ } ++ ++ kref_init(&buf->kref); ++ buf->hash.key = (unsigned long) first_page; ++ ++ write_lock(&dev_priv->hash_lock); ++ ret = drm_ht_insert_item(&dev_priv->shmem_hash, &buf->hash); ++ write_unlock(&dev_priv->hash_lock); ++ ++ if (unlikely(ret == -EINVAL)) { ++ ++ /* ++ * Somebody raced us and already ++ * inserted this buffer. ++ * Very unlikely, but retry anyway. ++ */ ++ ++ pvr2d_free_buf(buf); ++ buf = NULL; ++ } ++ } ++ ++ /* ++ * Create a reference object that is used for unreferencing ++ * either by user action or when the drm file is closed. ++ */ ++ ++ ref = kmalloc(sizeof(*ref), GFP_KERNEL); ++ if (unlikely(ref == NULL)) { ++ ret = -ENOMEM; ++ goto out_err0; ++ } ++ ++ ref->buf = buf; ++ do { ++ if (idr_pre_get(&pvr2d_fpriv->buf_idr, GFP_KERNEL) == 0) { ++ ret = -ENOMEM; ++ DRM_ERROR("Failed idr_pre_get\n"); ++ goto out_err1; ++ } ++ ++ spin_lock( &pvr2d_fpriv->lock ); ++ ret = idr_get_new( &pvr2d_fpriv->buf_idr, ref, &v2p->handle); ++ ++ if (likely(ret == 0)) ++ list_add_tail(&ref->head, &pvr2d_fpriv->ref_list); ++ ++ spin_unlock( &pvr2d_fpriv->lock ); ++ ++ } while (unlikely(ret == -EAGAIN)); ++ ++ if (unlikely(ret != 0)) ++ goto out_err1; ++ ++ ++ /* ++ * Copy info to user-space.
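++ *
++ * Reviewer's note: one 32-bit physical address per page, computed as
++ * page_to_pfn(page) << PAGE_SHIFT (illustrative numbers only: pfn
++ * 0x8765 yields 0x08765000). The addresses are page-aligned, so the
++ * in-page offset of v2p->virt is left to the caller, and the fixed
++ * uint32_t slots are the reason pvr2d_drm.h warns that this interface
++ * wouldn't work with 64 bit userland.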
++ */ ++ ++ DRM_DEBUG("Converting range of %u bytes at virtual 0x%08x, physical array at 0x%08x\n", ++ v2p->length, v2p->virt, v2p->phys_array); ++ ++ for (i = 0; i < nr_pages; i++) { ++ uint32_t physical = (uint32_t)page_to_pfn(buf->pages[i]) << PAGE_SHIFT; ++ DRM_DEBUG("Virtual 0x%08lx => Physical 0x%08x\n", ++ v2p->virt + i * PAGE_SIZE, physical); ++ ++ if (DRM_COPY_TO_USER((void*)(v2p->phys_array + ++ i * sizeof(uint32_t)), ++ &physical, sizeof(uint32_t))) { ++ ret = -EFAULT; ++ goto out_err2; ++ } ++ ++ } ++ ++#ifdef CONFIG_X86 ++ /* XXX: Quick'n'dirty hack to avoid corruption on Poulsbo, remove when ++ * there's a better solution ++ */ ++ wbinvd(); ++#endif ++ ++ DRM_DEBUG("pvr2d_virt2phys returning handle 0x%08x\n", ++ v2p->handle); ++ ++out_put: ++ put_page(first_page); ++ return ret; ++ ++out_err2: ++ spin_lock( &pvr2d_fpriv->lock ); ++ list_del(&ref->head); ++ idr_remove( &pvr2d_fpriv->buf_idr, v2p->handle); ++ spin_unlock( &pvr2d_fpriv->lock ); ++out_err1: ++ kfree(ref); ++out_err0: ++ write_lock(&dev_priv->hash_lock); ++ kref_put(&buf->kref, &pvr2d_release_buf); ++ write_unlock(&dev_priv->hash_lock); ++ put_page(first_page); ++ return ret; ++} ++ ++ ++static int ++pvr2d_buf_release(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct pvr2d_dev *dev_priv = pvr2d_dp(dev); ++ struct drm_pvr2d_buf_release *br = data; ++ struct pvr2d_file *pvr2d_fpriv = pvr2d_fp(file_priv); ++ struct pvr2d_buf *buf; ++ struct pvr2d_ref *ref; ++ ++ DRM_DEBUG("pvr2d_buf_release releasing 0x%08x\n", ++ br->handle); ++ ++ spin_lock( &pvr2d_fpriv->lock ); ++ ref = idr_find( &pvr2d_fpriv->buf_idr, br->handle); ++ ++ if (unlikely(ref == NULL)) { ++ spin_unlock( &pvr2d_fpriv->lock ); ++ DRM_ERROR("Could not find pvr2d buf to unref.\n"); ++ return -EINVAL; ++ } ++ (void) idr_remove( &pvr2d_fpriv->buf_idr, br->handle); ++ list_del(&ref->head); ++ spin_unlock( &pvr2d_fpriv->lock ); ++ ++ buf = ref->buf; ++ kfree(ref); ++ ++ write_lock(&dev_priv->hash_lock); ++ kref_put(&buf->kref, &pvr2d_release_buf); ++ write_unlock(&dev_priv->hash_lock); ++ ++ return 0; ++} ++ ++static int ++pvr2d_cflush(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_pvr2d_cflush *cf = data; ++ ++ switch (cf->type) { ++ case DRM_PVR2D_CFLUSH_FROM_GPU: ++ DRM_DEBUG("DRM_PVR2D_CFLUSH_FROM_GPU 0x%08x, length 0x%08x\n", ++ cf->virt, cf->length); ++#ifdef CONFIG_ARM ++ dmac_inv_range((const void*)cf->virt, ++ (const void*)(cf->virt + cf->length)); ++#endif ++ return 0; ++ case DRM_PVR2D_CFLUSH_TO_GPU: ++ DRM_DEBUG("DRM_PVR2D_CFLUSH_TO_GPU 0x%08x, length 0x%08x\n", ++ cf->virt, cf->length); ++#ifdef CONFIG_ARM ++ dmac_clean_range((const void*)cf->virt, ++ (const void*)(cf->virt + cf->length)); ++#endif ++ return 0; ++ default: ++ DRM_ERROR("Invalid cflush type 0x%x\n", cf->type); ++ return -EINVAL; ++ } ++} ++ ++static int ++pvr2d_open(struct inode *inode, struct file *filp) ++{ ++ int ret; ++ struct pvr2d_file *pvr2d_fpriv; ++ struct drm_file *file_priv; ++ ++ pvr2d_fpriv = kmalloc(sizeof(*pvr2d_fpriv), GFP_KERNEL); ++ if (unlikely(pvr2d_fpriv == NULL)) ++ return -ENOMEM; ++ ++ pvr2d_fpriv->lock = SPIN_LOCK_UNLOCKED; ++ INIT_LIST_HEAD(&pvr2d_fpriv->ref_list); ++ idr_init(&pvr2d_fpriv->buf_idr); ++ ++ ret = drm_open(inode, filp); ++ ++ if (unlikely(ret != 0)) { ++ idr_destroy(&pvr2d_fpriv->buf_idr); ++ kfree(pvr2d_fpriv); ++ return ret; ++ } ++ ++ file_priv = filp->private_data; ++ file_priv->driver_priv = pvr2d_fpriv; ++ ++ DRM_DEBUG("pvr2d_open completed successfully.\n"); ++ return 0; 
++}; ++ ++ ++static int ++pvr2d_release(struct inode *inode, struct file *filp) ++{ ++ struct drm_file *file_priv = filp->private_data; ++ struct drm_device *dev = file_priv->minor->dev; ++ struct pvr2d_dev *dev_priv = pvr2d_dp(dev); ++ struct pvr2d_file *pvr2d_fpriv = pvr2d_fp(file_priv); ++ struct pvr2d_buf *buf; ++ struct pvr2d_ref *ref, *next; ++ ++ /* ++ * At this point we're the only user of the list, so ++ * it should be safe to release the file lock whenever we want to. ++ */ ++ ++ spin_lock(&pvr2d_fpriv->lock); ++ ++ list_for_each_entry_safe(ref, next, &pvr2d_fpriv->ref_list, ++ head) { ++ list_del(&ref->head); ++ buf = ref->buf; ++ kfree(ref); ++ spin_unlock(&pvr2d_fpriv->lock); ++ write_lock(&dev_priv->hash_lock); ++ kref_put(&buf->kref, &pvr2d_release_buf); ++ write_unlock(&dev_priv->hash_lock); ++ spin_lock(&pvr2d_fpriv->lock); ++ } ++ ++ idr_remove_all(&pvr2d_fpriv->buf_idr); ++ idr_destroy(&pvr2d_fpriv->buf_idr); ++ spin_unlock(&pvr2d_fpriv->lock); ++ ++ kfree(pvr2d_fpriv); ++ ++ DRM_DEBUG("pvr2d_release calling drm_release.\n"); ++ return drm_release(inode, filp); ++} ++ ++static int pvr2d_load(struct drm_device *dev, unsigned long chipset) ++{ ++ struct pvr2d_dev *dev_priv; ++ int ret; ++ ++ dev_priv = kmalloc(sizeof(*dev_priv), GFP_KERNEL); ++ if (unlikely(dev_priv == NULL)) ++ return -ENOMEM; ++ ++ rwlock_init(&dev_priv->hash_lock); ++ ret = drm_ht_create(&dev_priv->shmem_hash, ++ PVR2D_SHMEM_HASH_ORDER); ++ ++ if (unlikely(ret != 0)) ++ goto out_err0; ++ ++ dev->dev_private = dev_priv; ++ ++ DRM_DEBUG("pvr2d_load completed successfully.\n"); ++ return 0; ++out_err0: ++ kfree(dev_priv); ++ return ret; ++} ++ ++ ++static int pvr2d_unload(struct drm_device *dev) ++{ ++ struct pvr2d_dev *dev_priv = pvr2d_dp(dev); ++ ++ drm_ht_remove(&dev_priv->shmem_hash); ++ kfree(dev_priv); ++ DRM_DEBUG("pvr2d_unload completed successfully.\n"); ++ return 0; ++} ++ ++static struct pci_device_id pciidlist[] = { ++ pvr2d_PCI_IDS ++}; ++ ++struct drm_ioctl_desc pvr2d_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_PVR2D_VIRT2PHYS, pvr2d_virt2phys, 0), ++ DRM_IOCTL_DEF(DRM_PVR2D_BUF_RELEASE, pvr2d_buf_release, 0), ++ DRM_IOCTL_DEF(DRM_PVR2D_CFLUSH, pvr2d_cflush, 0) ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = DRIVER_USE_MTRR, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = pvr2d_ioctls, ++ .num_ioctls = DRM_ARRAY_SIZE(pvr2d_ioctls), ++ .load = pvr2d_load, ++ .unload = pvr2d_unload, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = pvr2d_open, ++ .release = pvr2d_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++ ++static int __init pvr2d_init(void) ++{ ++#ifdef CONFIG_PCI ++ return drm_init(&driver, pciidlist); ++#else ++ return drm_get_dev(NULL, NULL, &driver); ++#endif ++} ++ ++static void __exit pvr2d_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(pvr2d_init); ++module_exit(pvr2d_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); 
++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurd git/drivers/gpu/drm-tungsten/pvr2d_drv.h git-nokia/drivers/gpu/drm-tungsten/pvr2d_drv.h +--- git/drivers/gpu/drm-tungsten/pvr2d_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/pvr2d_drv.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,19 @@ ++/* -*- linux-c -*- */ ++ ++#ifndef __PVR2D_H__ ++#define __PVR2D_H__ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "Tungsten Graphics Inc." ++ ++#define DRIVER_NAME "pvr2d" ++#define DRIVER_DESC "PVR2D kernel helper" ++#define DRIVER_DATE "20080811" ++ ++#define DRIVER_MAJOR 1 ++#define DRIVER_MINOR 0 ++#define DRIVER_PATCHLEVEL 0 ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/r128_cce.c git-nokia/drivers/gpu/drm-tungsten/r128_cce.c +--- git/drivers/gpu/drm-tungsten/r128_cce.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/r128_cce.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,933 @@ ++/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*- ++ * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com ++ */ ++/* ++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "r128_drm.h" ++#include "r128_drv.h" ++ ++#define R128_FIFO_DEBUG 0 ++ ++/* CCE microcode (from ATI) */ ++static u32 r128_cce_microcode[] = { ++ 0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0, ++ 1617039951, 0, 774592877, 0, 1987540286, 0, 2307490946U, 0, ++ 599558925, 0, 589505315, 0, 596487092, 0, 589505315, 1, ++ 11544576, 1, 206848, 1, 311296, 1, 198656, 2, 912273422, 11, ++ 262144, 0, 0, 1, 33559837, 1, 7438, 1, 14809, 1, 6615, 12, 28, ++ 1, 6614, 12, 28, 2, 23, 11, 18874368, 0, 16790922, 1, 409600, 9, ++ 30, 1, 147854772, 16, 420483072, 3, 8192, 0, 10240, 1, 198656, ++ 1, 15630, 1, 51200, 10, 34858, 9, 42, 1, 33559823, 2, 10276, 1, ++ 15717, 1, 15718, 2, 43, 1, 15936948, 1, 570480831, 1, 14715071, ++ 12, 322123831, 1, 33953125, 12, 55, 1, 33559908, 1, 15718, 2, ++ 46, 4, 2099258, 1, 526336, 1, 442623, 4, 4194365, 1, 509952, 1, ++ 459007, 3, 0, 12, 92, 2, 46, 12, 176, 1, 15734, 1, 206848, 1, ++ 18432, 1, 133120, 1, 100670734, 1, 149504, 1, 165888, 1, ++ 15975928, 1, 1048576, 6, 3145806, 1, 15715, 16, 2150645232U, 2, ++ 268449859, 2, 10307, 12, 176, 1, 15734, 1, 15735, 1, 15630, 1, ++ 15631, 1, 5253120, 6, 3145810, 16, 2150645232U, 1, 15864, 2, 82, ++ 1, 343310, 1, 1064207, 2, 3145813, 1, 15728, 1, 7817, 1, 15729, ++ 3, 15730, 12, 92, 2, 98, 1, 16168, 1, 16167, 1, 16002, 1, 16008, ++ 1, 15974, 1, 15975, 1, 15990, 1, 15976, 1, 15977, 1, 15980, 0, ++ 15981, 1, 10240, 1, 5253120, 1, 15720, 1, 198656, 6, 110, 1, ++ 180224, 1, 103824738, 2, 112, 2, 3145839, 0, 536885440, 1, ++ 114880, 14, 125, 12, 206975, 1, 33559995, 12, 198784, 0, ++ 33570236, 1, 15803, 0, 15804, 3, 294912, 1, 294912, 3, 442370, ++ 1, 11544576, 0, 811612160, 1, 12593152, 1, 11536384, 1, ++ 14024704, 7, 310382726, 0, 10240, 1, 14796, 1, 14797, 1, 14793, ++ 1, 14794, 0, 14795, 1, 268679168, 1, 9437184, 1, 268449792, 1, ++ 198656, 1, 9452827, 1, 1075854602, 1, 1075854603, 1, 557056, 1, ++ 114880, 14, 159, 12, 198784, 1, 1109409213, 12, 198783, 1, ++ 1107312059, 12, 198784, 1, 1109409212, 2, 162, 1, 1075854781, 1, ++ 1073757627, 1, 1075854780, 1, 540672, 1, 10485760, 6, 3145894, ++ 16, 274741248, 9, 168, 3, 4194304, 3, 4209949, 0, 0, 0, 256, 14, ++ 174, 1, 114857, 1, 33560007, 12, 176, 0, 10240, 1, 114858, 1, ++ 33560018, 1, 114857, 3, 33560007, 1, 16008, 1, 114874, 1, ++ 33560360, 1, 114875, 1, 33560154, 0, 15963, 0, 256, 0, 4096, 1, ++ 409611, 9, 188, 0, 10240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ++}; ++ ++static int R128_READ_PLL(struct drm_device * dev, int addr) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ ++ R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f); ++ return R128_READ(R128_CLOCK_CNTL_DATA); ++} ++ ++#if R128_FIFO_DEBUG ++static void r128_status(drm_r128_private_t * dev_priv) ++{ ++ printk("GUI_STAT = 0x%08x\n", ++ (unsigned int)R128_READ(R128_GUI_STAT)); ++ printk("PM4_STAT = 0x%08x\n", ++ (unsigned int)R128_READ(R128_PM4_STAT)); ++ printk("PM4_BUFFER_DL_WPTR = 0x%08x\n", ++ (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR)); ++ printk("PM4_BUFFER_DL_RPTR = 0x%08x\n", ++ (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR)); ++ 
printk("PM4_MICRO_CNTL = 0x%08x\n", ++ (unsigned int)R128_READ(R128_PM4_MICRO_CNTL)); ++ printk("PM4_BUFFER_CNTL = 0x%08x\n", ++ (unsigned int)R128_READ(R128_PM4_BUFFER_CNTL)); ++} ++#endif ++ ++/* ================================================================ ++ * Engine, FIFO control ++ */ ++ ++static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv) ++{ ++ u32 tmp; ++ int i; ++ ++ tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL; ++ R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp); ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) { ++ return 0; ++ } ++ DRM_UDELAY(1); ++ } ++ ++#if R128_FIFO_DEBUG ++ DRM_ERROR("failed!\n"); ++#endif ++ return -EBUSY; ++} ++ ++static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) ++{ ++ int i; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK; ++ if (slots >= entries) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++#if R128_FIFO_DEBUG ++ DRM_ERROR("failed!\n"); ++#endif ++ return -EBUSY; ++} ++ ++static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv) ++{ ++ int i, ret; ++ ++ ret = r128_do_wait_for_fifo(dev_priv, 64); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) { ++ r128_do_pixcache_flush(dev_priv); ++ return 0; ++ } ++ DRM_UDELAY(1); ++ } ++ ++#if R128_FIFO_DEBUG ++ DRM_ERROR("failed!\n"); ++#endif ++ return -EBUSY; ++} ++ ++/* ================================================================ ++ * CCE control, initialization ++ */ ++ ++/* Load the microcode for the CCE */ ++static void r128_cce_load_microcode(drm_r128_private_t * dev_priv) ++{ ++ int i; ++ ++ DRM_DEBUG("\n"); ++ ++ r128_do_wait_for_idle(dev_priv); ++ ++ R128_WRITE(R128_PM4_MICROCODE_ADDR, 0); ++ for (i = 0; i < 256; i++) { ++ R128_WRITE(R128_PM4_MICROCODE_DATAH, r128_cce_microcode[i * 2]); ++ R128_WRITE(R128_PM4_MICROCODE_DATAL, ++ r128_cce_microcode[i * 2 + 1]); ++ } ++} ++ ++/* Flush any pending commands to the CCE. This should only be used just ++ * prior to a wait for idle, as it informs the engine that the command ++ * stream is ending. ++ */ ++static void r128_do_cce_flush(drm_r128_private_t * dev_priv) ++{ ++ u32 tmp; ++ ++ tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR) | R128_PM4_BUFFER_DL_DONE; ++ R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp); ++} ++ ++/* Wait for the CCE to go idle. ++ */ ++int r128_do_cce_idle(drm_r128_private_t * dev_priv) ++{ ++ int i; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) { ++ int pm4stat = R128_READ(R128_PM4_STAT); ++ if (((pm4stat & R128_PM4_FIFOCNT_MASK) >= ++ dev_priv->cce_fifo_size) && ++ !(pm4stat & (R128_PM4_BUSY | ++ R128_PM4_GUI_ACTIVE))) { ++ return r128_do_pixcache_flush(dev_priv); ++ } ++ } ++ DRM_UDELAY(1); ++ } ++ ++#if R128_FIFO_DEBUG ++ DRM_ERROR("failed!\n"); ++ r128_status(dev_priv); ++#endif ++ return -EBUSY; ++} ++ ++/* Start the Concurrent Command Engine. ++ */ ++static void r128_do_cce_start(drm_r128_private_t * dev_priv) ++{ ++ r128_do_wait_for_idle(dev_priv); ++ ++ R128_WRITE(R128_PM4_BUFFER_CNTL, ++ dev_priv->cce_mode | dev_priv->ring.size_l2qw ++ | R128_PM4_BUFFER_CNTL_NOUPDATE); ++ R128_READ(R128_PM4_BUFFER_ADDR); /* as per the sample code */ ++ R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN); ++ ++ dev_priv->cce_running = 1; ++} ++ ++/* Reset the Concurrent Command Engine. 
This will not flush any pending ++ * commands, so you must wait for the CCE command stream to complete ++ * before calling this routine. ++ */ ++static void r128_do_cce_reset(drm_r128_private_t * dev_priv) ++{ ++ R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0); ++ R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0); ++ dev_priv->ring.tail = 0; ++} ++ ++/* Stop the Concurrent Command Engine. This will not flush any pending ++ * commands, so you must flush the command stream and wait for the CCE ++ * to go idle before calling this routine. ++ */ ++static void r128_do_cce_stop(drm_r128_private_t * dev_priv) ++{ ++ R128_WRITE(R128_PM4_MICRO_CNTL, 0); ++ R128_WRITE(R128_PM4_BUFFER_CNTL, ++ R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE); ++ ++ dev_priv->cce_running = 0; ++} ++ ++/* Reset the engine. This will stop the CCE if it is running. ++ */ ++static int r128_do_engine_reset(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ u32 clock_cntl_index, mclk_cntl, gen_reset_cntl; ++ ++ r128_do_pixcache_flush(dev_priv); ++ ++ clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX); ++ mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL); ++ ++ R128_WRITE_PLL(R128_MCLK_CNTL, ++ mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP); ++ ++ gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL); ++ ++ /* Taken from the sample code - do not change */ ++ R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI); ++ R128_READ(R128_GEN_RESET_CNTL); ++ R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI); ++ R128_READ(R128_GEN_RESET_CNTL); ++ ++ R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl); ++ R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index); ++ R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl); ++ ++ /* Reset the CCE ring */ ++ r128_do_cce_reset(dev_priv); ++ ++ /* The CCE is no longer running after an engine reset */ ++ dev_priv->cce_running = 0; ++ ++ /* Reset any pending vertex, indirect buffers */ ++ r128_freelist_reset(dev); ++ ++ return 0; ++} ++ ++static void r128_cce_init_ring_buffer(struct drm_device * dev, ++ drm_r128_private_t * dev_priv) ++{ ++ u32 ring_start; ++ u32 tmp; ++ ++ DRM_DEBUG("\n"); ++ ++ /* The manual (p. 2) says this address is in "VM space". This ++ * means it's an offset from the start of AGP space. ++ */ ++#if __OS_HAS_AGP ++ if (!dev_priv->is_pci) ++ ring_start = dev_priv->cce_ring->offset - dev->agp->base; ++ else ++#endif ++ ring_start = dev_priv->cce_ring->offset - ++ (unsigned long)dev->sg->virtual; ++ ++ R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET); ++ ++ R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0); ++ R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0); ++ ++ /* Set watermark control */ ++ R128_WRITE(R128_PM4_BUFFER_WM_CNTL, ++ ((R128_WATERMARK_L / 4) << R128_WMA_SHIFT) ++ | ((R128_WATERMARK_M / 4) << R128_WMB_SHIFT) ++ | ((R128_WATERMARK_N / 4) << R128_WMC_SHIFT) ++ | ((R128_WATERMARK_K / 64) << R128_WB_WM_SHIFT)); ++ ++ /* Force read. Why? Because it's in the examples... 
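++ *
++ * Reviewer's note: a plausible reason, assuming ordinary PCI
++ * posted-write semantics, is that MMIO writes may be buffered on the
++ * way to the device, while a read from the same device cannot complete
++ * until those writes have landed. The read-back
++ *
++ *   (void)R128_READ(R128_PM4_BUFFER_ADDR);
++ *
++ * therefore acts as a flush barrier before bus mastering is enabled.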
*/ ++ R128_READ(R128_PM4_BUFFER_ADDR); ++ ++ /* Turn on bus mastering */ ++ tmp = R128_READ(R128_BUS_CNTL) & ~R128_BUS_MASTER_DIS; ++ R128_WRITE(R128_BUS_CNTL, tmp); ++} ++ ++static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) ++{ ++ drm_r128_private_t *dev_priv; ++ ++ DRM_DEBUG("\n"); ++ ++ dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); ++ if (dev_priv == NULL) ++ return -ENOMEM; ++ ++ memset(dev_priv, 0, sizeof(drm_r128_private_t)); ++ ++ dev_priv->is_pci = init->is_pci; ++ ++ if (dev_priv->is_pci && !dev->sg) { ++ DRM_ERROR("PCI GART memory not allocated!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->usec_timeout = init->usec_timeout; ++ if (dev_priv->usec_timeout < 1 || ++ dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) { ++ DRM_DEBUG("TIMEOUT problem!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->cce_mode = init->cce_mode; ++ ++ /* GH: Simple idle check. ++ */ ++ atomic_set(&dev_priv->idle_count, 0); ++ ++ /* We don't support anything other than bus-mastering ring mode, ++ * but the ring can be in either AGP or PCI space for the ring ++ * read pointer. ++ */ ++ if ((init->cce_mode != R128_PM4_192BM) && ++ (init->cce_mode != R128_PM4_128BM_64INDBM) && ++ (init->cce_mode != R128_PM4_64BM_128INDBM) && ++ (init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM)) { ++ DRM_DEBUG("Bad cce_mode!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ ++ switch (init->cce_mode) { ++ case R128_PM4_NONPM4: ++ dev_priv->cce_fifo_size = 0; ++ break; ++ case R128_PM4_192PIO: ++ case R128_PM4_192BM: ++ dev_priv->cce_fifo_size = 192; ++ break; ++ case R128_PM4_128PIO_64INDBM: ++ case R128_PM4_128BM_64INDBM: ++ dev_priv->cce_fifo_size = 128; ++ break; ++ case R128_PM4_64PIO_128INDBM: ++ case R128_PM4_64BM_128INDBM: ++ case R128_PM4_64PIO_64VCBM_64INDBM: ++ case R128_PM4_64BM_64VCBM_64INDBM: ++ case R128_PM4_64PIO_64VCPIO_64INDPIO: ++ dev_priv->cce_fifo_size = 64; ++ break; ++ } ++ ++ switch (init->fb_bpp) { ++ case 16: ++ dev_priv->color_fmt = R128_DATATYPE_RGB565; ++ break; ++ case 32: ++ default: ++ dev_priv->color_fmt = R128_DATATYPE_ARGB8888; ++ break; ++ } ++ dev_priv->front_offset = init->front_offset; ++ dev_priv->front_pitch = init->front_pitch; ++ dev_priv->back_offset = init->back_offset; ++ dev_priv->back_pitch = init->back_pitch; ++ ++ switch (init->depth_bpp) { ++ case 16: ++ dev_priv->depth_fmt = R128_DATATYPE_RGB565; ++ break; ++ case 24: ++ case 32: ++ default: ++ dev_priv->depth_fmt = R128_DATATYPE_ARGB8888; ++ break; ++ } ++ dev_priv->depth_offset = init->depth_offset; ++ dev_priv->depth_pitch = init->depth_pitch; ++ dev_priv->span_offset = init->span_offset; ++ ++ dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch / 8) << 21) | ++ (dev_priv->front_offset >> 5)); ++ dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch / 8) << 21) | ++ (dev_priv->back_offset >> 5)); ++ dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) | ++ (dev_priv->depth_offset >> 5) | ++ R128_DST_TILE); ++ dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) | ++ (dev_priv->span_offset >> 5)); ++ ++ dev_priv->sarea = drm_getsarea(dev); ++ if (!dev_priv->sarea) { ++ DRM_ERROR("could not find sarea!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 
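++ /* Reviewer's note on the *_pitch_offset_c words packed further up in
++  * this function: the pitch is stored in 8-byte units in bits 21 and
++  * up, the offset in 32-byte units in the low bits (hence the /8,
++  * << 21 and >> 5). Illustrative numbers only: a 4096-byte pitch at
++  * offset 0 packs to (4096/8) << 21 = 0x40000000.
++  */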
++ if (!dev_priv->mmio) { ++ DRM_ERROR("could not find mmio region!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset); ++ if (!dev_priv->cce_ring) { ++ DRM_ERROR("could not find cce ring region!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); ++ if (!dev_priv->ring_rptr) { ++ DRM_ERROR("could not find ring read pointer!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ dev->agp_buffer_token = init->buffers_offset; ++ dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); ++ if (!dev->agp_buffer_map) { ++ DRM_ERROR("could not find dma buffer region!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ ++ if (!dev_priv->is_pci) { ++ dev_priv->agp_textures = ++ drm_core_findmap(dev, init->agp_textures_offset); ++ if (!dev_priv->agp_textures) { ++ DRM_ERROR("could not find agp texture region!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -EINVAL; ++ } ++ } ++ ++ dev_priv->sarea_priv = ++ (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle + ++ init->sarea_priv_offset); ++ ++#if __OS_HAS_AGP ++ if (!dev_priv->is_pci) { ++ drm_core_ioremap(dev_priv->cce_ring, dev); ++ drm_core_ioremap(dev_priv->ring_rptr, dev); ++ drm_core_ioremap(dev->agp_buffer_map, dev); ++ if (!dev_priv->cce_ring->handle || ++ !dev_priv->ring_rptr->handle || ++ !dev->agp_buffer_map->handle) { ++ DRM_ERROR("Could not ioremap agp regions!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -ENOMEM; ++ } ++ } else ++#endif ++ { ++ dev_priv->cce_ring->handle = (void *)dev_priv->cce_ring->offset; ++ dev_priv->ring_rptr->handle = ++ (void *)dev_priv->ring_rptr->offset; ++ dev->agp_buffer_map->handle = ++ (void *)dev->agp_buffer_map->offset; ++ } ++ ++#if __OS_HAS_AGP ++ if (!dev_priv->is_pci) ++ dev_priv->cce_buffers_offset = dev->agp->base; ++ else ++#endif ++ dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual; ++ ++ dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle; ++ dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle ++ + init->ring_size / sizeof(u32)); ++ dev_priv->ring.size = init->ring_size; ++ dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); ++ ++ dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; ++ ++ dev_priv->ring.high_mark = 128; ++ ++ dev_priv->sarea_priv->last_frame = 0; ++ R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); ++ ++ dev_priv->sarea_priv->last_dispatch = 0; ++ R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch); ++ ++#if __OS_HAS_AGP ++ if (dev_priv->is_pci) { ++#endif ++ dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); ++ dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN; ++ dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE; ++ dev_priv->gart_info.addr = NULL; ++ dev_priv->gart_info.bus_addr = 0; ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; ++ if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { ++ DRM_ERROR("failed to init PCI GART!\n"); ++ dev->dev_private = (void *)dev_priv; ++ r128_do_cleanup_cce(dev); ++ return -ENOMEM; ++ } ++ R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr); ++#if __OS_HAS_AGP ++ } ++#endif ++ ++ r128_cce_init_ring_buffer(dev, dev_priv); 
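++ /* Reviewer's note on the ring bookkeeping above, worked through for an
++  * illustrative 64 KiB ring: size_l2qw = drm_order(65536 / 8) =
++  * log2(8192) = 13, the ring size in quad-words as fed into
++  * R128_PM4_BUFFER_CNTL by r128_do_cce_start(), and tail_mask =
++  * 65536/4 - 1 = 0x3fff, which masks a dword index so the tail wraps
++  * around for free.
++  */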
++ r128_cce_load_microcode(dev_priv); ++ ++ dev->dev_private = (void *)dev_priv; ++ ++ r128_do_engine_reset(dev); ++ ++ return 0; ++} ++ ++int r128_do_cleanup_cce(struct drm_device * dev) ++{ ++ ++ /* Make sure interrupts are disabled here because the uninstall ioctl ++ * may not have been called from userspace and after dev_private ++ * is freed, it's too late. ++ */ ++ if (dev->irq_enabled) ++ drm_irq_uninstall(dev); ++ ++ if (dev->dev_private) { ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ ++#if __OS_HAS_AGP ++ if (!dev_priv->is_pci) { ++ if (dev_priv->cce_ring != NULL) ++ drm_core_ioremapfree(dev_priv->cce_ring, dev); ++ if (dev_priv->ring_rptr != NULL) ++ drm_core_ioremapfree(dev_priv->ring_rptr, dev); ++ if (dev->agp_buffer_map != NULL) { ++ drm_core_ioremapfree(dev->agp_buffer_map, dev); ++ dev->agp_buffer_map = NULL; ++ } ++ } else ++#endif ++ { ++ if (dev_priv->gart_info.bus_addr) ++ if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) ++ DRM_ERROR("failed to cleanup PCI GART!\n"); ++ } ++ ++ drm_free(dev->dev_private, sizeof(drm_r128_private_t), ++ DRM_MEM_DRIVER); ++ dev->dev_private = NULL; ++ } ++ ++ return 0; ++} ++ ++int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_init_t *init = data; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ switch (init->func) { ++ case R128_INIT_CCE: ++ return r128_do_init_cce(dev, init); ++ case R128_CLEANUP_CCE: ++ return r128_do_cleanup_cce(dev); ++ } ++ ++ return -EINVAL; ++} ++ ++int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) { ++ DRM_DEBUG("while CCE running\n"); ++ return 0; ++ } ++ ++ r128_do_cce_start(dev_priv); ++ ++ return 0; ++} ++ ++/* Stop the CCE. The engine must have been idled before calling this ++ * routine. ++ */ ++int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_cce_stop_t *stop = data; ++ int ret; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* Flush any pending CCE commands. This ensures any outstanding ++ * commands are executed by the engine before we turn it off. ++ */ ++ if (stop->flush) { ++ r128_do_cce_flush(dev_priv); ++ } ++ ++ /* If we fail to make the engine go idle, we return an error ++ * code so that the DRM ioctl wrapper can try again. ++ */ ++ if (stop->idle) { ++ ret = r128_do_cce_idle(dev_priv); ++ if (ret) ++ return ret; ++ } ++ ++ /* Finally, we can turn off the CCE. If the engine isn't idle, ++ * we will get some dropped triangles as they won't be fully ++ * rendered before the CCE is shut down. ++ */ ++ r128_do_cce_stop(dev_priv); ++ ++ /* Reset the engine */ ++ r128_do_engine_reset(dev); ++ ++ return 0; ++} ++ ++/* Just reset the CCE ring. Called as part of an X Server engine reset.
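++ * Only the ring pointers are reset here; cce_running is cleared below, ++ * so the server has to start the CCE again afterwards.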
++ */ ++int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_DEBUG("called before init done\n"); ++ return -EINVAL; ++ } ++ ++ r128_do_cce_reset(dev_priv); ++ ++ /* The CCE is no longer running after an engine reset */ ++ dev_priv->cce_running = 0; ++ ++ return 0; ++} ++ ++int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (dev_priv->cce_running) { ++ r128_do_cce_flush(dev_priv); ++ } ++ ++ return r128_do_cce_idle(dev_priv); ++} ++ ++int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return r128_do_engine_reset(dev); ++} ++ ++int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ return -EINVAL; ++} ++ ++/* ================================================================ ++ * Freelist management ++ */ ++#define R128_BUFFER_USED 0xffffffff ++#define R128_BUFFER_FREE 0 ++ ++#if 0 ++static int r128_freelist_init(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ struct drm_buf *buf; ++ drm_r128_buf_priv_t *buf_priv; ++ drm_r128_freelist_t *entry; ++ int i; ++ ++ dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); ++ if (dev_priv->head == NULL) ++ return -ENOMEM; ++ ++ memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t)); ++ dev_priv->head->age = R128_BUFFER_USED; ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ buf_priv = buf->dev_private; ++ ++ entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); ++ if (!entry) ++ return -ENOMEM; ++ ++ entry->age = R128_BUFFER_FREE; ++ entry->buf = buf; ++ entry->prev = dev_priv->head; ++ entry->next = dev_priv->head->next; ++ if (!entry->next) ++ dev_priv->tail = entry; ++ ++ buf_priv->discard = 0; ++ buf_priv->dispatched = 0; ++ buf_priv->list_entry = entry; ++ ++ dev_priv->head->next = entry; ++ ++ if (dev_priv->head->next) ++ dev_priv->head->next->prev = entry; ++ } ++ ++ return 0; ++ ++} ++#endif ++ ++static struct drm_buf *r128_freelist_get(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_buf_priv_t *buf_priv; ++ struct drm_buf *buf; ++ int i, t; ++ ++ /* FIXME: Optimize -- use freelist code */ ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ buf_priv = buf->dev_private; ++ if (buf->file_priv == 0) ++ return buf; ++ } ++ ++ for (t = 0; t < dev_priv->usec_timeout; t++) { ++ u32 done_age = R128_READ(R128_LAST_DISPATCH_REG); ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ buf_priv = buf->dev_private; ++ if (buf->pending && buf_priv->age <= done_age) { ++ /* The buffer has been processed, so it ++ * can now be used. 
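++ * Its age stamp is at or below the last dispatch age the ++ * engine wrote back, so no pending work references it.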
++ */ ++ buf->pending = 0; ++ return buf; ++ } ++ } ++ DRM_UDELAY(1); ++ } ++ ++ DRM_DEBUG("returning NULL!\n"); ++ return NULL; ++} ++ ++void r128_freelist_reset(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int i; ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_r128_buf_priv_t *buf_priv = buf->dev_private; ++ buf_priv->age = 0; ++ } ++} ++ ++/* ================================================================ ++ * CCE command submission ++ */ ++ ++int r128_wait_ring(drm_r128_private_t * dev_priv, int n) ++{ ++ drm_r128_ring_buffer_t *ring = &dev_priv->ring; ++ int i; ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ r128_update_ring_snapshot(dev_priv); ++ if (ring->space >= n) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++ /* FIXME: This is being ignored... */ ++ DRM_ERROR("failed!\n"); ++ return -EBUSY; ++} ++ ++static int r128_cce_get_buffers(struct drm_device * dev, ++ struct drm_file *file_priv, ++ struct drm_dma * d) ++{ ++ int i; ++ struct drm_buf *buf; ++ ++ for (i = d->granted_count; i < d->request_count; i++) { ++ buf = r128_freelist_get(dev); ++ if (!buf) ++ return -EAGAIN; ++ ++ buf->file_priv = file_priv; ++ ++ if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, ++ sizeof(buf->idx))) ++ return -EFAULT; ++ if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, ++ sizeof(buf->total))) ++ return -EFAULT; ++ ++ d->granted_count++; ++ } ++ return 0; ++} ++ ++int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int ret = 0; ++ struct drm_dma *d = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* Please don't send us buffers. ++ */ ++ if (d->send_count != 0) { ++ DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", ++ DRM_CURRENTPID, d->send_count); ++ return -EINVAL; ++ } ++ ++ /* We'll send you buffers. ++ */ ++ if (d->request_count < 0 || d->request_count > dma->buf_count) { ++ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", ++ DRM_CURRENTPID, d->request_count, dma->buf_count); ++ return -EINVAL; ++ } ++ ++ d->granted_count = 0; ++ ++ if (d->request_count) { ++ ret = r128_cce_get_buffers(dev, file_priv, d); ++ } ++ ++ return ret; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/r128_drm.h git-nokia/drivers/gpu/drm-tungsten/r128_drm.h +--- git/drivers/gpu/drm-tungsten/r128_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/r128_drm.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,326 @@ ++/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*- ++ * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com ++ */ ++/* ++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Gareth Hughes ++ * Kevin E. Martin ++ */ ++ ++#ifndef __R128_DRM_H__ ++#define __R128_DRM_H__ ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the X server file (r128_sarea.h) ++ */ ++#ifndef __R128_SAREA_DEFINES__ ++#define __R128_SAREA_DEFINES__ ++ ++/* What needs to be changed for the current vertex buffer? ++ */ ++#define R128_UPLOAD_CONTEXT 0x001 ++#define R128_UPLOAD_SETUP 0x002 ++#define R128_UPLOAD_TEX0 0x004 ++#define R128_UPLOAD_TEX1 0x008 ++#define R128_UPLOAD_TEX0IMAGES 0x010 ++#define R128_UPLOAD_TEX1IMAGES 0x020 ++#define R128_UPLOAD_CORE 0x040 ++#define R128_UPLOAD_MASKS 0x080 ++#define R128_UPLOAD_WINDOW 0x100 ++#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */ ++#define R128_REQUIRE_QUIESCENCE 0x400 ++#define R128_UPLOAD_ALL 0x7ff ++ ++#define R128_FRONT 0x1 ++#define R128_BACK 0x2 ++#define R128_DEPTH 0x4 ++ ++/* Primitive types ++ */ ++#define R128_POINTS 0x1 ++#define R128_LINES 0x2 ++#define R128_LINE_STRIP 0x3 ++#define R128_TRIANGLES 0x4 ++#define R128_TRIANGLE_FAN 0x5 ++#define R128_TRIANGLE_STRIP 0x6 ++ ++/* Vertex/indirect buffer size ++ */ ++#define R128_BUFFER_SIZE 16384 ++ ++/* Byte offsets for indirect buffer data ++ */ ++#define R128_INDEX_PRIM_OFFSET 20 ++#define R128_HOSTDATA_BLIT_OFFSET 32 ++ ++/* Keep these small for testing. ++ */ ++#define R128_NR_SAREA_CLIPRECTS 12 ++ ++/* There are 2 heaps (local/AGP). Each region within a heap is a ++ * minimum of 64k, and there are at most 64 of them per heap. 
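++ * For example, a 16 MiB heap split into the full 64 regions ++ * yields 256 KiB per region.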
++ */ ++#define R128_LOCAL_TEX_HEAP 0 ++#define R128_AGP_TEX_HEAP 1 ++#define R128_NR_TEX_HEAPS 2 ++#define R128_NR_TEX_REGIONS 64 ++#define R128_LOG_TEX_GRANULARITY 16 ++ ++#define R128_NR_CONTEXT_REGS 12 ++ ++#define R128_MAX_TEXTURE_LEVELS 11 ++#define R128_MAX_TEXTURE_UNITS 2 ++ ++#endif /* __R128_SAREA_DEFINES__ */ ++ ++typedef struct { ++ /* Context state - can be written in one large chunk */ ++ unsigned int dst_pitch_offset_c; ++ unsigned int dp_gui_master_cntl_c; ++ unsigned int sc_top_left_c; ++ unsigned int sc_bottom_right_c; ++ unsigned int z_offset_c; ++ unsigned int z_pitch_c; ++ unsigned int z_sten_cntl_c; ++ unsigned int tex_cntl_c; ++ unsigned int misc_3d_state_cntl_reg; ++ unsigned int texture_clr_cmp_clr_c; ++ unsigned int texture_clr_cmp_msk_c; ++ unsigned int fog_color_c; ++ ++ /* Texture state */ ++ unsigned int tex_size_pitch_c; ++ unsigned int constant_color_c; ++ ++ /* Setup state */ ++ unsigned int pm4_vc_fpu_setup; ++ unsigned int setup_cntl; ++ ++ /* Mask state */ ++ unsigned int dp_write_mask; ++ unsigned int sten_ref_mask_c; ++ unsigned int plane_3d_mask_c; ++ ++ /* Window state */ ++ unsigned int window_xy_offset; ++ ++ /* Core state */ ++ unsigned int scale_3d_cntl; ++} drm_r128_context_regs_t; ++ ++/* Setup registers for each texture unit ++ */ ++typedef struct { ++ unsigned int tex_cntl; ++ unsigned int tex_combine_cntl; ++ unsigned int tex_size_pitch; ++ unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS]; ++ unsigned int tex_border_color; ++} drm_r128_texture_regs_t; ++ ++typedef struct drm_r128_sarea { ++ /* The channel for communication of state information to the kernel ++ * on firing a vertex buffer. ++ */ ++ drm_r128_context_regs_t context_state; ++ drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS]; ++ unsigned int dirty; ++ unsigned int vertsize; ++ unsigned int vc_format; ++ ++ /* The current cliprects, or a subset thereof. ++ */ ++ struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS]; ++ unsigned int nbox; ++ ++ /* Counters for client-side throttling of rendering clients. ++ */ ++ unsigned int last_frame; ++ unsigned int last_dispatch; ++ ++ struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1]; ++ unsigned int tex_age[R128_NR_TEX_HEAPS]; ++ int ctx_owner; ++ int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */ ++ int pfCurrentPage; /* which buffer is being displayed? */ ++} drm_r128_sarea_t; ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the Xserver file (xf86drmR128.h) ++ */ ++ ++/* Rage 128 specific ioctls ++ * The device specific ioctl range is 0x40 to 0x79. 
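++ * The values below are offsets from DRM_COMMAND_BASE (0x40), so e.g. ++ * DRM_R128_INIT (0x00) is dispatched as ioctl nr 0x40.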
++ */ ++#define DRM_R128_INIT 0x00 ++#define DRM_R128_CCE_START 0x01 ++#define DRM_R128_CCE_STOP 0x02 ++#define DRM_R128_CCE_RESET 0x03 ++#define DRM_R128_CCE_IDLE 0x04 ++/* 0x05 not used */ ++#define DRM_R128_RESET 0x06 ++#define DRM_R128_SWAP 0x07 ++#define DRM_R128_CLEAR 0x08 ++#define DRM_R128_VERTEX 0x09 ++#define DRM_R128_INDICES 0x0a ++#define DRM_R128_BLIT 0x0b ++#define DRM_R128_DEPTH 0x0c ++#define DRM_R128_STIPPLE 0x0d ++/* 0x0e not used */ ++#define DRM_R128_INDIRECT 0x0f ++#define DRM_R128_FULLSCREEN 0x10 ++#define DRM_R128_CLEAR2 0x11 ++#define DRM_R128_GETPARAM 0x12 ++#define DRM_R128_FLIP 0x13 ++ ++#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t) ++#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START) ++#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t) ++#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET) ++#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE) ++/* 0x05 not used */ ++#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET) ++#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP) ++#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t) ++#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t) ++#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t) ++#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t) ++#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t) ++#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t) ++/* 0x0e not used */ ++#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t) ++#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t) ++#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t) ++#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t) ++#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP) ++ ++typedef struct drm_r128_init { ++ enum { ++ R128_INIT_CCE = 0x01, ++ R128_CLEANUP_CCE = 0x02 ++ } func; ++ unsigned long sarea_priv_offset; ++ int is_pci; ++ int cce_mode; ++ int cce_secure; ++ int ring_size; ++ int usec_timeout; ++ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ unsigned int span_offset; ++ ++ unsigned long fb_offset; ++ unsigned long mmio_offset; ++ unsigned long ring_offset; ++ unsigned long ring_rptr_offset; ++ unsigned long buffers_offset; ++ unsigned long agp_textures_offset; ++} drm_r128_init_t; ++ ++typedef struct drm_r128_cce_stop { ++ int flush; ++ int idle; ++} drm_r128_cce_stop_t; ++ ++typedef struct drm_r128_clear { ++ unsigned int flags; ++ unsigned int clear_color; ++ unsigned int clear_depth; ++ unsigned int color_mask; ++ unsigned int depth_mask; ++} drm_r128_clear_t; ++ ++typedef struct drm_r128_vertex { ++ int prim; ++ int idx; /* Index of vertex buffer */ ++ int count; /* Number of vertices in buffer */ ++ int discard; /* Client finished with buffer? 
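++ * Nonzero lets the kernel recycle the buffer ++ * once the engine has dispatched it.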
*/ ++} drm_r128_vertex_t; ++ ++typedef struct drm_r128_indices { ++ int prim; ++ int idx; ++ int start; ++ int end; ++ int discard; /* Client finished with buffer? */ ++} drm_r128_indices_t; ++ ++typedef struct drm_r128_blit { ++ int idx; ++ int pitch; ++ int offset; ++ int format; ++ unsigned short x, y; ++ unsigned short width, height; ++} drm_r128_blit_t; ++ ++typedef struct drm_r128_depth { ++ enum { ++ R128_WRITE_SPAN = 0x01, ++ R128_WRITE_PIXELS = 0x02, ++ R128_READ_SPAN = 0x03, ++ R128_READ_PIXELS = 0x04 ++ } func; ++ int n; ++ int __user *x; ++ int __user *y; ++ unsigned int __user *buffer; ++ unsigned char __user *mask; ++} drm_r128_depth_t; ++ ++typedef struct drm_r128_stipple { ++ unsigned int __user *mask; ++} drm_r128_stipple_t; ++ ++typedef struct drm_r128_indirect { ++ int idx; ++ int start; ++ int end; ++ int discard; ++} drm_r128_indirect_t; ++ ++typedef struct drm_r128_fullscreen { ++ enum { ++ R128_INIT_FULLSCREEN = 0x01, ++ R128_CLEANUP_FULLSCREEN = 0x02 ++ } func; ++} drm_r128_fullscreen_t; ++ ++/* 2.3: An ioctl to get parameters that aren't available to the 3d ++ * client any other way. ++ */ ++#define R128_PARAM_IRQ_NR 1 ++ ++typedef struct drm_r128_getparam { ++ int param; ++ void __user *value; ++} drm_r128_getparam_t; ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/r128_drv.c git-nokia/drivers/gpu/drm-tungsten/r128_drv.c +--- git/drivers/gpu/drm-tungsten/r128_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/r128_drv.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,113 @@ ++/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*- ++ * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Rickard E. 
(Rik) Faith ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "r128_drm.h" ++#include "r128_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ r128_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | ++ DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, ++ .dev_priv_size = sizeof(drm_r128_buf_priv_t), ++ .preclose = r128_driver_preclose, ++ .lastclose = r128_driver_lastclose, ++ .get_vblank_counter = r128_get_vblank_counter, ++ .enable_vblank = r128_enable_vblank, ++ .disable_vblank = r128_disable_vblank, ++ .irq_preinstall = r128_driver_irq_preinstall, ++ .irq_postinstall = r128_driver_irq_postinstall, ++ .irq_uninstall = r128_driver_irq_uninstall, ++ .irq_handler = r128_driver_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = r128_ioctls, ++ .dma_ioctl = r128_cce_buffers, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) ++ .compat_ioctl = r128_compat_ioctl, ++#endif ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++ ++static int __init r128_init(void) ++{ ++ driver.num_ioctls = r128_max_ioctl; ++ ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit r128_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(r128_init); ++module_exit(r128_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurd git/drivers/gpu/drm-tungsten/r128_drv.h git-nokia/drivers/gpu/drm-tungsten/r128_drv.h +--- git/drivers/gpu/drm-tungsten/r128_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/r128_drv.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,525 @@ ++/* r128_drv.h -- Private header for r128 driver -*- linux-c -*- ++ * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com ++ */ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Rickard E. (Rik) Faith ++ * Kevin E. Martin ++ * Gareth Hughes ++ * Michel Dänzer ++ */ ++ ++#ifndef __R128_DRV_H__ ++#define __R128_DRV_H__ ++ ++/* General customization: ++ */ ++#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc." ++ ++#define DRIVER_NAME "r128" ++#define DRIVER_DESC "ATI Rage 128" ++#define DRIVER_DATE "20030725" ++ ++/* Interface history: ++ * ++ * ?? - ?? ++ * 2.4 - Add support for ycbcr textures (no new ioctls) ++ * 2.5 - Add FLIP ioctl, disable FULLSCREEN. ++ */ ++#define DRIVER_MAJOR 2 ++#define DRIVER_MINOR 5 ++#define DRIVER_PATCHLEVEL 0 ++ ++#define GET_RING_HEAD(dev_priv) R128_READ( R128_PM4_BUFFER_DL_RPTR ) ++ ++typedef struct drm_r128_freelist { ++ unsigned int age; ++ struct drm_buf *buf; ++ struct drm_r128_freelist *next; ++ struct drm_r128_freelist *prev; ++} drm_r128_freelist_t; ++ ++typedef struct drm_r128_ring_buffer { ++ u32 *start; ++ u32 *end; ++ int size; ++ int size_l2qw; ++ ++ u32 tail; ++ u32 tail_mask; ++ int space; ++ ++ int high_mark; ++} drm_r128_ring_buffer_t; ++ ++typedef struct drm_r128_private { ++ drm_r128_ring_buffer_t ring; ++ drm_r128_sarea_t *sarea_priv; ++ ++ int cce_mode; ++ int cce_fifo_size; ++ int cce_running; ++ ++ drm_r128_freelist_t *head; ++ drm_r128_freelist_t *tail; ++ ++ int usec_timeout; ++ int is_pci; ++ unsigned long cce_buffers_offset; ++ ++ atomic_t idle_count; ++ ++ int page_flipping; ++ int current_page; ++ u32 crtc_offset; ++ u32 crtc_offset_cntl; ++ ++ atomic_t vbl_received; ++ ++ u32 color_fmt; ++ unsigned int front_offset; ++ unsigned int front_pitch; ++ unsigned int back_offset; ++ unsigned int back_pitch; ++ ++ u32 depth_fmt; ++ unsigned int depth_offset; ++ unsigned int depth_pitch; ++ unsigned int span_offset; ++ ++ u32 front_pitch_offset_c; ++ u32 back_pitch_offset_c; ++ u32 depth_pitch_offset_c; ++ u32 span_pitch_offset_c; ++ ++ drm_local_map_t *sarea; ++ drm_local_map_t *mmio; ++ drm_local_map_t *cce_ring; ++ drm_local_map_t *ring_rptr; ++ drm_local_map_t *agp_textures; ++ struct drm_ati_pcigart_info gart_info; ++} drm_r128_private_t; ++ ++typedef struct drm_r128_buf_priv { ++ u32 age; ++ int prim; ++ int discard; ++ int dispatched; ++ drm_r128_freelist_t *list_entry; ++} drm_r128_buf_priv_t; ++ ++extern struct drm_ioctl_desc r128_ioctls[]; ++extern int r128_max_ioctl; ++ ++ /* r128_cce.c */ ++extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int r128_cce_buffers(struct
drm_device *dev, void *data, struct drm_file *file_priv); ++ ++extern void r128_freelist_reset(struct drm_device * dev); ++ ++extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n); ++ ++extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); ++extern int r128_do_cleanup_cce(struct drm_device * dev); ++ ++extern int r128_enable_vblank(struct drm_device *dev, int crtc); ++extern void r128_disable_vblank(struct drm_device *dev, int crtc); ++extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc); ++extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); ++extern void r128_driver_irq_preinstall(struct drm_device * dev); ++extern int r128_driver_irq_postinstall(struct drm_device * dev); ++extern void r128_driver_irq_uninstall(struct drm_device * dev); ++extern void r128_driver_lastclose(struct drm_device * dev); ++extern void r128_driver_preclose(struct drm_device * dev, ++ struct drm_file *file_priv); ++ ++extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg); ++ ++/* Register definitions, register access macros and drmAddMap constants ++ * for Rage 128 kernel driver. ++ */ ++ ++#define R128_AUX_SC_CNTL 0x1660 ++# define R128_AUX1_SC_EN (1 << 0) ++# define R128_AUX1_SC_MODE_OR (0 << 1) ++# define R128_AUX1_SC_MODE_NAND (1 << 1) ++# define R128_AUX2_SC_EN (1 << 2) ++# define R128_AUX2_SC_MODE_OR (0 << 3) ++# define R128_AUX2_SC_MODE_NAND (1 << 3) ++# define R128_AUX3_SC_EN (1 << 4) ++# define R128_AUX3_SC_MODE_OR (0 << 5) ++# define R128_AUX3_SC_MODE_NAND (1 << 5) ++#define R128_AUX1_SC_LEFT 0x1664 ++#define R128_AUX1_SC_RIGHT 0x1668 ++#define R128_AUX1_SC_TOP 0x166c ++#define R128_AUX1_SC_BOTTOM 0x1670 ++#define R128_AUX2_SC_LEFT 0x1674 ++#define R128_AUX2_SC_RIGHT 0x1678 ++#define R128_AUX2_SC_TOP 0x167c ++#define R128_AUX2_SC_BOTTOM 0x1680 ++#define R128_AUX3_SC_LEFT 0x1684 ++#define R128_AUX3_SC_RIGHT 0x1688 ++#define R128_AUX3_SC_TOP 0x168c ++#define R128_AUX3_SC_BOTTOM 0x1690 ++ ++#define R128_BRUSH_DATA0 0x1480 ++#define R128_BUS_CNTL 0x0030 ++# define R128_BUS_MASTER_DIS (1 << 6) ++ ++#define R128_CLOCK_CNTL_INDEX 0x0008 ++#define R128_CLOCK_CNTL_DATA 0x000c ++# define R128_PLL_WR_EN (1 << 7) ++#define R128_CONSTANT_COLOR_C 0x1d34 ++#define R128_CRTC_OFFSET 0x0224 ++#define R128_CRTC_OFFSET_CNTL 0x0228 ++# define R128_CRTC_OFFSET_FLIP_CNTL (1 << 16) ++ ++#define R128_DP_GUI_MASTER_CNTL 0x146c ++# define R128_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) ++# define R128_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) ++# define R128_GMC_BRUSH_SOLID_COLOR (13 << 4) ++# define R128_GMC_BRUSH_NONE (15 << 4) ++# define R128_GMC_DST_16BPP (4 << 8) ++# define R128_GMC_DST_24BPP (5 << 8) ++# define R128_GMC_DST_32BPP (6 << 8) ++# define R128_GMC_DST_DATATYPE_SHIFT 8 ++# define R128_GMC_SRC_DATATYPE_COLOR (3 << 12) ++# define R128_DP_SRC_SOURCE_MEMORY (2 << 24) ++# define R128_DP_SRC_SOURCE_HOST_DATA (3 << 24) ++# define R128_GMC_CLR_CMP_CNTL_DIS (1 << 28) ++# define R128_GMC_AUX_CLIP_DIS (1 << 29) ++# define R128_GMC_WR_MSK_DIS (1 << 30) ++# define R128_ROP3_S 0x00cc0000 ++# define R128_ROP3_P 0x00f00000 ++#define R128_DP_WRITE_MASK 0x16cc ++#define R128_DST_PITCH_OFFSET_C 0x1c80 ++# define R128_DST_TILE (1 << 31) ++ ++#define R128_GEN_INT_CNTL 0x0040 ++# define R128_CRTC_VBLANK_INT_EN (1 << 0) ++#define R128_GEN_INT_STATUS 0x0044 ++# define R128_CRTC_VBLANK_INT (1 << 0) ++# define R128_CRTC_VBLANK_INT_AK (1 << 0) ++#define R128_GEN_RESET_CNTL 0x00f0 ++# define R128_SOFT_RESET_GUI (1 << 0) ++ ++#define R128_GUI_SCRATCH_REG0 0x15e0 ++#define 
R128_GUI_SCRATCH_REG1 0x15e4 ++#define R128_GUI_SCRATCH_REG2 0x15e8 ++#define R128_GUI_SCRATCH_REG3 0x15ec ++#define R128_GUI_SCRATCH_REG4 0x15f0 ++#define R128_GUI_SCRATCH_REG5 0x15f4 ++ ++#define R128_GUI_STAT 0x1740 ++# define R128_GUI_FIFOCNT_MASK 0x0fff ++# define R128_GUI_ACTIVE (1 << 31) ++ ++#define R128_MCLK_CNTL 0x000f ++# define R128_FORCE_GCP (1 << 16) ++# define R128_FORCE_PIPE3D_CP (1 << 17) ++# define R128_FORCE_RCP (1 << 18) ++ ++#define R128_PC_GUI_CTLSTAT 0x1748 ++#define R128_PC_NGUI_CTLSTAT 0x0184 ++# define R128_PC_FLUSH_GUI (3 << 0) ++# define R128_PC_RI_GUI (1 << 2) ++# define R128_PC_FLUSH_ALL 0x00ff ++# define R128_PC_BUSY (1 << 31) ++ ++#define R128_PCI_GART_PAGE 0x017c ++#define R128_PRIM_TEX_CNTL_C 0x1cb0 ++ ++#define R128_SCALE_3D_CNTL 0x1a00 ++#define R128_SEC_TEX_CNTL_C 0x1d00 ++#define R128_SEC_TEXTURE_BORDER_COLOR_C 0x1d3c ++#define R128_SETUP_CNTL 0x1bc4 ++#define R128_STEN_REF_MASK_C 0x1d40 ++ ++#define R128_TEX_CNTL_C 0x1c9c ++# define R128_TEX_CACHE_FLUSH (1 << 23) ++ ++#define R128_WAIT_UNTIL 0x1720 ++# define R128_EVENT_CRTC_OFFSET (1 << 0) ++#define R128_WINDOW_XY_OFFSET 0x1bcc ++ ++/* CCE registers ++ */ ++#define R128_PM4_BUFFER_OFFSET 0x0700 ++#define R128_PM4_BUFFER_CNTL 0x0704 ++# define R128_PM4_MASK (15 << 28) ++# define R128_PM4_NONPM4 (0 << 28) ++# define R128_PM4_192PIO (1 << 28) ++# define R128_PM4_192BM (2 << 28) ++# define R128_PM4_128PIO_64INDBM (3 << 28) ++# define R128_PM4_128BM_64INDBM (4 << 28) ++# define R128_PM4_64PIO_128INDBM (5 << 28) ++# define R128_PM4_64BM_128INDBM (6 << 28) ++# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28) ++# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28) ++# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28) ++# define R128_PM4_BUFFER_CNTL_NOUPDATE (1 << 27) ++ ++#define R128_PM4_BUFFER_WM_CNTL 0x0708 ++# define R128_WMA_SHIFT 0 ++# define R128_WMB_SHIFT 8 ++# define R128_WMC_SHIFT 16 ++# define R128_WB_WM_SHIFT 24 ++ ++#define R128_PM4_BUFFER_DL_RPTR_ADDR 0x070c ++#define R128_PM4_BUFFER_DL_RPTR 0x0710 ++#define R128_PM4_BUFFER_DL_WPTR 0x0714 ++# define R128_PM4_BUFFER_DL_DONE (1 << 31) ++ ++#define R128_PM4_VC_FPU_SETUP 0x071c ++ ++#define R128_PM4_IW_INDOFF 0x0738 ++#define R128_PM4_IW_INDSIZE 0x073c ++ ++#define R128_PM4_STAT 0x07b8 ++# define R128_PM4_FIFOCNT_MASK 0x0fff ++# define R128_PM4_BUSY (1 << 16) ++# define R128_PM4_GUI_ACTIVE (1 << 31) ++ ++#define R128_PM4_MICROCODE_ADDR 0x07d4 ++#define R128_PM4_MICROCODE_RADDR 0x07d8 ++#define R128_PM4_MICROCODE_DATAH 0x07dc ++#define R128_PM4_MICROCODE_DATAL 0x07e0 ++ ++#define R128_PM4_BUFFER_ADDR 0x07f0 ++#define R128_PM4_MICRO_CNTL 0x07fc ++# define R128_PM4_MICRO_FREERUN (1 << 30) ++ ++#define R128_PM4_FIFO_DATA_EVEN 0x1000 ++#define R128_PM4_FIFO_DATA_ODD 0x1004 ++ ++/* CCE command packets ++ */ ++#define R128_CCE_PACKET0 0x00000000 ++#define R128_CCE_PACKET1 0x40000000 ++#define R128_CCE_PACKET2 0x80000000 ++#define R128_CCE_PACKET3 0xC0000000 ++# define R128_CNTL_HOSTDATA_BLT 0x00009400 ++# define R128_CNTL_PAINT_MULTI 0x00009A00 ++# define R128_CNTL_BITBLT_MULTI 0x00009B00 ++# define R128_3D_RNDR_GEN_INDX_PRIM 0x00002300 ++ ++#define R128_CCE_PACKET_MASK 0xC0000000 ++#define R128_CCE_PACKET_COUNT_MASK 0x3fff0000 ++#define R128_CCE_PACKET0_REG_MASK 0x000007ff ++#define R128_CCE_PACKET1_REG0_MASK 0x000007ff ++#define R128_CCE_PACKET1_REG1_MASK 0x003ff800 ++ ++#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE 0x00000000 ++#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT 0x00000001 ++#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE 0x00000002 ++#define 
R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE 0x00000003 ++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004 ++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005 ++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006 ++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 0x00000007 ++#define R128_CCE_VC_CNTL_PRIM_WALK_IND 0x00000010 ++#define R128_CCE_VC_CNTL_PRIM_WALK_LIST 0x00000020 ++#define R128_CCE_VC_CNTL_PRIM_WALK_RING 0x00000030 ++#define R128_CCE_VC_CNTL_NUM_SHIFT 16 ++ ++#define R128_DATATYPE_VQ 0 ++#define R128_DATATYPE_CI4 1 ++#define R128_DATATYPE_CI8 2 ++#define R128_DATATYPE_ARGB1555 3 ++#define R128_DATATYPE_RGB565 4 ++#define R128_DATATYPE_RGB888 5 ++#define R128_DATATYPE_ARGB8888 6 ++#define R128_DATATYPE_RGB332 7 ++#define R128_DATATYPE_Y8 8 ++#define R128_DATATYPE_RGB8 9 ++#define R128_DATATYPE_CI16 10 ++#define R128_DATATYPE_YVYU422 11 ++#define R128_DATATYPE_VYUY422 12 ++#define R128_DATATYPE_AYUV444 14 ++#define R128_DATATYPE_ARGB4444 15 ++ ++/* Constants */ ++#define R128_AGP_OFFSET 0x02000000 ++ ++#define R128_WATERMARK_L 16 ++#define R128_WATERMARK_M 8 ++#define R128_WATERMARK_N 8 ++#define R128_WATERMARK_K 128 ++ ++#define R128_MAX_USEC_TIMEOUT 100000 /* 100 ms */ ++ ++#define R128_LAST_FRAME_REG R128_GUI_SCRATCH_REG0 ++#define R128_LAST_DISPATCH_REG R128_GUI_SCRATCH_REG1 ++#define R128_MAX_VB_AGE 0x7fffffff ++#define R128_MAX_VB_VERTS (0xffff) ++ ++#define R128_RING_HIGH_MARK 128 ++ ++#define R128_PERFORMANCE_BOXES 0 ++ ++#define R128_PCIGART_TABLE_SIZE 32768 ++ ++#define R128_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) ++#define R128_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) ++#define R128_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) ) ++#define R128_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) ) ++ ++#define R128_WRITE_PLL(addr,val) \ ++do { \ ++ R128_WRITE8(R128_CLOCK_CNTL_INDEX, \ ++ ((addr) & 0x1f) | R128_PLL_WR_EN); \ ++ R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \ ++} while (0) ++ ++#define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \ ++ ((n) << 16) | ((reg) >> 2)) ++#define CCE_PACKET1( reg0, reg1 ) (R128_CCE_PACKET1 | \ ++ (((reg1) >> 2) << 11) | ((reg0) >> 2)) ++#define CCE_PACKET2() (R128_CCE_PACKET2) ++#define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \ ++ (pkt) | ((n) << 16)) ++ ++static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_ring_buffer_t *ring = &dev_priv->ring; ++ ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32); ++ if (ring->space <= 0) ++ ring->space += ring->size; ++} ++ ++/* ================================================================ ++ * Misc helper macros ++ */ ++ ++#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ ++do { \ ++ drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \ ++ if ( ring->space < ring->high_mark ) { \ ++ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \ ++ r128_update_ring_snapshot( dev_priv ); \ ++ if ( ring->space >= ring->high_mark ) \ ++ goto __ring_space_done; \ ++ DRM_UDELAY(1); \ ++ } \ ++ DRM_ERROR( "ring space check failed!\n" ); \ ++ return -EBUSY; \ ++ } \ ++ __ring_space_done: \ ++ ; \ ++} while (0) ++ ++#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ ++do { \ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; \ ++ if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) { \ ++ int __ret = r128_do_cce_idle( dev_priv ); \ ++ if ( __ret ) return __ret; \ ++ sarea_priv->last_dispatch = 0; \ ++ r128_freelist_reset( dev ); \ ++ } \ ++} while (0) ++ ++#define R128_WAIT_UNTIL_PAGE_FLIPPED() do { \ ++ OUT_RING( 
CCE_PACKET0( R128_WAIT_UNTIL, 0 ) ); \ ++ OUT_RING( R128_EVENT_CRTC_OFFSET ); \ ++} while (0) ++ ++/* ================================================================ ++ * Ring control ++ */ ++ ++#define R128_VERBOSE 0 ++ ++#define RING_LOCALS \ ++ int write, _nr; unsigned int tail_mask; volatile u32 *ring; ++ ++#define BEGIN_RING( n ) do { \ ++ if ( R128_VERBOSE ) { \ ++ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ ++ } \ ++ if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ ++ COMMIT_RING(); \ ++ r128_wait_ring( dev_priv, (n) * sizeof(u32) ); \ ++ } \ ++ _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ ++ ring = dev_priv->ring.start; \ ++ write = dev_priv->ring.tail; \ ++ tail_mask = dev_priv->ring.tail_mask; \ ++} while (0) ++ ++/* You can set this to zero if you want. If the card locks up, you'll ++ * need to keep this set. It works around a bug in early revs of the ++ * Rage 128 chipset, where the CCE would read 32 dwords past the end of ++ * the ring buffer before wrapping around. ++ */ ++#define R128_BROKEN_CCE 1 ++ ++#define ADVANCE_RING() do { \ ++ if ( R128_VERBOSE ) { \ ++ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ ++ write, dev_priv->ring.tail ); \ ++ } \ ++ if ( R128_BROKEN_CCE && write < 32 ) { \ ++ memcpy( dev_priv->ring.end, \ ++ dev_priv->ring.start, \ ++ write * sizeof(u32) ); \ ++ } \ ++ if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \ ++ DRM_ERROR( \ ++ "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ ++ ((dev_priv->ring.tail + _nr) & tail_mask), \ ++ write, __LINE__); \ ++ } else \ ++ dev_priv->ring.tail = write; \ ++} while (0) ++ ++#define COMMIT_RING() do { \ ++ if ( R128_VERBOSE ) { \ ++ DRM_INFO( "COMMIT_RING() tail=0x%06x\n", \ ++ dev_priv->ring.tail ); \ ++ } \ ++ DRM_MEMORYBARRIER(); \ ++ R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail ); \ ++ R128_READ( R128_PM4_BUFFER_DL_WPTR ); \ ++} while (0) ++ ++#define OUT_RING( x ) do { \ ++ if ( R128_VERBOSE ) { \ ++ DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \ ++ (unsigned int)(x), write ); \ ++ } \ ++ ring[write++] = cpu_to_le32( x ); \ ++ write &= tail_mask; \ ++} while (0) ++ ++#endif /* __R128_DRV_H__ */ +diff -Nurd git/drivers/gpu/drm-tungsten/r128_ioc32.c git-nokia/drivers/gpu/drm-tungsten/r128_ioc32.c +--- git/drivers/gpu/drm-tungsten/r128_ioc32.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/r128_ioc32.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,222 @@ ++/** ++ * \file r128_ioc32.c ++ * ++ * 32-bit ioctl compatibility routines for the R128 DRM. ++ * ++ * \author Dave Airlie with code from patches by Egbert Eich ++ * ++ * Copyright (C) Paul Mackerras 2005 ++ * Copyright (C) Egbert Eich 2003,2004 ++ * Copyright (C) Dave Airlie 2005 ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ */ ++#include ++ ++#include "drmP.h" ++#include "drm.h" ++#include "r128_drm.h" ++ ++typedef struct drm_r128_init32 { ++ int func; ++ unsigned int sarea_priv_offset; ++ int is_pci; ++ int cce_mode; ++ int cce_secure; ++ int ring_size; ++ int usec_timeout; ++ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ unsigned int span_offset; ++ ++ unsigned int fb_offset; ++ unsigned int mmio_offset; ++ unsigned int ring_offset; ++ unsigned int ring_rptr_offset; ++ unsigned int buffers_offset; ++ unsigned int agp_textures_offset; ++} drm_r128_init32_t; ++ ++static int compat_r128_init(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_r128_init32_t init32; ++ drm_r128_init_t __user *init; ++ ++ if (copy_from_user(&init32, (void __user *)arg, sizeof(init32))) ++ return -EFAULT; ++ ++ init = compat_alloc_user_space(sizeof(*init)); ++ if (!access_ok(VERIFY_WRITE, init, sizeof(*init)) ++ || __put_user(init32.func, &init->func) ++ || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset) ++ || __put_user(init32.is_pci, &init->is_pci) ++ || __put_user(init32.cce_mode, &init->cce_mode) ++ || __put_user(init32.cce_secure, &init->cce_secure) ++ || __put_user(init32.ring_size, &init->ring_size) ++ || __put_user(init32.usec_timeout, &init->usec_timeout) ++ || __put_user(init32.fb_bpp, &init->fb_bpp) ++ || __put_user(init32.front_offset, &init->front_offset) ++ || __put_user(init32.front_pitch, &init->front_pitch) ++ || __put_user(init32.back_offset, &init->back_offset) ++ || __put_user(init32.back_pitch, &init->back_pitch) ++ || __put_user(init32.depth_bpp, &init->depth_bpp) ++ || __put_user(init32.depth_offset, &init->depth_offset) ++ || __put_user(init32.depth_pitch, &init->depth_pitch) ++ || __put_user(init32.span_offset, &init->span_offset) ++ || __put_user(init32.fb_offset, &init->fb_offset) ++ || __put_user(init32.mmio_offset, &init->mmio_offset) ++ || __put_user(init32.ring_offset, &init->ring_offset) ++ || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset) ++ || __put_user(init32.buffers_offset, &init->buffers_offset) ++ || __put_user(init32.agp_textures_offset, ++ &init->agp_textures_offset)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_R128_INIT, (unsigned long)init); ++} ++ ++ ++typedef struct drm_r128_depth32 { ++ int func; ++ int n; ++ u32 x; ++ u32 y; ++ u32 buffer; ++ u32 mask; ++} drm_r128_depth32_t; ++ ++static int compat_r128_depth(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_r128_depth32_t depth32; ++ drm_r128_depth_t __user *depth; ++ ++ if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32))) ++ return -EFAULT; ++ ++ depth = compat_alloc_user_space(sizeof(*depth)); ++ if (!access_ok(VERIFY_WRITE, depth, sizeof(*depth)) ++ || __put_user(depth32.func, &depth->func) ++ || __put_user(depth32.n, &depth->n) ++ || __put_user((int __user *)(unsigned long)depth32.x, &depth->x) ++ || 
__put_user((int __user *)(unsigned long)depth32.y, &depth->y) ++ || __put_user((unsigned int __user *)(unsigned long)depth32.buffer, ++ &depth->buffer) ++ || __put_user((unsigned char __user *)(unsigned long)depth32.mask, ++ &depth->mask)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_R128_DEPTH, (unsigned long)depth); ++ ++} ++ ++typedef struct drm_r128_stipple32 { ++ u32 mask; ++} drm_r128_stipple32_t; ++ ++static int compat_r128_stipple(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_r128_stipple32_t stipple32; ++ drm_r128_stipple_t __user *stipple; ++ ++ if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32))) ++ return -EFAULT; ++ ++ stipple = compat_alloc_user_space(sizeof(*stipple)); ++ if (!access_ok(VERIFY_WRITE, stipple, sizeof(*stipple)) ++ || __put_user((unsigned int __user *)(unsigned long)stipple32.mask, ++ &stipple->mask)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple); ++} ++ ++typedef struct drm_r128_getparam32 { ++ int param; ++ u32 value; ++} drm_r128_getparam32_t; ++ ++static int compat_r128_getparam(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_r128_getparam32_t getparam32; ++ drm_r128_getparam_t __user *getparam; ++ ++ if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32))) ++ return -EFAULT; ++ ++ getparam = compat_alloc_user_space(sizeof(*getparam)); ++ if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam)) ++ || __put_user(getparam32.param, &getparam->param) ++ || __put_user((void __user *)(unsigned long)getparam32.value, ++ &getparam->value)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam); ++} ++ ++drm_ioctl_compat_t *r128_compat_ioctls[] = { ++ [DRM_R128_INIT] = compat_r128_init, ++ [DRM_R128_DEPTH] = compat_r128_depth, ++ [DRM_R128_STIPPLE] = compat_r128_stipple, ++ [DRM_R128_GETPARAM] = compat_r128_getparam, ++}; ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/dri/card. ++ * ++ * \param filp file pointer. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. ++ */ ++long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ drm_ioctl_compat_t *fn = NULL; ++ int ret; ++ ++ if (nr < DRM_COMMAND_BASE) ++ return drm_compat_ioctl(filp, cmd, arg); ++ ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) ++ fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE]; ++ ++ lock_kernel(); /* XXX for now */ ++ if (fn != NULL) ++ ret = (*fn)(filp, cmd, arg); ++ else ++ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ unlock_kernel(); ++ ++ return ret; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/r128_irq.c git-nokia/drivers/gpu/drm-tungsten/r128_irq.c +--- git/drivers/gpu/drm-tungsten/r128_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/r128_irq.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,116 @@ ++/* r128_irq.c -- IRQ handling for r128 -*- linux-c -*- */ ++/* ++ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved.
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Keith Whitwell ++ * Eric Anholt ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "r128_drm.h" ++#include "r128_drv.h" ++ ++u32 r128_get_vblank_counter(struct drm_device *dev, int crtc) ++{ ++ const drm_r128_private_t *dev_priv = dev->dev_private; ++ ++ if (crtc != 0) ++ return 0; ++ ++ return atomic_read(&dev_priv->vbl_received); ++} ++ ++irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) ++{ ++ struct drm_device *dev = (struct drm_device *) arg; ++ drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; ++ int status; ++ ++ status = R128_READ(R128_GEN_INT_STATUS); ++ ++ /* VBLANK interrupt */ ++ if (status & R128_CRTC_VBLANK_INT) { ++ R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); ++ atomic_inc(&dev_priv->vbl_received); ++ drm_handle_vblank(dev, 0); ++ return IRQ_HANDLED; ++ } ++ return IRQ_NONE; ++} ++ ++int r128_enable_vblank(struct drm_device *dev, int crtc) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ ++ if (crtc != 0) { ++ DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc); ++ return -EINVAL; ++ } ++ ++ R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN); ++ return 0; ++} ++ ++void r128_disable_vblank(struct drm_device *dev, int crtc) ++{ ++ if (crtc != 0) ++ DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc); ++ ++ /* ++ * FIXME: implement proper interrupt disable by using the vblank ++ * counter register (if available) ++ * ++ * R128_WRITE(R128_GEN_INT_CNTL, ++ * R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN); ++ */ ++} ++ ++void r128_driver_irq_preinstall(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; ++ ++ /* Disable *all* interrupts */ ++ R128_WRITE(R128_GEN_INT_CNTL, 0); ++ /* Clear vblank bit if it's already high */ ++ R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); ++} ++ ++int r128_driver_irq_postinstall(struct drm_device * dev) ++{ ++ return drm_vblank_init(dev, 1); ++} ++ ++void r128_driver_irq_uninstall(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; ++ if (!dev_priv) ++ return; ++ ++ /* Disable *all* interrupts */ ++ R128_WRITE(R128_GEN_INT_CNTL, 0); ++} +diff -Nurd git/drivers/gpu/drm-tungsten/r128_state.c git-nokia/drivers/gpu/drm-tungsten/r128_state.c +--- git/drivers/gpu/drm-tungsten/r128_state.c 1970-01-01 01:00:00.000000000 
+0100 ++++ git-nokia/drivers/gpu/drm-tungsten/r128_state.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1681 @@ ++/* r128_state.c -- State support for r128 -*- linux-c -*- ++ * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com ++ */ ++/* ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "r128_drm.h" ++#include "r128_drv.h" ++ ++/* ================================================================ ++ * CCE hardware state programming functions ++ */ ++ ++static void r128_emit_clip_rects(drm_r128_private_t * dev_priv, ++ struct drm_clip_rect * boxes, int count) ++{ ++ u32 aux_sc_cntl = 0x00000000; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING((count < 3 ? 
count : 3) * 5 + 2); ++ ++ if (count >= 1) { ++ OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3)); ++ OUT_RING(boxes[0].x1); ++ OUT_RING(boxes[0].x2 - 1); ++ OUT_RING(boxes[0].y1); ++ OUT_RING(boxes[0].y2 - 1); ++ ++ aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR); ++ } ++ if (count >= 2) { ++ OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3)); ++ OUT_RING(boxes[1].x1); ++ OUT_RING(boxes[1].x2 - 1); ++ OUT_RING(boxes[1].y1); ++ OUT_RING(boxes[1].y2 - 1); ++ ++ aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR); ++ } ++ if (count >= 3) { ++ OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3)); ++ OUT_RING(boxes[2].x1); ++ OUT_RING(boxes[2].x2 - 1); ++ OUT_RING(boxes[2].y1); ++ OUT_RING(boxes[2].y2 - 1); ++ ++ aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR); ++ } ++ ++ OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0)); ++ OUT_RING(aux_sc_cntl); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_context_regs_t *ctx = &sarea_priv->context_state; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0)); ++ OUT_RING(ctx->scale_3d_cntl); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_context_regs_t *ctx = &sarea_priv->context_state; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(13); ++ ++ OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11)); ++ OUT_RING(ctx->dst_pitch_offset_c); ++ OUT_RING(ctx->dp_gui_master_cntl_c); ++ OUT_RING(ctx->sc_top_left_c); ++ OUT_RING(ctx->sc_bottom_right_c); ++ OUT_RING(ctx->z_offset_c); ++ OUT_RING(ctx->z_pitch_c); ++ OUT_RING(ctx->z_sten_cntl_c); ++ OUT_RING(ctx->tex_cntl_c); ++ OUT_RING(ctx->misc_3d_state_cntl_reg); ++ OUT_RING(ctx->texture_clr_cmp_clr_c); ++ OUT_RING(ctx->texture_clr_cmp_msk_c); ++ OUT_RING(ctx->fog_color_c); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_context_regs_t *ctx = &sarea_priv->context_state; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(3); ++ ++ OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP)); ++ OUT_RING(ctx->setup_cntl); ++ OUT_RING(ctx->pm4_vc_fpu_setup); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_context_regs_t *ctx = &sarea_priv->context_state; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(5); ++ ++ OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0)); ++ OUT_RING(ctx->dp_write_mask); ++ ++ OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1)); ++ OUT_RING(ctx->sten_ref_mask_c); ++ OUT_RING(ctx->plane_3d_mask_c); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_context_regs_t *ctx = &sarea_priv->context_state; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0)); ++ OUT_RING(ctx->window_xy_offset); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_context_regs_t *ctx = &sarea_priv->context_state; ++ drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0]; ++ int i; ++ 
RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS); ++ ++ OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C, ++ 2 + R128_MAX_TEXTURE_LEVELS)); ++ OUT_RING(tex->tex_cntl); ++ OUT_RING(tex->tex_combine_cntl); ++ OUT_RING(ctx->tex_size_pitch_c); ++ for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) { ++ OUT_RING(tex->tex_offset[i]); ++ } ++ ++ OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1)); ++ OUT_RING(ctx->constant_color_c); ++ OUT_RING(tex->tex_border_color); ++ ++ ADVANCE_RING(); ++} ++ ++static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1]; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS); ++ ++ OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS)); ++ OUT_RING(tex->tex_cntl); ++ OUT_RING(tex->tex_combine_cntl); ++ for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) { ++ OUT_RING(tex->tex_offset[i]); ++ } ++ ++ OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0)); ++ OUT_RING(tex->tex_border_color); ++ ++ ADVANCE_RING(); ++} ++ ++static void r128_emit_state(drm_r128_private_t * dev_priv) ++{ ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ unsigned int dirty = sarea_priv->dirty; ++ ++ DRM_DEBUG("dirty=0x%08x\n", dirty); ++ ++ if (dirty & R128_UPLOAD_CORE) { ++ r128_emit_core(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_CORE; ++ } ++ ++ if (dirty & R128_UPLOAD_CONTEXT) { ++ r128_emit_context(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT; ++ } ++ ++ if (dirty & R128_UPLOAD_SETUP) { ++ r128_emit_setup(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_SETUP; ++ } ++ ++ if (dirty & R128_UPLOAD_MASKS) { ++ r128_emit_masks(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_MASKS; ++ } ++ ++ if (dirty & R128_UPLOAD_WINDOW) { ++ r128_emit_window(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_WINDOW; ++ } ++ ++ if (dirty & R128_UPLOAD_TEX0) { ++ r128_emit_tex0(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_TEX0; ++ } ++ ++ if (dirty & R128_UPLOAD_TEX1) { ++ r128_emit_tex1(dev_priv); ++ sarea_priv->dirty &= ~R128_UPLOAD_TEX1; ++ } ++ ++ /* Turn off the texture cache flushing */ ++ sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH; ++ ++ sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE; ++} ++ ++#if R128_PERFORMANCE_BOXES ++/* ================================================================ ++ * Performance monitoring functions ++ */ ++ ++static void r128_clear_box(drm_r128_private_t * dev_priv, ++ int x, int y, int w, int h, int r, int g, int b) ++{ ++ u32 pitch, offset; ++ u32 fb_bpp, color; ++ RING_LOCALS; ++ ++ switch (dev_priv->fb_bpp) { ++ case 16: ++ fb_bpp = R128_GMC_DST_16BPP; ++ color = (((r & 0xf8) << 8) | ++ ((g & 0xfc) << 3) | ((b & 0xf8) >> 3)); ++ break; ++ case 24: ++ fb_bpp = R128_GMC_DST_24BPP; ++ color = ((r << 16) | (g << 8) | b); ++ break; ++ case 32: ++ fb_bpp = R128_GMC_DST_32BPP; ++ color = (((0xff) << 24) | (r << 16) | (g << 8) | b); ++ break; ++ default: ++ return; ++ } ++ ++ offset = dev_priv->back_offset; ++ pitch = dev_priv->back_pitch >> 3; ++ ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ fb_bpp | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS); ++ ++ OUT_RING((pitch << 21) | (offset >> 5)); ++ OUT_RING(color); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ 
ADVANCE_RING(); ++} ++ ++static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv) ++{ ++ if (atomic_read(&dev_priv->idle_count) == 0) { ++ r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); ++ } else { ++ atomic_set(&dev_priv->idle_count, 0); ++ } ++} ++ ++#endif ++ ++/* ================================================================ ++ * CCE command dispatch functions ++ */ ++ ++static void r128_print_dirty(const char *msg, unsigned int flags) ++{ ++ DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n", ++ msg, ++ flags, ++ (flags & R128_UPLOAD_CORE) ? "core, " : "", ++ (flags & R128_UPLOAD_CONTEXT) ? "context, " : "", ++ (flags & R128_UPLOAD_SETUP) ? "setup, " : "", ++ (flags & R128_UPLOAD_TEX0) ? "tex0, " : "", ++ (flags & R128_UPLOAD_TEX1) ? "tex1, " : "", ++ (flags & R128_UPLOAD_MASKS) ? "masks, " : "", ++ (flags & R128_UPLOAD_WINDOW) ? "window, " : "", ++ (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "", ++ (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : ""); ++} ++ ++static void r128_cce_dispatch_clear(struct drm_device * dev, ++ drm_r128_clear_t * clear) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ unsigned int flags = clear->flags; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ if (dev_priv->page_flipping && dev_priv->current_page == 1) { ++ unsigned int tmp = flags; ++ ++ flags &= ~(R128_FRONT | R128_BACK); ++ if (tmp & R128_FRONT) ++ flags |= R128_BACK; ++ if (tmp & R128_BACK) ++ flags |= R128_FRONT; ++ } ++ ++ for (i = 0; i < nbox; i++) { ++ int x = pbox[i].x1; ++ int y = pbox[i].y1; ++ int w = pbox[i].x2 - x; ++ int h = pbox[i].y2 - y; ++ ++ DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n", ++ pbox[i].x1, pbox[i].y1, pbox[i].x2, ++ pbox[i].y2, flags); ++ ++ if (flags & (R128_FRONT | R128_BACK)) { ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0)); ++ OUT_RING(clear->color_mask); ++ ++ ADVANCE_RING(); ++ } ++ ++ if (flags & R128_FRONT) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->color_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_AUX_CLIP_DIS); ++ ++ OUT_RING(dev_priv->front_pitch_offset_c); ++ OUT_RING(clear->clear_color); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ ++ if (flags & R128_BACK) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->color_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_AUX_CLIP_DIS); ++ ++ OUT_RING(dev_priv->back_pitch_offset_c); ++ OUT_RING(clear->clear_color); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ ++ if (flags & R128_DEPTH) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(clear->clear_depth); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ } ++} ++ ++static void 
r128_cce_dispatch_swap(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++#if R128_PERFORMANCE_BOXES ++ /* Do some trivial performance monitoring... ++ */ ++ r128_cce_performance_boxes(dev_priv); ++#endif ++ ++ for (i = 0; i < nbox; i++) { ++ int x = pbox[i].x1; ++ int y = pbox[i].y1; ++ int w = pbox[i].x2 - x; ++ int h = pbox[i].y2 - y; ++ ++ BEGIN_RING(7); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5)); ++ OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL | ++ R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_NONE | ++ (dev_priv->color_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_S | ++ R128_DP_SRC_SOURCE_MEMORY | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS); ++ ++ /* Make this work even if front & back are flipped: ++ */ ++ if (dev_priv->current_page == 0) { ++ OUT_RING(dev_priv->back_pitch_offset_c); ++ OUT_RING(dev_priv->front_pitch_offset_c); ++ } else { ++ OUT_RING(dev_priv->front_pitch_offset_c); ++ OUT_RING(dev_priv->back_pitch_offset_c); ++ } ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ ++ /* Increment the frame counter. The client-side 3D driver must ++ * throttle the framerate by waiting for this value before ++ * performing the swapbuffer ioctl. ++ */ ++ dev_priv->sarea_priv->last_frame++; ++ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0)); ++ OUT_RING(dev_priv->sarea_priv->last_frame); ++ ++ ADVANCE_RING(); ++} ++ ++static void r128_cce_dispatch_flip(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ DRM_DEBUG("page=%d pfCurrentPage=%d\n", ++ dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage); ++ ++#if R128_PERFORMANCE_BOXES ++ /* Do some trivial performance monitoring... ++ */ ++ r128_cce_performance_boxes(dev_priv); ++#endif ++ ++ BEGIN_RING(4); ++ ++ R128_WAIT_UNTIL_PAGE_FLIPPED(); ++ OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0)); ++ ++ if (dev_priv->current_page == 0) { ++ OUT_RING(dev_priv->back_offset); ++ } else { ++ OUT_RING(dev_priv->front_offset); ++ } ++ ++ ADVANCE_RING(); ++ ++ /* Increment the frame counter. The client-side 3D driver must ++ * throttle the framerate by waiting for this value before ++ * performing the swapbuffer ioctl. 
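++ * For illustration only (a sketch, not taken from the driver): a client ++ * might throttle itself by spinning with something like ++ * while (sarea->last_frame < my_frame) usleep(1); ++ * where my_frame is a hypothetical frame stamp the client recorded before ++ * queueing its swap, and sarea->last_frame is the shared-area copy of the ++ * counter incremented below.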
++ */ ++ dev_priv->sarea_priv->last_frame++; ++ dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page = ++ 1 - dev_priv->current_page; ++ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0)); ++ OUT_RING(dev_priv->sarea_priv->last_frame); ++ ++ ADVANCE_RING(); ++} ++ ++static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_buf_priv_t *buf_priv = buf->dev_private; ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int format = sarea_priv->vc_format; ++ int offset = buf->bus_address; ++ int size = buf->used; ++ int prim = buf_priv->prim; ++ int i = 0; ++ RING_LOCALS; ++ DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox); ++ ++ if (0) ++ r128_print_dirty("dispatch_vertex", sarea_priv->dirty); ++ ++ if (buf->used) { ++ buf_priv->dispatched = 1; ++ ++ if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) { ++ r128_emit_state(dev_priv); ++ } ++ ++ do { ++ /* Emit the next set of up to three cliprects */ ++ if (i < sarea_priv->nbox) { ++ r128_emit_clip_rects(dev_priv, ++ &sarea_priv->boxes[i], ++ sarea_priv->nbox - i); ++ } ++ ++ /* Emit the vertex buffer rendering commands */ ++ BEGIN_RING(5); ++ ++ OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3)); ++ OUT_RING(offset); ++ OUT_RING(size); ++ OUT_RING(format); ++ OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST | ++ (size << R128_CCE_VC_CNTL_NUM_SHIFT)); ++ ++ ADVANCE_RING(); ++ ++ i += 3; ++ } while (i < sarea_priv->nbox); ++ } ++ ++ if (buf_priv->discard) { ++ buf_priv->age = dev_priv->sarea_priv->last_dispatch; ++ ++ /* Emit the vertex buffer age */ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0)); ++ OUT_RING(buf_priv->age); ++ ++ ADVANCE_RING(); ++ ++ buf->pending = 1; ++ buf->used = 0; ++ /* FIXME: Check dispatched field */ ++ buf_priv->dispatched = 0; ++ } ++ ++ dev_priv->sarea_priv->last_dispatch++; ++ ++ sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS; ++ sarea_priv->nbox = 0; ++} ++ ++static void r128_cce_dispatch_indirect(struct drm_device * dev, ++ struct drm_buf * buf, int start, int end) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_buf_priv_t *buf_priv = buf->dev_private; ++ RING_LOCALS; ++ DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end); ++ ++ if (start != end) { ++ int offset = buf->bus_address + start; ++ int dwords = (end - start + 3) / sizeof(u32); ++ ++ /* Indirect buffer data must be an even number of ++ * dwords, so if we've been given an odd number we must ++ * pad the data with a Type-2 CCE packet. 
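++ * (A type-2 packet is, in effect, a one-dword NOP filler, which is why ++ * the if (dwords & 1) branch below can simply append R128_CCE_PACKET2 to ++ * even out the count without side effects.)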
++ */ ++ if (dwords & 1) { ++ u32 *data = (u32 *) ++ ((char *)dev->agp_buffer_map->handle ++ + buf->offset + start); ++ data[dwords++] = cpu_to_le32(R128_CCE_PACKET2); ++ } ++ ++ buf_priv->dispatched = 1; ++ ++ /* Fire off the indirect buffer */ ++ BEGIN_RING(3); ++ ++ OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1)); ++ OUT_RING(offset); ++ OUT_RING(dwords); ++ ++ ADVANCE_RING(); ++ } ++ ++ if (buf_priv->discard) { ++ buf_priv->age = dev_priv->sarea_priv->last_dispatch; ++ ++ /* Emit the indirect buffer age */ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0)); ++ OUT_RING(buf_priv->age); ++ ++ ADVANCE_RING(); ++ ++ buf->pending = 1; ++ buf->used = 0; ++ /* FIXME: Check dispatched field */ ++ buf_priv->dispatched = 0; ++ } ++ ++ dev_priv->sarea_priv->last_dispatch++; ++} ++ ++static void r128_cce_dispatch_indices(struct drm_device * dev, ++ struct drm_buf * buf, ++ int start, int end, int count) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_buf_priv_t *buf_priv = buf->dev_private; ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int format = sarea_priv->vc_format; ++ int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset; ++ int prim = buf_priv->prim; ++ u32 *data; ++ int dwords; ++ int i = 0; ++ RING_LOCALS; ++ DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count); ++ ++ if (0) ++ r128_print_dirty("dispatch_indices", sarea_priv->dirty); ++ ++ if (start != end) { ++ buf_priv->dispatched = 1; ++ ++ if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) { ++ r128_emit_state(dev_priv); ++ } ++ ++ dwords = (end - start + 3) / sizeof(u32); ++ ++ data = (u32 *) ((char *)dev->agp_buffer_map->handle ++ + buf->offset + start); ++ ++ data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, ++ dwords - 2)); ++ ++ data[1] = cpu_to_le32(offset); ++ data[2] = cpu_to_le32(R128_MAX_VB_VERTS); ++ data[3] = cpu_to_le32(format); ++ data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND | ++ (count << 16))); ++ ++ if (count & 0x1) { ++#ifdef __LITTLE_ENDIAN ++ data[dwords - 1] &= 0x0000ffff; ++#else ++ data[dwords - 1] &= 0xffff0000; ++#endif ++ } ++ ++ do { ++ /* Emit the next set of up to three cliprects */ ++ if (i < sarea_priv->nbox) { ++ r128_emit_clip_rects(dev_priv, ++ &sarea_priv->boxes[i], ++ sarea_priv->nbox - i); ++ } ++ ++ r128_cce_dispatch_indirect(dev, buf, start, end); ++ ++ i += 3; ++ } while (i < sarea_priv->nbox); ++ } ++ ++ if (buf_priv->discard) { ++ buf_priv->age = dev_priv->sarea_priv->last_dispatch; ++ ++ /* Emit the vertex buffer age */ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0)); ++ OUT_RING(buf_priv->age); ++ ++ ADVANCE_RING(); ++ ++ buf->pending = 1; ++ /* FIXME: Check dispatched field */ ++ buf_priv->dispatched = 0; ++ } ++ ++ dev_priv->sarea_priv->last_dispatch++; ++ ++ sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS; ++ sarea_priv->nbox = 0; ++} ++ ++static int r128_cce_dispatch_blit(struct drm_device * dev, ++ struct drm_file *file_priv, ++ drm_r128_blit_t * blit) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_r128_buf_priv_t *buf_priv; ++ u32 *data; ++ int dword_shift, dwords; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ /* The compiler won't optimize away a division by a variable, ++ * even if the only legal values are powers of two. Thus, we'll ++ * use a shift instead. 
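++ * For example, R128_DATATYPE_RGB565 packs two 16-bit pixels per dword, ++ * so (width * height * 2) / 4 becomes (width * height) >> 1, matching ++ * dword_shift == 1 in the switch below.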
++ */ ++ switch (blit->format) { ++ case R128_DATATYPE_ARGB8888: ++ dword_shift = 0; ++ break; ++ case R128_DATATYPE_ARGB1555: ++ case R128_DATATYPE_RGB565: ++ case R128_DATATYPE_ARGB4444: ++ case R128_DATATYPE_YVYU422: ++ case R128_DATATYPE_VYUY422: ++ dword_shift = 1; ++ break; ++ case R128_DATATYPE_CI8: ++ case R128_DATATYPE_RGB8: ++ dword_shift = 2; ++ break; ++ default: ++ DRM_ERROR("invalid blit format %d\n", blit->format); ++ return -EINVAL; ++ } ++ ++ /* Flush the pixel cache, and mark the contents as Read Invalid. ++ * This ensures no pixel data gets mixed up with the texture ++ * data from the host data blit, otherwise part of the texture ++ * image may be corrupted. ++ */ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0)); ++ OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI); ++ ++ ADVANCE_RING(); ++ ++ /* Dispatch the indirect buffer. ++ */ ++ buf = dma->buflist[blit->idx]; ++ buf_priv = buf->dev_private; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", blit->idx); ++ return -EINVAL; ++ } ++ ++ buf_priv->discard = 1; ++ ++ dwords = (blit->width * blit->height) >> dword_shift; ++ ++ data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); ++ ++ data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6)); ++ data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_NONE | ++ (blit->format << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_S | ++ R128_DP_SRC_SOURCE_HOST_DATA | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS)); ++ ++ data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5)); ++ data[3] = cpu_to_le32(0xffffffff); ++ data[4] = cpu_to_le32(0xffffffff); ++ data[5] = cpu_to_le32((blit->y << 16) | blit->x); ++ data[6] = cpu_to_le32((blit->height << 16) | blit->width); ++ data[7] = cpu_to_le32(dwords); ++ ++ buf->used = (dwords + 8) * sizeof(u32); ++ ++ r128_cce_dispatch_indirect(dev, buf, 0, buf->used); ++ ++ /* Flush the pixel cache after the blit completes. This ensures ++ * the texture data is written out to memory before rendering ++ * continues. ++ */ ++ BEGIN_RING(2); ++ ++ OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0)); ++ OUT_RING(R128_PC_FLUSH_GUI); ++ ++ ADVANCE_RING(); ++ ++ return 0; ++} ++ ++/* ================================================================ ++ * Tiled depth buffer management ++ * ++ * FIXME: These should all set the destination write mask for when we ++ * have hardware stencil support. 
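++ * ++ * A span write paints depth->n consecutive values along one scanline ++ * starting at a single (x, y); a pixel write paints depth->n scattered ++ * (x[i], y[i]) points. Both are emitted below as 1x1 PAINT_MULTI blits.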
++ */ ++ ++static int r128_cce_dispatch_write_span(struct drm_device * dev, ++ drm_r128_depth_t * depth) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ int count, x, y; ++ u32 *buffer; ++ u8 *mask; ++ int i, buffer_size, mask_size; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ count = depth->n; ++ if (count > 4096 || count <= 0) ++ return -EMSGSIZE; ++ ++ if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { ++ return -EFAULT; ++ } ++ if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { ++ return -EFAULT; ++ } ++ ++ buffer_size = depth->n * sizeof(u32); ++ buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); ++ if (buffer == NULL) ++ return -ENOMEM; ++ if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ ++ mask_size = depth->n * sizeof(u8); ++ if (depth->mask) { ++ mask = drm_alloc(mask_size, DRM_MEM_BUFS); ++ if (mask == NULL) { ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ drm_free(mask, mask_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ ++ for (i = 0; i < count; i++, x++) { ++ if (mask[i]) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(buffer[i]); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((1 << 16) | 1); ++ ++ ADVANCE_RING(); ++ } ++ } ++ ++ drm_free(mask, mask_size, DRM_MEM_BUFS); ++ } else { ++ for (i = 0; i < count; i++, x++) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(buffer[i]); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((1 << 16) | 1); ++ ++ ADVANCE_RING(); ++ } ++ } ++ ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ ++ return 0; ++} ++ ++static int r128_cce_dispatch_write_pixels(struct drm_device * dev, ++ drm_r128_depth_t * depth) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ int count, *x, *y; ++ u32 *buffer; ++ u8 *mask; ++ int i, xbuf_size, ybuf_size, buffer_size, mask_size; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ count = depth->n; ++ if (count > 4096 || count <= 0) ++ return -EMSGSIZE; ++ ++ xbuf_size = count * sizeof(*x); ++ ybuf_size = count * sizeof(*y); ++ x = drm_alloc(xbuf_size, DRM_MEM_BUFS); ++ if (x == NULL) { ++ return -ENOMEM; ++ } ++ y = drm_alloc(ybuf_size, DRM_MEM_BUFS); ++ if (y == NULL) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ ++ buffer_size = depth->n * sizeof(u32); ++ buffer = drm_alloc(buffer_size, DRM_MEM_BUFS); ++ if (buffer == NULL) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ if (DRM_COPY_FROM_USER(buffer, 
depth->buffer, buffer_size)) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ ++ if (depth->mask) { ++ mask_size = depth->n * sizeof(u8); ++ mask = drm_alloc(mask_size, DRM_MEM_BUFS); ++ if (mask == NULL) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ drm_free(mask, mask_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ ++ for (i = 0; i < count; i++) { ++ if (mask[i]) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(buffer[i]); ++ ++ OUT_RING((x[i] << 16) | y[i]); ++ OUT_RING((1 << 16) | 1); ++ ++ ADVANCE_RING(); ++ } ++ } ++ ++ drm_free(mask, mask_size, DRM_MEM_BUFS); ++ } else { ++ for (i = 0; i < count; i++) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_P | ++ R128_GMC_CLR_CMP_CNTL_DIS | ++ R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(buffer[i]); ++ ++ OUT_RING((x[i] << 16) | y[i]); ++ OUT_RING((1 << 16) | 1); ++ ++ ADVANCE_RING(); ++ } ++ } ++ ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ drm_free(buffer, buffer_size, DRM_MEM_BUFS); ++ ++ return 0; ++} ++ ++static int r128_cce_dispatch_read_span(struct drm_device * dev, ++ drm_r128_depth_t * depth) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ int count, x, y; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ count = depth->n; ++ if (count > 4096 || count <= 0) ++ return -EMSGSIZE; ++ ++ if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { ++ return -EFAULT; ++ } ++ if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { ++ return -EFAULT; ++ } ++ ++ BEGIN_RING(7); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5)); ++ OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL | ++ R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_NONE | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_S | ++ R128_DP_SRC_SOURCE_MEMORY | ++ R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(dev_priv->span_pitch_offset_c); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((0 << 16) | 0); ++ OUT_RING((count << 16) | 1); ++ ++ ADVANCE_RING(); ++ ++ return 0; ++} ++ ++static int r128_cce_dispatch_read_pixels(struct drm_device * dev, ++ drm_r128_depth_t * depth) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ int count, *x, *y; ++ int i, xbuf_size, ybuf_size; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ count = depth->n; ++ if (count > 4096 || count <= 0) ++ return -EMSGSIZE; ++ ++ if (count > dev_priv->depth_pitch) { ++ count = dev_priv->depth_pitch; ++ } ++ ++ xbuf_size = count * sizeof(*x); ++ ybuf_size = count * sizeof(*y); ++ x = drm_alloc(xbuf_size, DRM_MEM_BUFS); ++ if (x == NULL) { ++ return -ENOMEM; ++ } ++ y = 
drm_alloc(ybuf_size, DRM_MEM_BUFS); ++ if (y == NULL) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ return -EFAULT; ++ } ++ ++ for (i = 0; i < count; i++) { ++ BEGIN_RING(7); ++ ++ OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5)); ++ OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL | ++ R128_GMC_DST_PITCH_OFFSET_CNTL | ++ R128_GMC_BRUSH_NONE | ++ (dev_priv->depth_fmt << 8) | ++ R128_GMC_SRC_DATATYPE_COLOR | ++ R128_ROP3_S | ++ R128_DP_SRC_SOURCE_MEMORY | ++ R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS); ++ ++ OUT_RING(dev_priv->depth_pitch_offset_c); ++ OUT_RING(dev_priv->span_pitch_offset_c); ++ ++ OUT_RING((x[i] << 16) | y[i]); ++ OUT_RING((i << 16) | 0); ++ OUT_RING((1 << 16) | 1); ++ ++ ADVANCE_RING(); ++ } ++ ++ drm_free(x, xbuf_size, DRM_MEM_BUFS); ++ drm_free(y, ybuf_size, DRM_MEM_BUFS); ++ ++ return 0; ++} ++ ++/* ================================================================ ++ * Polygon stipple ++ */ ++ ++static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(33); ++ ++ OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31)); ++ for (i = 0; i < 32; i++) { ++ OUT_RING(stipple[i]); ++ } ++ ++ ADVANCE_RING(); ++} ++ ++/* ================================================================ ++ * IOCTL functions ++ */ ++ ++static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_r128_clear_t *clear = data; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; ++ ++ r128_cce_dispatch_clear(dev, clear); ++ COMMIT_RING(); ++ ++ /* Make sure we restore the 3D state next time. ++ */ ++ dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS; ++ ++ return 0; ++} ++ ++static int r128_do_init_pageflip(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET); ++ dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL); ++ ++ R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset); ++ R128_WRITE(R128_CRTC_OFFSET_CNTL, ++ dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL); ++ ++ dev_priv->page_flipping = 1; ++ dev_priv->current_page = 0; ++ dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page; ++ ++ return 0; ++} ++ ++static int r128_do_cleanup_pageflip(struct drm_device * dev) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset); ++ R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl); ++ ++ if (dev_priv->current_page != 0) { ++ r128_cce_dispatch_flip(dev); ++ COMMIT_RING(); ++ } ++ ++ dev_priv->page_flipping = 0; ++ return 0; ++} ++ ++/* Swapping and flipping are different operations, need different ioctls. ++ * They can & should be intermixed to support multiple 3d windows. 
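++ * A flip merely retargets the CRTC base address (R128_CRTC_OFFSET) and is ++ * therefore inherently full-screen; a swap blits back to front one ++ * cliprect at a time, which is what windowed clients need.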
++ */ ++ ++static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ if (!dev_priv->page_flipping) ++ r128_do_init_pageflip(dev); ++ ++ r128_cce_dispatch_flip(dev); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; ++ ++ r128_cce_dispatch_swap(dev); ++ dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT | ++ R128_UPLOAD_MASKS); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_r128_buf_priv_t *buf_priv; ++ drm_r128_vertex_t *vertex = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", ++ DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); ++ ++ if (vertex->idx < 0 || vertex->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ vertex->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ if (vertex->prim < 0 || ++ vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { ++ DRM_ERROR("buffer prim %d\n", vertex->prim); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf = dma->buflist[vertex->idx]; ++ buf_priv = buf->dev_private; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", vertex->idx); ++ return -EINVAL; ++ } ++ ++ buf->used = vertex->count; ++ buf_priv->prim = vertex->prim; ++ buf_priv->discard = vertex->discard; ++ ++ r128_cce_dispatch_vertex(dev, buf); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_r128_buf_priv_t *buf_priv; ++ drm_r128_indices_t *elts = data; ++ int count; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID, ++ elts->idx, elts->start, elts->end, elts->discard); ++ ++ if (elts->idx < 0 || elts->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ elts->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ if (elts->prim < 0 || ++ elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { ++ DRM_ERROR("buffer prim %d\n", elts->prim); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf = dma->buflist[elts->idx]; ++ buf_priv = buf->dev_private; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d 
using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", elts->idx); ++ return -EINVAL; ++ } ++ ++ count = (elts->end - elts->start) / sizeof(u16); ++ elts->start -= R128_INDEX_PRIM_OFFSET; ++ ++ if (elts->start & 0x7) { ++ DRM_ERROR("misaligned buffer 0x%x\n", elts->start); ++ return -EINVAL; ++ } ++ if (elts->start < buf->used) { ++ DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used); ++ return -EINVAL; ++ } ++ ++ buf->used = elts->end; ++ buf_priv->prim = elts->prim; ++ buf_priv->discard = elts->discard; ++ ++ r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_blit_t *blit = data; ++ int ret; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx); ++ ++ if (blit->idx < 0 || blit->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ blit->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ ret = r128_cce_dispatch_blit(dev, file_priv, blit); ++ ++ COMMIT_RING(); ++ return ret; ++} ++ ++static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_depth_t *depth = data; ++ int ret; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ ret = -EINVAL; ++ switch (depth->func) { ++ case R128_WRITE_SPAN: ++ ret = r128_cce_dispatch_write_span(dev, depth); ++ break; ++ case R128_WRITE_PIXELS: ++ ret = r128_cce_dispatch_write_pixels(dev, depth); ++ break; ++ case R128_READ_SPAN: ++ ret = r128_cce_dispatch_read_span(dev, depth); ++ break; ++ case R128_READ_PIXELS: ++ ret = r128_cce_dispatch_read_pixels(dev, depth); ++ break; ++ } ++ ++ COMMIT_RING(); ++ return ret; ++} ++ ++static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_stipple_t *stipple = data; ++ u32 mask[32]; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32))) ++ return -EFAULT; ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ r128_cce_dispatch_stipple(dev, mask); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_r128_buf_priv_t *buf_priv; ++ drm_r128_indirect_t *indirect = data; ++#if 0 ++ RING_LOCALS; ++#endif ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("idx=%d s=%d e=%d d=%d\n", ++ indirect->idx, indirect->start, indirect->end, ++ indirect->discard); ++ ++ if (indirect->idx < 0 || indirect->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ indirect->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ ++ buf = dma->buflist[indirect->idx]; ++ buf_priv = buf->dev_private; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ 
DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", indirect->idx); ++ return -EINVAL; ++ } ++ ++ if (indirect->start < buf->used) { ++ DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", ++ indirect->start, buf->used); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf->used = indirect->end; ++ buf_priv->discard = indirect->discard; ++ ++#if 0 ++ /* Wait for the 3D stream to idle before the indirect buffer ++ * containing 2D acceleration commands is processed. ++ */ ++ BEGIN_RING(2); ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ADVANCE_RING(); ++#endif ++ ++ /* Dispatch the indirect buffer full of commands from the ++ * X server. This is insecure and is thus only available to ++ * privileged clients. ++ */ ++ r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ drm_r128_getparam_t *param = data; ++ int value; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); ++ ++ switch (param->param) { ++ case R128_PARAM_IRQ_NR: ++ value = dev->irq; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) ++{ ++ if (dev->dev_private) { ++ drm_r128_private_t *dev_priv = dev->dev_private; ++ if (dev_priv->page_flipping) { ++ r128_do_cleanup_pageflip(dev); ++ } ++ } ++} ++ ++void r128_driver_lastclose(struct drm_device * dev) ++{ ++ r128_do_cleanup_cce(dev); ++} ++ ++struct drm_ioctl_desc r128_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH), ++}; ++ ++int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls); +diff -Nurd git/drivers/gpu/drm-tungsten/r300_cmdbuf.c git-nokia/drivers/gpu/drm-tungsten/r300_cmdbuf.c +--- git/drivers/gpu/drm-tungsten/r300_cmdbuf.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/r300_cmdbuf.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1198 @@ ++/* r300_cmdbuf.c -- Command buffer emission for R300 -*- 
linux-c -*- ++ * ++ * Copyright (C) The Weather Channel, Inc. 2002. ++ * Copyright (C) 2004 Nicolai Haehnle. ++ * All Rights Reserved. ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Nicolai Haehnle ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++#include "r300_reg.h" ++ ++#define R300_SIMULTANEOUS_CLIPRECTS 4 ++ ++/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects ++ */ ++static const int r300_cliprect_cntl[4] = { ++ 0xAAAA, ++ 0xEEEE, ++ 0xFEFE, ++ 0xFFFE ++}; ++ ++/** ++ * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command ++ * buffer, starting with index n. 
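++ * Emits at most R300_SIMULTANEOUS_CLIPRECTS (4) rectangles per call; the ++ * caller walks the boxes in groups of that size. Returns 0 on success or ++ * -EFAULT if a cliprect cannot be copied from user space.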
++ */ ++static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, int n) ++{ ++ struct drm_clip_rect box; ++ int nr; ++ int i; ++ RING_LOCALS; ++ ++ nr = cmdbuf->nbox - n; ++ if (nr > R300_SIMULTANEOUS_CLIPRECTS) ++ nr = R300_SIMULTANEOUS_CLIPRECTS; ++ ++ DRM_DEBUG("%i cliprects\n", nr); ++ ++ if (nr) { ++ BEGIN_RING(6 + nr * 2); ++ OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); ++ ++ for (i = 0; i < nr; ++i) { ++ if (DRM_COPY_FROM_USER_UNCHECKED ++ (&box, &cmdbuf->boxes[n + i], sizeof(box))) { ++ DRM_ERROR("copy cliprect faulted\n"); ++ return -EFAULT; ++ } ++ ++ box.x2--; /* Hardware expects inclusive bottom-right corner */ ++ box.y2--; ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { ++ box.x1 = (box.x1) & ++ R300_CLIPRECT_MASK; ++ box.y1 = (box.y1) & ++ R300_CLIPRECT_MASK; ++ box.x2 = (box.x2) & ++ R300_CLIPRECT_MASK; ++ box.y2 = (box.y2) & ++ R300_CLIPRECT_MASK; ++ } else { ++ box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & ++ R300_CLIPRECT_MASK; ++ box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & ++ R300_CLIPRECT_MASK; ++ box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & ++ R300_CLIPRECT_MASK; ++ box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & ++ R300_CLIPRECT_MASK; ++ } ++ ++ OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) | ++ (box.y1 << R300_CLIPRECT_Y_SHIFT)); ++ OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) | ++ (box.y2 << R300_CLIPRECT_Y_SHIFT)); ++ ++ } ++ ++ OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]); ++ ++ /* TODO/SECURITY: Force scissors to a safe value, otherwise the ++ * client might be able to trample over memory. ++ * The impact should be very limited, but I'd rather be safe than ++ * sorry. ++ */ ++ OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1)); ++ OUT_RING(0); ++ OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK); ++ ADVANCE_RING(); ++ } else { ++ /* Why we allow zero cliprect rendering: ++ * There are some commands in a command buffer that must be submitted ++ * even when there are no cliprects, e.g. DMA buffer discard ++ * or state setting (though state setting could be avoided by ++ * simulating a loss of context). ++ * ++ * Now since the cmdbuf interface is so chaotic right now (and is ++ * bound to remain that way for a bit until things settle down), ++ * it is basically impossible to filter out the commands that are ++ * necessary and those that aren't. ++ * ++ * So I choose the safe way and don't do any filtering at all; ++ * instead, I simply set up the engine so that all rendering ++ * can't produce any fragments. 
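++ * ++ * Concretely, writing 0 to R300_RE_CLIPRECT_CNTL (done just below) leaves ++ * no cliprect enabled, so the submitted commands still execute but cannot ++ * produce visible fragments.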
++ */ ++ BEGIN_RING(2); ++ OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0); ++ ADVANCE_RING(); ++ } ++ ++ /* flush cache and wait idle clean after cliprect change */ ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); ++ OUT_RING(R300_RB3D_DC_FLUSH); ++ ADVANCE_RING(); ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); ++ OUT_RING(RADEON_WAIT_3D_IDLECLEAN); ++ ADVANCE_RING(); ++ /* set flush flag */ ++ dev_priv->track_flush |= RADEON_FLUSH_EMITED; ++ ++ return 0; ++} ++ ++static u8 r300_reg_flags[0x10000 >> 2]; ++ ++void r300_init_reg_flags(struct drm_device *dev) ++{ ++ int i; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ memset(r300_reg_flags, 0, 0x10000 >> 2); ++#define ADD_RANGE_MARK(reg, count,mark) \ ++ for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\ ++ r300_reg_flags[i]|=(mark); ++ ++#define MARK_SAFE 1 ++#define MARK_CHECK_OFFSET 2 ++ ++#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE) ++ ++ /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */ ++ ADD_RANGE(R300_SE_VPORT_XSCALE, 6); ++ ADD_RANGE(R300_VAP_CNTL, 1); ++ ADD_RANGE(R300_SE_VTE_CNTL, 2); ++ ADD_RANGE(0x2134, 2); ++ ADD_RANGE(R300_VAP_CNTL_STATUS, 1); ++ ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2); ++ ADD_RANGE(0x21DC, 1); ++ ADD_RANGE(R300_VAP_UNKNOWN_221C, 1); ++ ADD_RANGE(R300_VAP_CLIP_X_0, 4); ++ ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1); ++ ADD_RANGE(R300_VAP_UNKNOWN_2288, 1); ++ ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2); ++ ADD_RANGE(R300_VAP_PVS_CNTL_1, 3); ++ ADD_RANGE(R300_GB_ENABLE, 1); ++ ADD_RANGE(R300_GB_MSPOS0, 5); ++ ADD_RANGE(R300_TX_INVALTAGS, 1); ++ ADD_RANGE(R300_TX_ENABLE, 1); ++ ADD_RANGE(0x4200, 4); ++ ADD_RANGE(0x4214, 1); ++ ADD_RANGE(R300_RE_POINTSIZE, 1); ++ ADD_RANGE(0x4230, 3); ++ ADD_RANGE(R300_RE_LINE_CNT, 1); ++ ADD_RANGE(R300_RE_UNK4238, 1); ++ ADD_RANGE(0x4260, 3); ++ ADD_RANGE(R300_RE_SHADE, 4); ++ ADD_RANGE(R300_RE_POLYGON_MODE, 5); ++ ADD_RANGE(R300_RE_ZBIAS_CNTL, 1); ++ ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4); ++ ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1); ++ ADD_RANGE(R300_RE_CULL_CNTL, 1); ++ ADD_RANGE(0x42C0, 2); ++ ADD_RANGE(R300_RS_CNTL_0, 2); ++ ++ ADD_RANGE(R300_SC_HYPERZ, 2); ++ ADD_RANGE(0x43E8, 1); ++ ++ ADD_RANGE(0x46A4, 5); ++ ++ ADD_RANGE(R300_RE_FOG_STATE, 1); ++ ADD_RANGE(R300_FOG_COLOR_R, 3); ++ ADD_RANGE(R300_PP_ALPHA_TEST, 2); ++ ADD_RANGE(0x4BD8, 1); ++ ADD_RANGE(R300_PFS_PARAM_0_X, 64); ++ ADD_RANGE(0x4E00, 1); ++ ADD_RANGE(R300_RB3D_CBLEND, 2); ++ ADD_RANGE(R300_RB3D_COLORMASK, 1); ++ ADD_RANGE(R300_RB3D_BLEND_COLOR, 3); ++ ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */ ++ ADD_RANGE(R300_RB3D_COLORPITCH0, 1); ++ ADD_RANGE(0x4E50, 9); ++ ADD_RANGE(0x4E88, 1); ++ ADD_RANGE(0x4EA0, 2); ++ ADD_RANGE(R300_ZB_CNTL, 3); ++ ADD_RANGE(R300_ZB_FORMAT, 4); ++ ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */ ++ ADD_RANGE(R300_ZB_DEPTHPITCH, 1); ++ ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1); ++ ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13); ++ ++ ADD_RANGE(R300_TX_FILTER_0, 16); ++ ADD_RANGE(R300_TX_FILTER1_0, 16); ++ ADD_RANGE(R300_TX_SIZE_0, 16); ++ ADD_RANGE(R300_TX_FORMAT_0, 16); ++ ADD_RANGE(R300_TX_PITCH_0, 16); ++ /* Texture offset is dangerous and needs more checking */ ++ ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET); ++ ADD_RANGE(R300_TX_CHROMA_KEY_0, 16); ++ ADD_RANGE(R300_TX_BORDER_COLOR_0, 16); ++ ++ /* Sporadic registers used as primitives are emitted */ ++ ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1); ++ ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1); ++ 
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8); ++ ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8); ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { ++ ADD_RANGE(R500_VAP_INDEX_OFFSET, 1); ++ ADD_RANGE(R500_US_CONFIG, 2); ++ ADD_RANGE(R500_US_CODE_ADDR, 3); ++ ADD_RANGE(R500_US_FC_CTRL, 1); ++ ADD_RANGE(R500_RS_IP_0, 16); ++ ADD_RANGE(R500_RS_INST_0, 16); ++ ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2); ++ ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2); ++ ADD_RANGE(R500_ZB_FIFO_SIZE, 2); ++ } else { ++ ADD_RANGE(R300_PFS_CNTL_0, 3); ++ ADD_RANGE(R300_PFS_NODE_0, 4); ++ ADD_RANGE(R300_PFS_TEXI_0, 64); ++ ADD_RANGE(R300_PFS_INSTR0_0, 64); ++ ADD_RANGE(R300_PFS_INSTR1_0, 64); ++ ADD_RANGE(R300_PFS_INSTR2_0, 64); ++ ADD_RANGE(R300_PFS_INSTR3_0, 64); ++ ADD_RANGE(R300_RS_INTERP_0, 8); ++ ADD_RANGE(R300_RS_ROUTE_0, 8); ++ ++ } ++} ++ ++static __inline__ int r300_check_range(unsigned reg, int count) ++{ ++ int i; ++ if (reg & ~0xffff) ++ return -1; ++ for (i = (reg >> 2); i < (reg >> 2) + count; i++) ++ if (r300_reg_flags[i] != MARK_SAFE) ++ return 1; ++ return 0; ++} ++ ++static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * ++ dev_priv, ++ drm_radeon_kcmd_buffer_t ++ * cmdbuf, ++ drm_r300_cmd_header_t ++ header) ++{ ++ int reg; ++ int sz; ++ int i; ++ int values[64]; ++ RING_LOCALS; ++ ++ sz = header.packet0.count; ++ reg = (header.packet0.reghi << 8) | header.packet0.reglo; ++ ++ if ((sz > 64) || (sz < 0)) { ++ DRM_ERROR ++ ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", ++ reg, sz); ++ return -EINVAL; ++ } ++ for (i = 0; i < sz; i++) { ++ values[i] = ((int *)cmdbuf->buf)[i]; ++ switch (r300_reg_flags[(reg >> 2) + i]) { ++ case MARK_SAFE: ++ break; ++ case MARK_CHECK_OFFSET: ++ if (!radeon_check_offset(dev_priv, (u32) values[i])) { ++ DRM_ERROR ++ ("Offset failed range check (reg=%04x sz=%d)\n", ++ reg, sz); ++ return -EINVAL; ++ } ++ break; ++ default: ++ DRM_ERROR("Register %04x failed check as flag=%02x\n", ++ reg + i * 4, r300_reg_flags[(reg >> 2) + i]); ++ return -EINVAL; ++ } ++ } ++ ++ BEGIN_RING(1 + sz); ++ OUT_RING(CP_PACKET0(reg, sz - 1)); ++ OUT_RING_TABLE(values, sz); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * 4; ++ cmdbuf->bufsz -= sz * 4; ++ ++ return 0; ++} ++ ++/** ++ * Emits a packet0 setting arbitrary registers. ++ * Called by r300_do_cp_cmdbuf. ++ * ++ * Note that checks are performed on contents and addresses of the registers ++ */ ++static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ drm_r300_cmd_header_t header) ++{ ++ int reg; ++ int sz; ++ RING_LOCALS; ++ ++ sz = header.packet0.count; ++ reg = (header.packet0.reghi << 8) | header.packet0.reglo; ++ ++ DRM_DEBUG("R300_CMD_PACKET0: reg %04x, sz %d\n", reg, sz); ++ if (!sz) ++ return 0; ++ ++ if (sz * 4 > cmdbuf->bufsz) ++ return -EINVAL; ++ ++ if (reg + sz * 4 >= 0x10000) { ++ DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, ++ sz); ++ return -EINVAL; ++ } ++ ++ if (r300_check_range(reg, sz)) { ++ /* go and check everything */ ++ return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf, ++ header); ++ } ++ /* the rest of the data is safe to emit, whatever the values the user passed */ ++ ++ BEGIN_RING(1 + sz); ++ OUT_RING(CP_PACKET0(reg, sz - 1)); ++ OUT_RING_TABLE((int *)cmdbuf->buf, sz); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * 4; ++ cmdbuf->bufsz -= sz * 4; ++ ++ return 0; ++} ++ ++/** ++ * Uploads user-supplied vertex program instructions or parameters onto ++ * the graphics card. 
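++ * Each count unit is one 128-bit PVS instruction slot (four dwords), ++ * which is why the code below consumes sz * 16 bytes from the command ++ * buffer while emitting sz * 4 ring dwords.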
++ * Called by r300_do_cp_cmdbuf. ++ */ ++static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ drm_r300_cmd_header_t header) ++{ ++ int sz; ++ int addr; ++ RING_LOCALS; ++ ++ sz = header.vpu.count; ++ addr = (header.vpu.adrhi << 8) | header.vpu.adrlo; ++ ++ if (!sz) ++ return 0; ++ if (sz * 16 > cmdbuf->bufsz) ++ return -EINVAL; ++ ++ /* VAP is very sensitive so we purge cache before we program it ++ * and we also flush its state before & after */ ++ BEGIN_RING(6); ++ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); ++ OUT_RING(R300_RB3D_DC_FLUSH); ++ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); ++ OUT_RING(RADEON_WAIT_3D_IDLECLEAN); ++ OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0)); ++ OUT_RING(0); ++ ADVANCE_RING(); ++ /* set flush flag */ ++ dev_priv->track_flush |= RADEON_FLUSH_EMITED; ++ ++ BEGIN_RING(3 + sz * 4); ++ OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr); ++ OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1)); ++ OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4); ++ ADVANCE_RING(); ++ ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0)); ++ OUT_RING(0); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * 16; ++ cmdbuf->bufsz -= sz * 16; ++ ++ return 0; ++} ++ ++/** ++ * Emit a clear packet from userspace. ++ * Called by r300_emit_packet3. ++ */ ++static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ RING_LOCALS; ++ ++ if (8 * 4 > cmdbuf->bufsz) ++ return -EINVAL; ++ ++ BEGIN_RING(10); ++ OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); ++ OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING | ++ (1 << R300_PRIM_NUM_VERTICES_SHIFT)); ++ OUT_RING_TABLE((int *)cmdbuf->buf, 8); ++ ADVANCE_RING(); ++ ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); ++ OUT_RING(R300_RB3D_DC_FLUSH); ++ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); ++ OUT_RING(RADEON_WAIT_3D_IDLECLEAN); ++ ADVANCE_RING(); ++ /* set flush flag */ ++ dev_priv->track_flush |= RADEON_FLUSH_EMITED; ++ ++ cmdbuf->buf += 8 * 4; ++ cmdbuf->bufsz -= 8 * 4; ++ ++ return 0; ++} ++ ++static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ u32 header) ++{ ++ int count, i, k; ++#define MAX_ARRAY_PACKET 64 ++ u32 payload[MAX_ARRAY_PACKET]; ++ u32 narrays; ++ RING_LOCALS; ++ ++ count = (header >> 16) & 0x3fff; ++ ++ if ((count + 1) > MAX_ARRAY_PACKET) { ++ DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", ++ count); ++ return -EINVAL; ++ } ++ memset(payload, 0, MAX_ARRAY_PACKET * 4); ++ memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4); ++ ++ /* carefully check packet contents */ ++ ++ narrays = payload[0]; ++ k = 0; ++ i = 1; ++ while ((k < narrays) && (i < (count + 1))) { ++ i++; /* skip attribute field */ ++ if (!radeon_check_offset(dev_priv, payload[i])) { ++ DRM_ERROR ++ ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", ++ k, i); ++ return -EINVAL; ++ } ++ k++; ++ i++; ++ if (k == narrays) ++ break; ++ /* have one more to process, they come in pairs */ ++ if (!radeon_check_offset(dev_priv, payload[i])) { ++ DRM_ERROR ++ ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", ++ k, i); ++ return -EINVAL; ++ } ++ k++; ++ i++; ++ } ++ /* do the counts match what we expect ? 
*/ ++ if ((k != narrays) || (i != (count + 1))) { ++ DRM_ERROR ++ ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", ++ k, i, narrays, count + 1); ++ return -EINVAL; ++ } ++ ++ /* all clear, output packet */ ++ ++ BEGIN_RING(count + 2); ++ OUT_RING(header); ++ OUT_RING_TABLE(payload, count + 1); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += (count + 2) * 4; ++ cmdbuf->bufsz -= (count + 2) * 4; ++ ++ return 0; ++} ++ ++static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ u32 *cmd = (u32 *) cmdbuf->buf; ++ int count, ret; ++ RING_LOCALS; ++ ++ count=(cmd[0]>>16) & 0x3fff; ++ ++ if (cmd[0] & 0x8000) { ++ u32 offset; ++ ++ if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL ++ | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { ++ offset = cmd[2] << 10; ++ ret = !radeon_check_offset(dev_priv, offset); ++ if (ret) { ++ DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); ++ return -EINVAL; ++ } ++ } ++ ++ if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && ++ (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { ++ offset = cmd[3] << 10; ++ ret = !radeon_check_offset(dev_priv, offset); ++ if (ret) { ++ DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); ++ return -EINVAL; ++ } ++ ++ } ++ } ++ ++ BEGIN_RING(count+2); ++ OUT_RING(cmd[0]); ++ OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += (count+2)*4; ++ cmdbuf->bufsz -= (count+2)*4; ++ ++ return 0; ++} ++ ++static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ u32 *cmd; ++ int count; ++ int expected_count; ++ RING_LOCALS; ++ ++ cmd = (u32 *) cmdbuf->buf; ++ count = (cmd[0]>>16) & 0x3fff; ++ expected_count = cmd[1] >> 16; ++ if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit)) ++ expected_count = (expected_count+1)/2; ++ ++ if (count && count != expected_count) { ++ DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n", ++ count, expected_count); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(count+2); ++ OUT_RING(cmd[0]); ++ OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += (count+2)*4; ++ cmdbuf->bufsz -= (count+2)*4; ++ ++ if (!count) { ++ drm_r300_cmd_header_t header; ++ ++ if (cmdbuf->bufsz < 4*4 + sizeof(header)) { ++ DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n"); ++ return -EINVAL; ++ } ++ ++ header.u = *(unsigned int *)cmdbuf->buf; ++ ++ cmdbuf->buf += sizeof(header); ++ cmdbuf->bufsz -= sizeof(header); ++ cmd = (u32 *) cmdbuf->buf; ++ ++ if (header.header.cmd_type != R300_CMD_PACKET3 || ++ header.packet3.packet != R300_CMD_PACKET3_RAW || ++ cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) { ++ DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n"); ++ return -EINVAL; ++ } ++ ++ if ((cmd[1] & 0x8000ffff) != 0x80000810) { ++ DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); ++ return -EINVAL; ++ } ++ if (!radeon_check_offset(dev_priv, cmd[2])) { ++ DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); ++ return -EINVAL; ++ } ++ if (cmd[3] != expected_count) { ++ DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n", ++ cmd[3], expected_count); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(4); ++ OUT_RING(cmd[0]); ++ OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += 4*4; ++ cmdbuf->bufsz -= 4*4; ++ } ++ ++ return 0; ++} ++ ++static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t 
*cmdbuf) ++{ ++ u32 header; ++ int count; ++ RING_LOCALS; ++ ++ if (4 > cmdbuf->bufsz) ++ return -EINVAL; ++ ++ /* Fixme !! This simply emits a packet without much checking. ++ We need to be smarter. */ ++ ++ /* obtain first word - actual packet3 header */ ++ header = *(u32 *) cmdbuf->buf; ++ ++ /* Is it packet 3 ? */ ++ if ((header >> 30) != 0x3) { ++ DRM_ERROR("Not a packet3 header (0x%08x)\n", header); ++ return -EINVAL; ++ } ++ ++ count = (header >> 16) & 0x3fff; ++ ++ /* Check again now that we know how much data to expect */ ++ if ((count + 2) * 4 > cmdbuf->bufsz) { ++ DRM_ERROR ++ ("Expected packet3 of length %d but have only %d bytes left\n", ++ (count + 2) * 4, cmdbuf->bufsz); ++ return -EINVAL; ++ } ++ ++ /* Is it a packet type we know about ? */ ++ switch (header & 0xff00) { ++ case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */ ++ return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header); ++ ++ case RADEON_CNTL_BITBLT_MULTI: ++ return r300_emit_bitblt_multi(dev_priv, cmdbuf); ++ ++ case RADEON_CP_INDX_BUFFER: ++ DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n"); ++ return -EINVAL; ++ case RADEON_CP_3D_DRAW_IMMD_2: ++ /* triggers drawing using in-packet vertex data */ ++ case RADEON_CP_3D_DRAW_VBUF_2: ++ /* triggers drawing of vertex buffers setup elsewhere */ ++ dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED | ++ RADEON_PURGE_EMITED); ++ break; ++ case RADEON_CP_3D_DRAW_INDX_2: ++ /* triggers drawing using indices to vertex buffer */ ++ /* whenever we send vertex we clear flush & purge */ ++ dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED | ++ RADEON_PURGE_EMITED); ++ return r300_emit_draw_indx_2(dev_priv, cmdbuf); ++ case RADEON_WAIT_FOR_IDLE: ++ case RADEON_CP_NOP: ++ /* these packets are safe */ ++ break; ++ default: ++ DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(count + 2); ++ OUT_RING(header); ++ OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += (count + 2) * 4; ++ cmdbuf->bufsz -= (count + 2) * 4; ++ ++ return 0; ++} ++ ++/** ++ * Emit a rendering packet3 from userspace. ++ * Called by r300_do_cp_cmdbuf. ++ */ ++static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ drm_r300_cmd_header_t header) ++{ ++ int n; ++ int ret; ++ char *orig_buf = cmdbuf->buf; ++ int orig_bufsz = cmdbuf->bufsz; ++ ++ /* This is a do-while-loop so that we run the interior at least once, ++ * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale. 
++ */
++	n = 0;
++	do {
++		if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
++			ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
++			if (ret)
++				return ret;
++
++			cmdbuf->buf = orig_buf;
++			cmdbuf->bufsz = orig_bufsz;
++		}
++
++		switch (header.packet3.packet) {
++		case R300_CMD_PACKET3_CLEAR:
++			DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
++			ret = r300_emit_clear(dev_priv, cmdbuf);
++			if (ret) {
++				DRM_ERROR("r300_emit_clear failed\n");
++				return ret;
++			}
++			break;
++
++		case R300_CMD_PACKET3_RAW:
++			DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
++			ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
++			if (ret) {
++				DRM_ERROR("r300_emit_raw_packet3 failed\n");
++				return ret;
++			}
++			break;
++
++		default:
++			DRM_ERROR("bad packet3 type %i at %p\n",
++				  header.packet3.packet,
++				  cmdbuf->buf - sizeof(header));
++			return -EINVAL;
++		}
++
++		n += R300_SIMULTANEOUS_CLIPRECTS;
++	} while (n < cmdbuf->nbox);
++
++	return 0;
++}
++
++/* Some of the R300 chips seem to be extremely touchy about the two registers
++ * that are configured in r300_pacify.
++ * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
++ * sends a command buffer that contains only state setting commands and a
++ * vertex program/parameter upload sequence, this will eventually lead to a
++ * lockup, unless the sequence is bracketed by calls to r300_pacify.
++ * So we should take great care to *always* call r300_pacify before
++ * *anything* 3D related, and again afterwards. This is what the
++ * call bracket in r300_do_cp_cmdbuf is for.
++ */
++
++/**
++ * Emit the sequence to pacify R300.
++ */
++static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
++{
++	uint32_t cache_z, cache_3d, cache_2d;
++	RING_LOCALS;
++
++	cache_z = R300_ZC_FLUSH;
++	cache_2d = R300_RB2D_DC_FLUSH;
++	cache_3d = R300_RB3D_DC_FLUSH;
++	if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
++		/* we can purge; primitives were drawn since the last purge */
++		cache_z |= R300_ZC_FREE;
++		cache_2d |= R300_RB2D_DC_FREE;
++		cache_3d |= R300_RB3D_DC_FREE;
++	}
++
++	/* flush & purge zbuffer */
++	BEGIN_RING(2);
++	OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
++	OUT_RING(cache_z);
++	ADVANCE_RING();
++	/* flush & purge 3d */
++	BEGIN_RING(2);
++	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++	OUT_RING(cache_3d);
++	ADVANCE_RING();
++	/* flush & purge texture */
++	BEGIN_RING(2);
++	OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
++	OUT_RING(0);
++	ADVANCE_RING();
++	/* FIXME: is this one really needed? */
++	BEGIN_RING(2);
++	OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
++	OUT_RING(0);
++	ADVANCE_RING();
++	BEGIN_RING(2);
++	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
++	ADVANCE_RING();
++	/* flush & purge 2d through E2 as RB2D will trigger lockup */
++	BEGIN_RING(4);
++	OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
++	OUT_RING(cache_2d);
++	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++	OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
++		 RADEON_WAIT_HOST_IDLECLEAN);
++	ADVANCE_RING();
++	/* set flush & purge flags */
++	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
++}
++
++/**
++ * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
++ * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
++ * be careful about how this function is called.
++ */ ++static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_buf_priv_t *buf_priv = buf->dev_private; ++ ++ buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; ++ buf->pending = 1; ++ buf->used = 0; ++} ++ ++static void r300_cmd_wait(drm_radeon_private_t * dev_priv, ++ drm_r300_cmd_header_t header) ++{ ++ u32 wait_until; ++ RING_LOCALS; ++ ++ if (!header.wait.flags) ++ return; ++ ++ wait_until = 0; ++ ++ switch(header.wait.flags) { ++ case R300_WAIT_2D: ++ wait_until = RADEON_WAIT_2D_IDLE; ++ break; ++ case R300_WAIT_3D: ++ wait_until = RADEON_WAIT_3D_IDLE; ++ break; ++ case R300_NEW_WAIT_2D_3D: ++ wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE; ++ break; ++ case R300_NEW_WAIT_2D_2D_CLEAN: ++ wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN; ++ break; ++ case R300_NEW_WAIT_3D_3D_CLEAN: ++ wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN; ++ break; ++ case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN: ++ wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN; ++ wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN; ++ break; ++ default: ++ return; ++ } ++ ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0)); ++ OUT_RING(wait_until); ++ ADVANCE_RING(); ++} ++ ++static int r300_scratch(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ drm_r300_cmd_header_t header) ++{ ++ u32 *ref_age_base; ++ u32 i, buf_idx, h_pending; ++ RING_LOCALS; ++ ++ if (cmdbuf->bufsz < sizeof(uint64_t) + header.scratch.n_bufs * sizeof(buf_idx) ) { ++ return -EINVAL; ++ } ++ ++ if (header.scratch.reg >= 5) { ++ return -EINVAL; ++ } ++ ++ dev_priv->scratch_ages[header.scratch.reg] ++; ++ ++ ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf); ++ ++ cmdbuf->buf += sizeof(uint64_t); ++ cmdbuf->bufsz -= sizeof(uint64_t); ++ ++ for (i=0; i < header.scratch.n_bufs; i++) { ++ buf_idx = *(u32 *)cmdbuf->buf; ++ buf_idx *= 2; /* 8 bytes per buf */ ++ ++ if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { ++ return -EINVAL; ++ } ++ ++ if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { ++ return -EINVAL; ++ } ++ ++ if (h_pending == 0) { ++ return -EINVAL; ++ } ++ ++ h_pending--; ++ ++ if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { ++ return -EINVAL; ++ } ++ ++ cmdbuf->buf += sizeof(buf_idx); ++ cmdbuf->bufsz -= sizeof(buf_idx); ++ } ++ ++ BEGIN_RING(2); ++ OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) ); ++ OUT_RING( dev_priv->scratch_ages[header.scratch.reg] ); ++ ADVANCE_RING(); ++ ++ return 0; ++} ++ ++/** ++ * Uploads user-supplied vertex program instructions or parameters onto ++ * the graphics card. ++ * Called by r300_do_cp_cmdbuf. ++ */ ++static __inline__ int r300_emit_r500fp(drm_radeon_private_t *dev_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ drm_r300_cmd_header_t header) ++{ ++ int sz; ++ int addr; ++ int type; ++ int clamp; ++ int stride; ++ RING_LOCALS; ++ ++ sz = header.r500fp.count; ++ /* address is 9 bits 0 - 8, bit 1 of flags is part of address */ ++ addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo; ++ ++ type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE); ++ clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP); ++ ++ addr |= (type << 16); ++ addr |= (clamp << 17); ++ ++ stride = type ? 
4 : 6; ++ ++ DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type); ++ if (!sz) ++ return 0; ++ if (sz * stride * 4 > cmdbuf->bufsz) ++ return -EINVAL; ++ ++ BEGIN_RING(3 + sz * stride); ++ OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr); ++ OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1)); ++ OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride); ++ ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * stride * 4; ++ cmdbuf->bufsz -= sz * stride * 4; ++ ++ return 0; ++} ++ ++ ++/** ++ * Parses and validates a user-supplied command buffer and emits appropriate ++ * commands on the DMA ring buffer. ++ * Called by the ioctl handler function radeon_cp_cmdbuf. ++ */ ++int r300_do_cp_cmdbuf(struct drm_device *dev, ++ struct drm_file *file_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf = NULL; ++ int emit_dispatch_age = 0; ++ int ret = 0; ++ ++ DRM_DEBUG("\n"); ++ ++ /* pacify */ ++ r300_pacify(dev_priv); ++ ++ if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) { ++ ret = r300_emit_cliprects(dev_priv, cmdbuf, 0); ++ if (ret) ++ goto cleanup; ++ } ++ ++ while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) { ++ int idx; ++ drm_r300_cmd_header_t header; ++ ++ header.u = *(unsigned int *)cmdbuf->buf; ++ ++ cmdbuf->buf += sizeof(header); ++ cmdbuf->bufsz -= sizeof(header); ++ ++ switch (header.header.cmd_type) { ++ case R300_CMD_PACKET0: ++ ret = r300_emit_packet0(dev_priv, cmdbuf, header); ++ if (ret) { ++ DRM_ERROR("r300_emit_packet0 failed\n"); ++ goto cleanup; ++ } ++ break; ++ ++ case R300_CMD_VPU: ++ DRM_DEBUG("R300_CMD_VPU\n"); ++ ret = r300_emit_vpu(dev_priv, cmdbuf, header); ++ if (ret) { ++ DRM_ERROR("r300_emit_vpu failed\n"); ++ goto cleanup; ++ } ++ break; ++ ++ case R300_CMD_PACKET3: ++ DRM_DEBUG("R300_CMD_PACKET3\n"); ++ ret = r300_emit_packet3(dev_priv, cmdbuf, header); ++ if (ret) { ++ DRM_ERROR("r300_emit_packet3 failed\n"); ++ goto cleanup; ++ } ++ break; ++ ++ case R300_CMD_END3D: ++ DRM_DEBUG("R300_CMD_END3D\n"); ++ /* TODO: ++ Ideally userspace driver should not need to issue this call, ++ i.e. the drm driver should issue it automatically and prevent ++ lockups. ++ ++ In practice, we do not understand why this call is needed and what ++ it does (except for some vague guesses that it has to do with cache ++ coherence) and so the user space driver does it. ++ ++ Once we are sure which uses prevent lockups the code could be moved ++ into the kernel and the userspace driver will not ++ need to use this command. 
++ ++ Note that issuing this command does not hurt anything ++ except, possibly, performance */ ++ r300_pacify(dev_priv); ++ break; ++ ++ case R300_CMD_CP_DELAY: ++ /* simple enough, we can do it here */ ++ DRM_DEBUG("R300_CMD_CP_DELAY\n"); ++ { ++ int i; ++ RING_LOCALS; ++ ++ BEGIN_RING(header.delay.count); ++ for (i = 0; i < header.delay.count; i++) ++ OUT_RING(RADEON_CP_PACKET2); ++ ADVANCE_RING(); ++ } ++ break; ++ ++ case R300_CMD_DMA_DISCARD: ++ DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); ++ idx = header.dma.buf_idx; ++ if (idx < 0 || idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ idx, dma->buf_count - 1); ++ ret = -EINVAL; ++ goto cleanup; ++ } ++ ++ buf = dma->buflist[idx]; ++ if (buf->file_priv != file_priv || buf->pending) { ++ DRM_ERROR("bad buffer %p %p %d\n", ++ buf->file_priv, file_priv, ++ buf->pending); ++ ret = -EINVAL; ++ goto cleanup; ++ } ++ ++ emit_dispatch_age = 1; ++ r300_discard_buffer(dev, buf); ++ break; ++ ++ case R300_CMD_WAIT: ++ DRM_DEBUG("R300_CMD_WAIT\n"); ++ r300_cmd_wait(dev_priv, header); ++ break; ++ ++ case R300_CMD_SCRATCH: ++ DRM_DEBUG("R300_CMD_SCRATCH\n"); ++ ret = r300_scratch(dev_priv, cmdbuf, header); ++ if (ret) { ++ DRM_ERROR("r300_scratch failed\n"); ++ goto cleanup; ++ } ++ break; ++ ++ case R300_CMD_R500FP: ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) { ++ DRM_ERROR("Calling r500 command on r300 card\n"); ++ ret = -EINVAL; ++ goto cleanup; ++ } ++ DRM_DEBUG("R300_CMD_R500FP\n"); ++ ret = r300_emit_r500fp(dev_priv, cmdbuf, header); ++ if (ret) { ++ DRM_ERROR("r300_emit_r500fp failed\n"); ++ goto cleanup; ++ } ++ break; ++ default: ++ DRM_ERROR("bad cmd_type %i at %p\n", ++ header.header.cmd_type, ++ cmdbuf->buf - sizeof(header)); ++ ret = -EINVAL; ++ goto cleanup; ++ } ++ } ++ ++ DRM_DEBUG("END\n"); ++ ++ cleanup: ++ r300_pacify(dev_priv); ++ ++ /* We emit the vertex buffer age here, outside the pacifier "brackets" ++ * for two reasons: ++ * (1) This may coalesce multiple age emissions into a single one and ++ * (2) more importantly, some chips lock up hard when scratch registers ++ * are written inside the pacifier bracket. ++ */ ++ if (emit_dispatch_age) { ++ RING_LOCALS; ++ ++ /* Emit the vertex buffer age */ ++ BEGIN_RING(2); ++ RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch); ++ ADVANCE_RING(); ++ } ++ ++ COMMIT_RING(); ++ ++ return ret; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/r300_reg.h git-nokia/drivers/gpu/drm-tungsten/r300_reg.h +--- git/drivers/gpu/drm-tungsten/r300_reg.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/r300_reg.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1778 @@ ++/************************************************************************** ++ ++Copyright (C) 2004-2005 Nicolai Haehnle et al. ++ ++Permission is hereby granted, free of charge, to any person obtaining a ++copy of this software and associated documentation files (the "Software"), ++to deal in the Software without restriction, including without limitation ++on the rights to use, copy, modify, merge, publish, distribute, sub ++license, and/or sell copies of the Software, and to permit persons to whom ++the Software is furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice (including the next ++paragraph) shall be included in all copies or substantial portions of the ++Software. 
++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, ++DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++USE OR OTHER DEALINGS IN THE SOFTWARE. ++ ++**************************************************************************/ ++ ++/* *INDENT-OFF* */ ++ ++#ifndef _R300_REG_H ++#define _R300_REG_H ++ ++#define R300_MC_INIT_MISC_LAT_TIMER 0x180 ++# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0 ++# define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4 ++# define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8 ++# define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12 ++# define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16 ++# define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20 ++# define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24 ++# define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28 ++ ++ ++#define R300_MC_INIT_GFX_LAT_TIMER 0x154 ++# define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0 ++# define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4 ++# define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8 ++# define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12 ++# define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16 ++# define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20 ++# define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24 ++# define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28 ++ ++/* ++ * This file contains registers and constants for the R300. They have been ++ * found mostly by examining command buffers captured using glxtest, as well ++ * as by extrapolating some known registers and constants from the R200. ++ * I am fairly certain that they are correct unless stated otherwise ++ * in comments. ++ */ ++ ++#define R300_SE_VPORT_XSCALE 0x1D98 ++#define R300_SE_VPORT_XOFFSET 0x1D9C ++#define R300_SE_VPORT_YSCALE 0x1DA0 ++#define R300_SE_VPORT_YOFFSET 0x1DA4 ++#define R300_SE_VPORT_ZSCALE 0x1DA8 ++#define R300_SE_VPORT_ZOFFSET 0x1DAC ++ ++ ++/* ++ * Vertex Array Processing (VAP) Control ++ * Stolen from r200 code from Christoph Brill (It's a guess!) ++ */ ++#define R300_VAP_CNTL 0x2080 ++ ++/* This register is written directly and also starts data section ++ * in many 3d CP_PACKET3's ++ */ ++#define R300_VAP_VF_CNTL 0x2084 ++# define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0 ++# define R300_VAP_VF_CNTL__PRIM_NONE (0<<0) ++# define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0) ++# define R300_VAP_VF_CNTL__PRIM_LINES (2<<0) ++# define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0) ++# define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0) ++# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0) ++# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0) ++# define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0) ++# define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0) ++# define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0) ++# define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0) ++ ++# define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4 ++ /* State based - direct writes to registers trigger vertex ++ generation */ ++# define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4) ++# define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4) ++# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4) ++# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4) ++ ++ /* I don't think I saw these three used.. 
 */
++# define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6
++# define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9
++# define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10
++
++	/* index size - when not set the indices are assumed to be 16 bit */
++# define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11)
++	/* number of vertices */
++# define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16
++
++/* BEGIN: Wild guesses */
++#define R300_VAP_OUTPUT_VTX_FMT_0 0x2090
++# define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0)
++# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1)
++# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */
++# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */
++# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */
++# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
++
++#define R300_VAP_OUTPUT_VTX_FMT_1 0x2094
++	/* each of the following is 3 bits wide, specifies number
++	   of components */
++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
++# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
++/* END: Wild guesses */
++
++#define R300_SE_VTE_CNTL 0x20b0
++# define R300_VPORT_X_SCALE_ENA 0x00000001
++# define R300_VPORT_X_OFFSET_ENA 0x00000002
++# define R300_VPORT_Y_SCALE_ENA 0x00000004
++# define R300_VPORT_Y_OFFSET_ENA 0x00000008
++# define R300_VPORT_Z_SCALE_ENA 0x00000010
++# define R300_VPORT_Z_OFFSET_ENA 0x00000020
++# define R300_VTX_XY_FMT 0x00000100
++# define R300_VTX_Z_FMT 0x00000200
++# define R300_VTX_W0_FMT 0x00000400
++# define R300_VTX_W0_NORMALIZE 0x00000800
++# define R300_VTX_ST_DENORMALIZED 0x00001000
++
++/* BEGIN: Vertex data assembly - lots of uncertainties */
++
++/* gap */
++
++#define R300_VAP_CNTL_STATUS 0x2140
++# define R300_VC_NO_SWAP (0 << 0)
++# define R300_VC_16BIT_SWAP (1 << 0)
++# define R300_VC_32BIT_SWAP (2 << 0)
++# define R300_VAP_TCL_BYPASS (1 << 8)
++
++/* gap */
++
++/* Where do we get our vertex data?
++ *
++ * Vertex data comes either from immediate mode registers or from
++ * vertex arrays.
++ * There appears to be no mixed mode (though we can force the pitch of
++ * vertex arrays to 0, effectively reusing the same element over and over
++ * again).
++ *
++ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
++ * if these registers influence vertex array processing.
++ *
++ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
++ *
++ * In both cases, vertex attributes are then passed through INPUT_ROUTE.
++ *
++ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
++ * into the vertex processor's input registers.
++ * The first word routes the first input, the second word the second, etc.
++ * The corresponding input is routed into the register with the given index.
++ * The list is ended by a word with INPUT_ROUTE_END set.
++ *
++ * Always set COMPONENTS_4 in immediate mode.
++ */
++
++#define R300_VAP_INPUT_ROUTE_0_0 0x2150
++# define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0)
++# define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0)
++# define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0)
++# define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0)
++# define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */
++# define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8
++# define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */
++# define R300_VAP_INPUT_ROUTE_END (1 << 13)
++# define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */
++# define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */
++# define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */
++# define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */
++#define R300_VAP_INPUT_ROUTE_0_1 0x2154
++#define R300_VAP_INPUT_ROUTE_0_2 0x2158
++#define R300_VAP_INPUT_ROUTE_0_3 0x215C
++#define R300_VAP_INPUT_ROUTE_0_4 0x2160
++#define R300_VAP_INPUT_ROUTE_0_5 0x2164
++#define R300_VAP_INPUT_ROUTE_0_6 0x2168
++#define R300_VAP_INPUT_ROUTE_0_7 0x216C
++
++/* gap */
++
++/* Notes:
++ * - always set up to produce at least two attributes:
++ *   if the vertex program uses only position, fglrx will set normal, too
++ * - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal.
++ */
++#define R300_VAP_INPUT_CNTL_0 0x2180
++# define R300_INPUT_CNTL_0_COLOR 0x00000001
++#define R300_VAP_INPUT_CNTL_1 0x2184
++# define R300_INPUT_CNTL_POS 0x00000001
++# define R300_INPUT_CNTL_NORMAL 0x00000002
++# define R300_INPUT_CNTL_COLOR 0x00000004
++# define R300_INPUT_CNTL_TC0 0x00000400
++# define R300_INPUT_CNTL_TC1 0x00000800
++# define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */
++# define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */
++# define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */
++# define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */
++# define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */
++# define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */
++
++/* gap */
++
++/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
++ * are set to a swizzling bit pattern, other words are 0.
++ *
++ * In immediate mode, the pattern is always set to xyzw. In vertex array
++ * mode, the swizzling pattern is e.g. used to set zw components in texture
++ * coordinates with only two components.
++ */
++#define R300_VAP_INPUT_ROUTE_1_0 0x21E0
++# define R300_INPUT_ROUTE_SELECT_X 0
++# define R300_INPUT_ROUTE_SELECT_Y 1
++# define R300_INPUT_ROUTE_SELECT_Z 2
++# define R300_INPUT_ROUTE_SELECT_W 3
++# define R300_INPUT_ROUTE_SELECT_ZERO 4
++# define R300_INPUT_ROUTE_SELECT_ONE 5
++# define R300_INPUT_ROUTE_SELECT_MASK 7
++# define R300_INPUT_ROUTE_X_SHIFT 0
++# define R300_INPUT_ROUTE_Y_SHIFT 3
++# define R300_INPUT_ROUTE_Z_SHIFT 6
++# define R300_INPUT_ROUTE_W_SHIFT 9
++# define R300_INPUT_ROUTE_ENABLE (15 << 12)
++#define R300_VAP_INPUT_ROUTE_1_1 0x21E4
++#define R300_VAP_INPUT_ROUTE_1_2 0x21E8
++#define R300_VAP_INPUT_ROUTE_1_3 0x21EC
++#define R300_VAP_INPUT_ROUTE_1_4 0x21F0
++#define R300_VAP_INPUT_ROUTE_1_5 0x21F4
++#define R300_VAP_INPUT_ROUTE_1_6 0x21F8
++#define R300_VAP_INPUT_ROUTE_1_7 0x21FC
++
++/* END: Vertex data assembly */
++
++/* gap */
++
++/* BEGIN: Upload vertex program and data */
++
++/*
++ * The programmable vertex shader unit has a memory bank of unknown size
++ * that can be written to in 16 byte units by writing the address into
++ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
++ *
++ * Pointers into the memory bank are always in multiples of 16 bytes.
++ *
++ * The memory bank is divided into areas with fixed meaning.
++ *
++ * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
++ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
++ * whereas the difference between known addresses suggests size 512.
++ *
++ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
++ * Native reported limits and the VPI layout suggest size 256, whereas
++ * the difference between known addresses suggests size 512.
++ *
++ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
++ * floating point pointsize. The exact purpose of this state is uncertain,
++ * as there is also the R300_RE_POINTSIZE register.
++ *
++ * Multiple vertex programs and parameter sets can be loaded at once,
++ * which could explain the size discrepancy.
++ */
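The upload handshake just described boils down to one write of the target address followed by a burst of data dwords. As an illustrative sketch only (this helper is not part of the patch; it assumes the ring macros from r300_cmdbuf.c and the R300_VAP_PVS_UPLOAD_* names defined just below), uploading a vertex program of `count` four-dword instructions could look like:

static void r300_vpu_upload_sketch(drm_radeon_private_t *dev_priv,
                                   u32 *instr, int count)
{
        RING_LOCALS;

        BEGIN_RING(4 * count + 3);
        /* point the upload window at the start of the instruction area */
        OUT_RING(CP_PACKET0(R300_VAP_PVS_UPLOAD_ADDRESS, 0));
        OUT_RING(R300_PVS_UPLOAD_PROGRAM);
        /* stream the program; each VPI instruction is 4 dwords */
        OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, 4 * count - 1));
        OUT_RING_TABLE(instr, 4 * count);
        ADVANCE_RING();
}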
++#define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200
++# define R300_PVS_UPLOAD_PROGRAM 0x00000000
++# define R300_PVS_UPLOAD_PARAMETERS 0x00000200
++# define R300_PVS_UPLOAD_POINTSIZE 0x00000406
++
++/* gap */
++
++#define R300_VAP_PVS_UPLOAD_DATA 0x2208
++
++/* END: Upload vertex program and data */
++
++/* gap */
++
++/* I do not know the purpose of this register. However, I do know that
++ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
++ * for normal rendering.
++ */
++#define R300_VAP_UNKNOWN_221C 0x221C
++# define R300_221C_NORMAL 0x00000000
++# define R300_221C_CLEAR 0x0001C000
++
++/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
++ * plane is per-pixel and the second plane is per-vertex.
++ *
++ * This was determined by experimentation alone but I believe it is correct.
++ *
++ * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
++ */
++#define R300_VAP_CLIP_X_0 0x2220
++#define R300_VAP_CLIP_X_1 0x2224
++#define R300_VAP_CLIP_Y_0 0x2228
++#define R300_VAP_CLIP_Y_1 0x2230
++
++/* gap */
++
++/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
++ * rendering commands and overwriting vertex program parameters.
++ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
++ * avoids bugs caused by still-running shaders reading bad data from memory.
++ */
++#define R300_VAP_PVS_STATE_FLUSH_REG 0x2284
++
++/* Absolutely no clue what this register is about. */
++#define R300_VAP_UNKNOWN_2288 0x2288
++# define R300_2288_R300 0x00750000 /* -- nh */
++# define R300_2288_RV350 0x0000FFFF /* -- Vladimir */
++
++/* gap */
++
++/* Addresses are relative to the vertex program instruction area of the
++ * memory bank. PROGRAM_END points to the last instruction of the active
++ * program.
++ *
++ * The meaning of the two UNKNOWN fields is obviously not known. However,
++ * experiments so far have shown that both *must* point to an instruction
++ * inside the vertex program, otherwise the GPU locks up.
++ *
++ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
++ * R300_PVS_CNTL_1_POS_END_SHIFT points to the instruction where the last
++ * write to position takes place.
++ *
++ * Most likely this is used to skip the rest of the program in cases
++ * where a group of vertices isn't visible. For some reason this "section"
++ * sometimes also accepts other instructions that have no relationship with
++ * position calculations.
++ */
++#define R300_VAP_PVS_CNTL_1 0x22D0
++# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
++# define R300_PVS_CNTL_1_POS_END_SHIFT 10
++# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
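For illustration, packing the three instruction indices described above into a CNTL_1 value could look like this (a sketch, not part of the patch; `first`, `pos_end` and `last` are hypothetical instruction indices):

static u32 r300_pvs_cntl_1_sketch(u32 first, u32 pos_end, u32 last)
{
        /* all three fields must point at instructions inside the
         * program, or the GPU locks up (see the comment above) */
        return (first << R300_PVS_CNTL_1_PROGRAM_START_SHIFT) |
               (pos_end << R300_PVS_CNTL_1_POS_END_SHIFT) |
               (last << R300_PVS_CNTL_1_PROGRAM_END_SHIFT);
}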
++/* Addresses are relative to the vertex program parameters area. */
++#define R300_VAP_PVS_CNTL_2 0x22D4
++# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
++# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
++#define R300_VAP_PVS_CNTL_3 0x22D8
++# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
++# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
++
++/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
++ * immediate vertices
++ */
++#define R300_VAP_VTX_COLOR_R 0x2464
++#define R300_VAP_VTX_COLOR_G 0x2468
++#define R300_VAP_VTX_COLOR_B 0x246C
++#define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */
++#define R300_VAP_VTX_POS_0_Y_1 0x2494
++#define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */
++#define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */
++#define R300_VAP_VTX_POS_0_Y_2 0x24A4
++#define R300_VAP_VTX_POS_0_Z_2 0x24A8
++/* write 0 to indicate end of packet? */
++#define R300_VAP_VTX_END_OF_PKT 0x24AC
++
++/* gap */
++
++/* These are values from r300_reg/r300_reg.h - they are known to be correct
++ * and are here so we can use one register file instead of several
++ * - Vladimir
++ */
++#define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000
++# define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0)
++# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1)
++# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2)
++# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3)
++# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4)
++# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5)
++# define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16)
++
++#define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004
++	/* each of the following is 3 bits wide, specifies number
++	   of components */
++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
++# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
++
++/* UNK30 seems to enable point-to-quad transformation on textures
++ * (or something closely related to that).
++ * This bit is rather fatal at the moment due to shortcomings on the
++ * pixel shader side
++ */
++#define R300_GB_ENABLE 0x4008
++# define R300_GB_POINT_STUFF_ENABLE (1<<0)
++# define R300_GB_LINE_STUFF_ENABLE (1<<1)
++# define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2)
++# define R300_GB_STENCIL_AUTO_ENABLE (1<<4)
++# define R300_GB_UNK31 (1<<31)
++	/* each of the following is 2 bits wide */
++#define R300_GB_TEX_REPLICATE 0
++#define R300_GB_TEX_ST 1
++#define R300_GB_TEX_STR 2
++# define R300_GB_TEX0_SOURCE_SHIFT 16
++# define R300_GB_TEX1_SOURCE_SHIFT 18
++# define R300_GB_TEX2_SOURCE_SHIFT 20
++# define R300_GB_TEX3_SOURCE_SHIFT 22
++# define R300_GB_TEX4_SOURCE_SHIFT 24
++# define R300_GB_TEX5_SOURCE_SHIFT 26
++# define R300_GB_TEX6_SOURCE_SHIFT 28
++# define R300_GB_TEX7_SOURCE_SHIFT 30
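As a worked example of the two-bit source fields above (illustrative only, not part of the patch): a GB_ENABLE value that turns on stuffing for all three primitive types and feeds texture unit 0 with ST coordinates would be composed like this:

static u32 r300_gb_enable_sketch(void)
{
        return R300_GB_POINT_STUFF_ENABLE |
               R300_GB_LINE_STUFF_ENABLE |
               R300_GB_TRIANGLE_STUFF_ENABLE |
               (R300_GB_TEX_ST << R300_GB_TEX0_SOURCE_SHIFT);
}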
++/* MSPOS - positions for multisample antialiasing (?) */
++#define R300_GB_MSPOS0 0x4010
++	/* shifts - each of the fields is 4 bits */
++# define R300_GB_MSPOS0__MS_X0_SHIFT 0
++# define R300_GB_MSPOS0__MS_Y0_SHIFT 4
++# define R300_GB_MSPOS0__MS_X1_SHIFT 8
++# define R300_GB_MSPOS0__MS_Y1_SHIFT 12
++# define R300_GB_MSPOS0__MS_X2_SHIFT 16
++# define R300_GB_MSPOS0__MS_Y2_SHIFT 20
++# define R300_GB_MSPOS0__MSBD0_Y 24
++# define R300_GB_MSPOS0__MSBD0_X 28
++
++#define R300_GB_MSPOS1 0x4014
++# define R300_GB_MSPOS1__MS_X3_SHIFT 0
++# define R300_GB_MSPOS1__MS_Y3_SHIFT 4
++# define R300_GB_MSPOS1__MS_X4_SHIFT 8
++# define R300_GB_MSPOS1__MS_Y4_SHIFT 12
++# define R300_GB_MSPOS1__MS_X5_SHIFT 16
++# define R300_GB_MSPOS1__MS_Y5_SHIFT 20
++# define R300_GB_MSPOS1__MSBD1 24
++
++
++#define R300_GB_TILE_CONFIG 0x4018
++# define R300_GB_TILE_ENABLE (1<<0)
++# define R300_GB_TILE_PIPE_COUNT_RV300 0
++# define R300_GB_TILE_PIPE_COUNT_R300 (3<<1)
++# define R300_GB_TILE_PIPE_COUNT_R420 (7<<1)
++# define R300_GB_TILE_PIPE_COUNT_RV410 (3<<1)
++# define R300_GB_TILE_SIZE_8 0
++# define R300_GB_TILE_SIZE_16 (1<<4)
++# define R300_GB_TILE_SIZE_32 (2<<4)
++# define R300_GB_SUPER_SIZE_1 (0<<6)
++# define R300_GB_SUPER_SIZE_2 (1<<6)
++# define R300_GB_SUPER_SIZE_4 (2<<6)
++# define R300_GB_SUPER_SIZE_8 (3<<6)
++# define R300_GB_SUPER_SIZE_16 (4<<6)
++# define R300_GB_SUPER_SIZE_32 (5<<6)
++# define R300_GB_SUPER_SIZE_64 (6<<6)
++# define R300_GB_SUPER_SIZE_128 (7<<6)
++# define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */
++# define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */
++# define R300_GB_SUPER_TILE_A 0
++# define R300_GB_SUPER_TILE_B (1<<15)
++# define R300_GB_SUBPIXEL_1_12 0
++# define R300_GB_SUBPIXEL_1_16 (1<<16)
++
++#define R300_GB_FIFO_SIZE 0x4024
++	/* each of the following is 2 bits wide */
++#define R300_GB_FIFO_SIZE_32 0
++#define R300_GB_FIFO_SIZE_64 1
++#define R300_GB_FIFO_SIZE_128 2
++#define R300_GB_FIFO_SIZE_256 3
++# define R300_SC_IFIFO_SIZE_SHIFT 0
++# define R300_SC_TZFIFO_SIZE_SHIFT 2
++# define R300_SC_BFIFO_SIZE_SHIFT 4
++
++# define R300_US_OFIFO_SIZE_SHIFT 12
++# define R300_US_WFIFO_SIZE_SHIFT 14
++	/* the following use the same constants as above, but the meaning
++	   is times 2 (i.e. instead of 32 words it means 64) */
++# define R300_RS_TFIFO_SIZE_SHIFT 6
++# define R300_RS_CFIFO_SIZE_SHIFT 8
++# define R300_US_RAM_SIZE_SHIFT 10
++	/* watermarks, 3 bits wide */
++# define R300_RS_HIGHWATER_COL_SHIFT 16
++# define R300_RS_HIGHWATER_TEX_SHIFT 19
++# define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */
++# define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24
++
++#define R300_GB_SELECT 0x401C
++# define R300_GB_FOG_SELECT_C0A 0
++# define R300_GB_FOG_SELECT_C1A 1
++# define R300_GB_FOG_SELECT_C2A 2
++# define R300_GB_FOG_SELECT_C3A 3
++# define R300_GB_FOG_SELECT_1_1_W 4
++# define R300_GB_FOG_SELECT_Z 5
++# define R300_GB_DEPTH_SELECT_Z 0
++# define R300_GB_DEPTH_SELECT_1_1_W (1<<3)
++# define R300_GB_W_SELECT_1_W 0
++# define R300_GB_W_SELECT_1 (1<<4)
++
++#define R300_GB_AA_CONFIG 0x4020
++# define R300_AA_DISABLE 0x00
++# define R300_AA_ENABLE 0x01
++# define R300_AA_SUBSAMPLES_2 0
++# define R300_AA_SUBSAMPLES_3 (1<<1)
++# define R300_AA_SUBSAMPLES_4 (2<<1)
++# define R300_AA_SUBSAMPLES_6 (3<<1)
++
++/* gap */
++
++/* Zero to flush caches. */
++#define R300_TX_INVALTAGS 0x4100
++#define R300_TX_FLUSH 0x0
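To illustrate how the GB_TILE_CONFIG fields above combine (a sketch, not part of the patch; the chosen values are arbitrary):

static u32 r300_gb_tile_config_sketch(void)
{
        /* plain R300, tiling on, 16-pixel tiles, 1/16 subpixel precision */
        return R300_GB_TILE_ENABLE |
               R300_GB_TILE_PIPE_COUNT_R300 |
               R300_GB_TILE_SIZE_16 |
               R300_GB_SUBPIXEL_1_16;
}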
++/* The upper enable bits are guessed, based on fglrx reported limits. */
++#define R300_TX_ENABLE 0x4104
++# define R300_TX_ENABLE_0 (1 << 0)
++# define R300_TX_ENABLE_1 (1 << 1)
++# define R300_TX_ENABLE_2 (1 << 2)
++# define R300_TX_ENABLE_3 (1 << 3)
++# define R300_TX_ENABLE_4 (1 << 4)
++# define R300_TX_ENABLE_5 (1 << 5)
++# define R300_TX_ENABLE_6 (1 << 6)
++# define R300_TX_ENABLE_7 (1 << 7)
++# define R300_TX_ENABLE_8 (1 << 8)
++# define R300_TX_ENABLE_9 (1 << 9)
++# define R300_TX_ENABLE_10 (1 << 10)
++# define R300_TX_ENABLE_11 (1 << 11)
++# define R300_TX_ENABLE_12 (1 << 12)
++# define R300_TX_ENABLE_13 (1 << 13)
++# define R300_TX_ENABLE_14 (1 << 14)
++# define R300_TX_ENABLE_15 (1 << 15)
++
++/* The pointsize is given in multiples of 6. The pointsize can be
++ * enormous: Clear() renders a single point that fills the entire
++ * framebuffer.
++ */
++#define R300_RE_POINTSIZE 0x421C
++# define R300_POINTSIZE_Y_SHIFT 0
++# define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */
++# define R300_POINTSIZE_X_SHIFT 16
++# define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */
++# define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6)
++
++/* The line width is given in multiples of 6.
++ * In default mode lines are classified as vertical lines.
++ * HO: horizontal
++ * VE: vertical or horizontal
++ * HO & VE: no classification
++ */
++#define R300_RE_LINE_CNT 0x4234
++# define R300_LINESIZE_SHIFT 0
++# define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */
++# define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6)
++# define R300_LINE_CNT_HO (1 << 16)
++# define R300_LINE_CNT_VE (1 << 17)
++
++/* Some sort of scale or clamp value for texcoordless textures. */
++#define R300_RE_UNK4238 0x4238
++
++/* Something shade related */
++#define R300_RE_SHADE 0x4274
++
++#define R300_RE_SHADE_MODEL 0x4278
++# define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa
++# define R300_RE_SHADE_MODEL_FLAT 0x39595
++
++/* Dangerous */
++#define R300_RE_POLYGON_MODE 0x4288
++# define R300_PM_ENABLED (1 << 0)
++# define R300_PM_FRONT_POINT (0 << 0)
++# define R300_PM_BACK_POINT (0 << 0)
++# define R300_PM_FRONT_LINE (1 << 4)
++# define R300_PM_FRONT_FILL (1 << 5)
++# define R300_PM_BACK_LINE (1 << 7)
++# define R300_PM_BACK_FILL (1 << 8)
++
++/* Fog parameters */
++#define R300_RE_FOG_SCALE 0x4294
++#define R300_RE_FOG_START 0x4298
++
++/* Not sure why there are duplicates of the factor and constant values.
++ * My best guess so far is that there are separate zbiases for test and write.
++ * Ordering might be wrong.
++ * Some of the tests indicate that fgl has a fallback implementation of zbias
++ * via pixel shaders.
++ */
++#define R300_RE_ZBIAS_CNTL 0x42A0 /* GUESS */
++#define R300_RE_ZBIAS_T_FACTOR 0x42A4
++#define R300_RE_ZBIAS_T_CONSTANT 0x42A8
++#define R300_RE_ZBIAS_W_FACTOR 0x42AC
++#define R300_RE_ZBIAS_W_CONSTANT 0x42B0
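Since point and line sizes are given in sixths of a pixel, converting a size in pixels into a register value is one multiply and two shifts. A sketch (not part of the patch):

static u32 r300_pointsize_sketch(unsigned int pixels)
{
        u32 size = pixels * 6; /* hardware units are 1/6th of a pixel */

        return ((size << R300_POINTSIZE_X_SHIFT) & R300_POINTSIZE_X_MASK) |
               ((size << R300_POINTSIZE_Y_SHIFT) & R300_POINTSIZE_Y_MASK);
}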
++/* This register needs to be set to (1<<1) for RV350 to correctly
++ * perform depth test (see --vb-triangles in r300_demo).
++ * Don't know about other chips. - Vladimir
++ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
++ * My guess is that there are two bits for each zbias primitive
++ * (FILL, LINE, POINT).
++ * One to enable depth test and one for depth write.
++ * Yet this doesn't explain why depth writes work ...
++ */
++#define R300_RE_OCCLUSION_CNTL 0x42B4
++# define R300_OCCLUSION_ON (1<<1)
++
++#define R300_RE_CULL_CNTL 0x42B8
++# define R300_CULL_FRONT (1 << 0)
++# define R300_CULL_BACK (1 << 1)
++# define R300_FRONT_FACE_CCW (0 << 2)
++# define R300_FRONT_FACE_CW (1 << 2)
++
++
++/* BEGIN: Rasterization / Interpolators - many guesses */
++
++/* 0_UNKNOWN_18 has always been set except for clear operations.
++ * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
++ * on the vertex program, *not* the fragment program)
++ */
++#define R300_RS_CNTL_0 0x4300
++# define R300_RS_CNTL_TC_CNT_SHIFT 2
++# define R300_RS_CNTL_TC_CNT_MASK (7 << 2)
++	/* number of color interpolators used */
++# define R300_RS_CNTL_CI_CNT_SHIFT 7
++# define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18)
++	/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n
++	   register. */
++#define R300_RS_CNTL_1 0x4304
++
++/* gap */
++
++/* Only used for texture coordinates.
++ * Use the source field to route texture coordinate input from the
++ * vertex program to the desired interpolator. Note that the source
++ * field is relative to the outputs the vertex program *actually*
++ * writes. If a vertex program only writes texcoord[1], this will
++ * be source index 0.
++ * Set INTERP_USED on all interpolators that produce data used by
++ * the fragment program. INTERP_USED looks like a swizzling mask,
++ * but I haven't seen it used that way.
++ *
++ * Note: The _UNKNOWN constants are always set in their respective
++ * register. I don't know if this is necessary.
++ */
++#define R300_RS_INTERP_0 0x4310
++#define R300_RS_INTERP_1 0x4314
++# define R300_RS_INTERP_1_UNKNOWN 0x40
++#define R300_RS_INTERP_2 0x4318
++# define R300_RS_INTERP_2_UNKNOWN 0x80
++#define R300_RS_INTERP_3 0x431C
++# define R300_RS_INTERP_3_UNKNOWN 0xC0
++#define R300_RS_INTERP_4 0x4320
++#define R300_RS_INTERP_5 0x4324
++#define R300_RS_INTERP_6 0x4328
++#define R300_RS_INTERP_7 0x432C
++# define R300_RS_INTERP_SRC_SHIFT 2
++# define R300_RS_INTERP_SRC_MASK (7 << 2)
++# define R300_RS_INTERP_USED 0x00D10000
++
++/* These DWORDs control how vertex data is routed into fragment program
++ * registers, after interpolators.
++ */
++#define R300_RS_ROUTE_0 0x4330
++#define R300_RS_ROUTE_1 0x4334
++#define R300_RS_ROUTE_2 0x4338
++#define R300_RS_ROUTE_3 0x433C /* GUESS */
++#define R300_RS_ROUTE_4 0x4340 /* GUESS */
++#define R300_RS_ROUTE_5 0x4344 /* GUESS */
++#define R300_RS_ROUTE_6 0x4348 /* GUESS */
++#define R300_RS_ROUTE_7 0x434C /* GUESS */
++# define R300_RS_ROUTE_SOURCE_INTERP_0 0
++# define R300_RS_ROUTE_SOURCE_INTERP_1 1
++# define R300_RS_ROUTE_SOURCE_INTERP_2 2
++# define R300_RS_ROUTE_SOURCE_INTERP_3 3
++# define R300_RS_ROUTE_SOURCE_INTERP_4 4
++# define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */
++# define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */
++# define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */
++# define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */
++# define R300_RS_ROUTE_DEST_SHIFT 6
++# define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */
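Putting the ROUTE fields above together (illustrative only, not part of the patch): routing interpolator 0 into fragment program input register `dest` would look like:

static u32 r300_rs_route_sketch(u32 dest)
{
        return R300_RS_ROUTE_SOURCE_INTERP_0 |
               R300_RS_ROUTE_ENABLE |
               ((dest << R300_RS_ROUTE_DEST_SHIFT) & R300_RS_ROUTE_DEST_MASK);
}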
++/* Special handling for color: When the fragment program uses color,
++ * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
++ * color register index.
++ *
++ * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
++ * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
++ * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
++ * correct or not. - Oliver.
++ */
++# define R300_RS_ROUTE_0_COLOR (1 << 14)
++# define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17
++# define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */
++/* As above, but for secondary color */
++# define R300_RS_ROUTE_1_COLOR1 (1 << 14)
++# define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
++# define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17)
++# define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11)
++/* END: Rasterization / Interpolators - many guesses */
++
++/* Hierarchical Z Enable */
++#define R300_SC_HYPERZ 0x43a4
++# define R300_SC_HYPERZ_DISABLE (0 << 0)
++# define R300_SC_HYPERZ_ENABLE (1 << 0)
++# define R300_SC_HYPERZ_MIN (0 << 1)
++# define R300_SC_HYPERZ_MAX (1 << 1)
++# define R300_SC_HYPERZ_ADJ_256 (0 << 2)
++# define R300_SC_HYPERZ_ADJ_128 (1 << 2)
++# define R300_SC_HYPERZ_ADJ_64 (2 << 2)
++# define R300_SC_HYPERZ_ADJ_32 (3 << 2)
++# define R300_SC_HYPERZ_ADJ_16 (4 << 2)
++# define R300_SC_HYPERZ_ADJ_8 (5 << 2)
++# define R300_SC_HYPERZ_ADJ_4 (6 << 2)
++# define R300_SC_HYPERZ_ADJ_2 (7 << 2)
++# define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
++# define R300_SC_HYPERZ_HZ_Z0MIN (1 << 5)
++# define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
++# define R300_SC_HYPERZ_HZ_Z0MAX (1 << 6)
++
++#define R300_SC_EDGERULE 0x43a8
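As an example of the SC_HYPERZ bits above (a sketch, not part of the patch; the ADJ setting is chosen arbitrarily since its exact meaning is not documented here):

static u32 r300_sc_hyperz_sketch(void)
{
        return R300_SC_HYPERZ_ENABLE |
               R300_SC_HYPERZ_MIN |
               R300_SC_HYPERZ_ADJ_256;
}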
++/* BEGIN: Scissors and cliprects */
++
++/* There are four clipping rectangles. Their corner coordinates are inclusive.
++ * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
++ * on whether the pixel is inside cliprects 0-3, respectively. For example,
++ * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
++ * the number 3 (binary 0011).
++ * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
++ * the pixel is rasterized.
++ *
++ * In addition to this, there is a scissors rectangle. Only pixels inside the
++ * scissors rectangle are drawn. (coordinates are inclusive)
++ *
++ * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
++ * for the purpose of clipping and scissors.
++ */
++#define R300_RE_CLIPRECT_TL_0 0x43B0
++#define R300_RE_CLIPRECT_BR_0 0x43B4
++#define R300_RE_CLIPRECT_TL_1 0x43B8
++#define R300_RE_CLIPRECT_BR_1 0x43BC
++#define R300_RE_CLIPRECT_TL_2 0x43C0
++#define R300_RE_CLIPRECT_BR_2 0x43C4
++#define R300_RE_CLIPRECT_TL_3 0x43C8
++#define R300_RE_CLIPRECT_BR_3 0x43CC
++# define R300_CLIPRECT_OFFSET 1440
++# define R300_CLIPRECT_MASK 0x1FFF
++# define R300_CLIPRECT_X_SHIFT 0
++# define R300_CLIPRECT_X_MASK (0x1FFF << 0)
++# define R300_CLIPRECT_Y_SHIFT 13
++# define R300_CLIPRECT_Y_MASK (0x1FFF << 13)
++#define R300_RE_CLIPRECT_CNTL 0x43D0
++# define R300_CLIP_OUT (1 << 0)
++# define R300_CLIP_0 (1 << 1)
++# define R300_CLIP_1 (1 << 2)
++# define R300_CLIP_10 (1 << 3)
++# define R300_CLIP_2 (1 << 4)
++# define R300_CLIP_20 (1 << 5)
++# define R300_CLIP_21 (1 << 6)
++# define R300_CLIP_210 (1 << 7)
++# define R300_CLIP_3 (1 << 8)
++# define R300_CLIP_30 (1 << 9)
++# define R300_CLIP_31 (1 << 10)
++# define R300_CLIP_310 (1 << 11)
++# define R300_CLIP_32 (1 << 12)
++# define R300_CLIP_320 (1 << 13)
++# define R300_CLIP_321 (1 << 14)
++# define R300_CLIP_3210 (1 << 15)
++
++/* gap */
++
++#define R300_RE_SCISSORS_TL 0x43E0
++#define R300_RE_SCISSORS_BR 0x43E4
++# define R300_SCISSORS_OFFSET 1440
++# define R300_SCISSORS_X_SHIFT 0
++# define R300_SCISSORS_X_MASK (0x1FFF << 0)
++# define R300_SCISSORS_Y_SHIFT 13
++# define R300_SCISSORS_Y_MASK (0x1FFF << 13)
++/* END: Scissors and cliprects */
++
++/* BEGIN: Texture specification */
++
++/*
++ * The texture specification dwords are grouped by meaning and not by texture
++ * unit. This means that e.g. the offset for texture image unit N is found in
++ * register TX_OFFSET_0 + (4*N).
++ */
++#define R300_TX_FILTER_0 0x4400
++# define R300_TX_REPEAT 0
++# define R300_TX_MIRRORED 1
++# define R300_TX_CLAMP 4
++# define R300_TX_CLAMP_TO_EDGE 2
++# define R300_TX_CLAMP_TO_BORDER 6
++# define R300_TX_WRAP_S_SHIFT 0
++# define R300_TX_WRAP_S_MASK (7 << 0)
++# define R300_TX_WRAP_T_SHIFT 3
++# define R300_TX_WRAP_T_MASK (7 << 3)
++# define R300_TX_WRAP_Q_SHIFT 6
++# define R300_TX_WRAP_Q_MASK (7 << 6)
++# define R300_TX_MAG_FILTER_NEAREST (1 << 9)
++# define R300_TX_MAG_FILTER_LINEAR (2 << 9)
++# define R300_TX_MAG_FILTER_MASK (3 << 9)
++# define R300_TX_MIN_FILTER_NEAREST (1 << 11)
++# define R300_TX_MIN_FILTER_LINEAR (2 << 11)
++# define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST (5 << 11)
++# define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR (9 << 11)
++# define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11)
++# define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11)
++/* NOTE: NEAREST doesn't seem to exist.
++ * I'm not setting MAG_FILTER_MASK and (3 << 11) on for all
++ * anisotropy modes because that would void the selected mag filter.
++ */
++# define R300_TX_MIN_FILTER_ANISO_NEAREST (0 << 13)
++# define R300_TX_MIN_FILTER_ANISO_LINEAR (0 << 13)
++# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
++# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (2 << 13)
++# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) )
++# define R300_TX_MAX_ANISO_1_TO_1 (0 << 21)
++# define R300_TX_MAX_ANISO_2_TO_1 (2 << 21)
++# define R300_TX_MAX_ANISO_4_TO_1 (4 << 21)
++# define R300_TX_MAX_ANISO_8_TO_1 (6 << 21)
++# define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
++# define R300_TX_MAX_ANISO_MASK (14 << 21)
++
++#define R300_TX_FILTER1_0 0x4440
++# define R300_CHROMA_KEY_MODE_DISABLE 0
++# define R300_CHROMA_KEY_FORCE 1
++# define R300_CHROMA_KEY_BLEND 2
++# define R300_MC_ROUND_NORMAL (0<<2)
++# define R300_MC_ROUND_MPEG4 (1<<2)
++# define R300_LOD_BIAS_MASK 0x1fff
++# define R300_EDGE_ANISO_EDGE_DIAG (0<<13)
++# define R300_EDGE_ANISO_EDGE_ONLY (1<<13)
++# define R300_MC_COORD_TRUNCATE_DISABLE (0<<14)
++# define R300_MC_COORD_TRUNCATE_MPEG (1<<14)
++# define R300_TX_TRI_PERF_0_8 (0<<15)
++# define R300_TX_TRI_PERF_1_8 (1<<15)
++# define R300_TX_TRI_PERF_1_4 (2<<15)
++# define R300_TX_TRI_PERF_3_8 (3<<15)
++# define R300_ANISO_THRESHOLD_MASK (7<<17)
++
++#define R300_TX_SIZE_0 0x4480
++# define R300_TX_WIDTHMASK_SHIFT 0
++# define R300_TX_WIDTHMASK_MASK (2047 << 0)
++# define R300_TX_HEIGHTMASK_SHIFT 11
++# define R300_TX_HEIGHTMASK_MASK (2047 << 11)
++# define R300_TX_UNK23 (1 << 23)
++# define R300_TX_MAX_MIP_LEVEL_SHIFT 26
++# define R300_TX_MAX_MIP_LEVEL_MASK (0xf << 26)
++# define R300_TX_SIZE_PROJECTED (1<<30)
++# define R300_TX_SIZE_TXPITCH_EN (1<<31)
++#define R300_TX_FORMAT_0 0x44C0
++	/* The interpretation of the format word by Wladimir van der Laan */
++	/* The X, Y, Z and W refer to the layout of the components.
++	   They are given meanings as R, G, B and Alpha by the swizzle
++	   specification */
++# define R300_TX_FORMAT_X8 0x0
++# define R300_TX_FORMAT_X16 0x1
++# define R300_TX_FORMAT_Y4X4 0x2
++# define R300_TX_FORMAT_Y8X8 0x3
++# define R300_TX_FORMAT_Y16X16 0x4
++# define R300_TX_FORMAT_Z3Y3X2 0x5
++# define R300_TX_FORMAT_Z5Y6X5 0x6
++# define R300_TX_FORMAT_Z6Y5X5 0x7
++# define R300_TX_FORMAT_Z11Y11X10 0x8
++# define R300_TX_FORMAT_Z10Y11X11 0x9
++# define R300_TX_FORMAT_W4Z4Y4X4 0xA
++# define R300_TX_FORMAT_W1Z5Y5X5 0xB
++# define R300_TX_FORMAT_W8Z8Y8X8 0xC
++# define R300_TX_FORMAT_W2Z10Y10X10 0xD
++# define R300_TX_FORMAT_W16Z16Y16X16 0xE
++# define R300_TX_FORMAT_DXT1 0xF
++# define R300_TX_FORMAT_DXT3 0x10
++# define R300_TX_FORMAT_DXT5 0x11
++# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
++# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
++# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
++# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
++	/* 0x16 - some 16-bit green format? */
++# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
++# define R300_TX_FORMAT_CUBIC_MAP (1 << 26)
++
++	/* gap */
++	/* Floating point formats */
++	/* Note - hardware supports both 16 and 32 bit floating point */
++# define R300_TX_FORMAT_FL_I16 0x18
++# define R300_TX_FORMAT_FL_I16A16 0x19
++# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
++# define R300_TX_FORMAT_FL_I32 0x1B
++# define R300_TX_FORMAT_FL_I32A32 0x1C
++# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
++	/* alpha modes, convenience mostly */
++	/* if you have alpha, pick constant appropriate to the
++	   number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
++# define R300_TX_FORMAT_ALPHA_1CH 0x000
++# define R300_TX_FORMAT_ALPHA_2CH 0x200
++# define R300_TX_FORMAT_ALPHA_4CH 0x600
++# define R300_TX_FORMAT_ALPHA_NONE 0xA00
++	/* Swizzling */
++	/* constants */
++# define R300_TX_FORMAT_X 0
++# define R300_TX_FORMAT_Y 1
++# define R300_TX_FORMAT_Z 2
++# define R300_TX_FORMAT_W 3
++# define R300_TX_FORMAT_ZERO 4
++# define R300_TX_FORMAT_ONE 5
++	/* 2.0*Z, everything above 1.0 is set to 0.0 */
++# define R300_TX_FORMAT_CUT_Z 6
++	/* 2.0*W, everything above 1.0 is set to 0.0 */
++# define R300_TX_FORMAT_CUT_W 7
++
++# define R300_TX_FORMAT_B_SHIFT 18
++# define R300_TX_FORMAT_G_SHIFT 15
++# define R300_TX_FORMAT_R_SHIFT 12
++# define R300_TX_FORMAT_A_SHIFT 9
++	/* Convenience macro to take care of layout and swizzling */
++# define R300_EASY_TX_FORMAT(B, G, R, A, FMT)	(		\
++	  ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT)	\
++	| ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT)	\
++	| ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT)	\
++	| ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT)	\
++	| (R300_TX_FORMAT_##FMT)				\
++	)
++
++/* Notes:
++ * - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
++ * - CMP: If ARG2 < 0, return ARG1, else return ARG0
++ * - FLR: use FRC+MAD
++ * - XPD: use MAD+MAD
++ * - SGE, SLT: use MAD+CMP
++ * - RSQ: use ABS modifier for argument
++ * - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
++ *   (e.g. RCP) into color register
++ * - apparently, there's no quick DST operation
++ * - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
++ * - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
++ * - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
++ *
++ * Operand selection
++ * First stage selects three sources from the available registers and
++ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
++ * fglrx sorts the three source fields: Registers before constants,
++ * lower indices before higher indices; I do not know whether this is
++ * necessary.
++ *
++ * fglrx fills unused sources with "read constant 0"
++ * According to specs, you cannot select more than two different constants.
++ *
++ * Second stage selects the operands from the sources. This is defined in
++ * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
++ * zero and one.
++ * Swizzling and negation happen in this stage, as well.
++ *
++ * Important: Color and alpha seem to be mostly separate, i.e. their source
++ * selection appears to be fully independent (the register storage is probably
++ * physically split into a color and an alpha section).
++ * However (because of the apparent physical split), there is some interaction
++ * WRT swizzling. If, for example, you want to load an R component into an
++ * Alpha operand, this R component is taken from a *color* source, not from
++ * an alpha source. The corresponding register doesn't even have to appear in
++ * the alpha sources list. (I hope this all makes sense to you)
++ *
++ * Destination selection
++ * The destination register index is in FPI1 (color) and FPI3 (alpha)
++ * together with enable bits.
++ * There are separate enable bits for writing into temporary registers
++ * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_*
++ * /DSTA_OUTPUT). You can write to both at once, or not write at all (the
++ * same index must be used for both).
++ *
++ * Note: There is a special form for LRP
++ *  - Argument order is the same as in ARB_fragment_program.
++ *  - Operation is MAD
++ *  - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
++ *  - Set FPI0/FPI2_SPECIAL_LRP
++ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD
++ */
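To make the source/destination split concrete (illustrative only, not part of the patch; it uses the FPI1 fields defined just below): the color half of a MAD that reads temporaries 0 and 1 plus constant 0 and writes the xyz of temporary 2 would pack like this:

static u32 r300_fpi1_mad_sketch(void)
{
        return (0 << R300_FPI1_SRC0C_SHIFT) |   /* source 0: temporary 0 */
               (1 << R300_FPI1_SRC1C_SHIFT) |   /* source 1: temporary 1 */
               (0 << R300_FPI1_SRC2C_SHIFT) |
               R300_FPI1_SRC2C_CONST |          /* source 2: constant 0 */
               (2 << R300_FPI1_DSTC_SHIFT) |    /* destination: temporary 2 */
               R300_FPI1_DSTC_REG_X |
               R300_FPI1_DSTC_REG_Y |
               R300_FPI1_DSTC_REG_Z;
}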
++#define R300_PFS_INSTR1_0 0x46C0
++# define R300_FPI1_SRC0C_SHIFT 0
++# define R300_FPI1_SRC0C_MASK (31 << 0)
++# define R300_FPI1_SRC0C_CONST (1 << 5)
++# define R300_FPI1_SRC1C_SHIFT 6
++# define R300_FPI1_SRC1C_MASK (31 << 6)
++# define R300_FPI1_SRC1C_CONST (1 << 11)
++# define R300_FPI1_SRC2C_SHIFT 12
++# define R300_FPI1_SRC2C_MASK (31 << 12)
++# define R300_FPI1_SRC2C_CONST (1 << 17)
++# define R300_FPI1_SRC_MASK 0x0003ffff
++# define R300_FPI1_DSTC_SHIFT 18
++# define R300_FPI1_DSTC_MASK (31 << 18)
++# define R300_FPI1_DSTC_REG_MASK_SHIFT 23
++# define R300_FPI1_DSTC_REG_X (1 << 23)
++# define R300_FPI1_DSTC_REG_Y (1 << 24)
++# define R300_FPI1_DSTC_REG_Z (1 << 25)
++# define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT 26
++# define R300_FPI1_DSTC_OUTPUT_X (1 << 26)
++# define R300_FPI1_DSTC_OUTPUT_Y (1 << 27)
++# define R300_FPI1_DSTC_OUTPUT_Z (1 << 28)
++
++#define R300_PFS_INSTR3_0 0x47C0
++# define R300_FPI3_SRC0A_SHIFT 0
++# define R300_FPI3_SRC0A_MASK (31 << 0)
++# define R300_FPI3_SRC0A_CONST (1 << 5)
++# define R300_FPI3_SRC1A_SHIFT 6
++# define R300_FPI3_SRC1A_MASK (31 << 6)
++# define R300_FPI3_SRC1A_CONST (1 << 11)
++# define R300_FPI3_SRC2A_SHIFT 12
++# define R300_FPI3_SRC2A_MASK (31 << 12)
++# define R300_FPI3_SRC2A_CONST (1 << 17)
++# define R300_FPI3_SRC_MASK 0x0003ffff
++# define R300_FPI3_DSTA_SHIFT 18
++# define R300_FPI3_DSTA_MASK (31 << 18)
++# define R300_FPI3_DSTA_REG (1 << 23)
++# define R300_FPI3_DSTA_OUTPUT (1 << 24)
++# define R300_FPI3_DSTA_DEPTH (1 << 27)
++
++#define R300_PFS_INSTR0_0 0x48C0
++# define R300_FPI0_ARGC_SRC0C_XYZ 0
++# define R300_FPI0_ARGC_SRC0C_XXX 1
++# define R300_FPI0_ARGC_SRC0C_YYY 2
++# define R300_FPI0_ARGC_SRC0C_ZZZ 3
++# define R300_FPI0_ARGC_SRC1C_XYZ 4
++# define R300_FPI0_ARGC_SRC1C_XXX 5
++# define R300_FPI0_ARGC_SRC1C_YYY 6
++# define R300_FPI0_ARGC_SRC1C_ZZZ 7
++# define R300_FPI0_ARGC_SRC2C_XYZ 8
++# define R300_FPI0_ARGC_SRC2C_XXX 9
++# define R300_FPI0_ARGC_SRC2C_YYY 10
++# define R300_FPI0_ARGC_SRC2C_ZZZ 11
++# define R300_FPI0_ARGC_SRC0A 12
++# define R300_FPI0_ARGC_SRC1A 13
++# define R300_FPI0_ARGC_SRC2A 14
++# define R300_FPI0_ARGC_SRC1C_LRP 15
++# define R300_FPI0_ARGC_ZERO 20
++# define R300_FPI0_ARGC_ONE 21
++	/* GUESS */
++# define R300_FPI0_ARGC_HALF 22
++# define R300_FPI0_ARGC_SRC0C_YZX 23
++# define R300_FPI0_ARGC_SRC1C_YZX 24
++# define R300_FPI0_ARGC_SRC2C_YZX 25
++# define R300_FPI0_ARGC_SRC0C_ZXY 26
++# define R300_FPI0_ARGC_SRC1C_ZXY 27
++# define R300_FPI0_ARGC_SRC2C_ZXY 28
++# define R300_FPI0_ARGC_SRC0CA_WZY 29
++# define R300_FPI0_ARGC_SRC1CA_WZY 30
++# define R300_FPI0_ARGC_SRC2CA_WZY 31
++
++# define R300_FPI0_ARG0C_SHIFT 0
++# define R300_FPI0_ARG0C_MASK (31 << 0)
++# define R300_FPI0_ARG0C_NEG (1 << 5)
++# define R300_FPI0_ARG0C_ABS (1 << 6)
++# define R300_FPI0_ARG1C_SHIFT 7
++# define R300_FPI0_ARG1C_MASK (31 << 7)
++# define R300_FPI0_ARG1C_NEG (1 << 12)
++# define R300_FPI0_ARG1C_ABS (1 << 13)
++# define R300_FPI0_ARG2C_SHIFT 14
++# define 
R300_FPI0_ARG2C_MASK (31 << 14) ++# define R300_FPI0_ARG2C_NEG (1 << 19) ++# define R300_FPI0_ARG2C_ABS (1 << 20) ++# define R300_FPI0_SPECIAL_LRP (1 << 21) ++# define R300_FPI0_OUTC_MAD (0 << 23) ++# define R300_FPI0_OUTC_DP3 (1 << 23) ++# define R300_FPI0_OUTC_DP4 (2 << 23) ++# define R300_FPI0_OUTC_MIN (4 << 23) ++# define R300_FPI0_OUTC_MAX (5 << 23) ++# define R300_FPI0_OUTC_CMPH (7 << 23) ++# define R300_FPI0_OUTC_CMP (8 << 23) ++# define R300_FPI0_OUTC_FRC (9 << 23) ++# define R300_FPI0_OUTC_REPL_ALPHA (10 << 23) ++# define R300_FPI0_OUTC_SAT (1 << 30) ++# define R300_FPI0_INSERT_NOP (1 << 31) ++ ++#define R300_PFS_INSTR2_0 0x49C0 ++# define R300_FPI2_ARGA_SRC0C_X 0 ++# define R300_FPI2_ARGA_SRC0C_Y 1 ++# define R300_FPI2_ARGA_SRC0C_Z 2 ++# define R300_FPI2_ARGA_SRC1C_X 3 ++# define R300_FPI2_ARGA_SRC1C_Y 4 ++# define R300_FPI2_ARGA_SRC1C_Z 5 ++# define R300_FPI2_ARGA_SRC2C_X 6 ++# define R300_FPI2_ARGA_SRC2C_Y 7 ++# define R300_FPI2_ARGA_SRC2C_Z 8 ++# define R300_FPI2_ARGA_SRC0A 9 ++# define R300_FPI2_ARGA_SRC1A 10 ++# define R300_FPI2_ARGA_SRC2A 11 ++# define R300_FPI2_ARGA_SRC1A_LRP 15 ++# define R300_FPI2_ARGA_ZERO 16 ++# define R300_FPI2_ARGA_ONE 17 ++ /* GUESS */ ++# define R300_FPI2_ARGA_HALF 18 ++# define R300_FPI2_ARG0A_SHIFT 0 ++# define R300_FPI2_ARG0A_MASK (31 << 0) ++# define R300_FPI2_ARG0A_NEG (1 << 5) ++ /* GUESS */ ++# define R300_FPI2_ARG0A_ABS (1 << 6) ++# define R300_FPI2_ARG1A_SHIFT 7 ++# define R300_FPI2_ARG1A_MASK (31 << 7) ++# define R300_FPI2_ARG1A_NEG (1 << 12) ++ /* GUESS */ ++# define R300_FPI2_ARG1A_ABS (1 << 13) ++# define R300_FPI2_ARG2A_SHIFT 14 ++# define R300_FPI2_ARG2A_MASK (31 << 14) ++# define R300_FPI2_ARG2A_NEG (1 << 19) ++ /* GUESS */ ++# define R300_FPI2_ARG2A_ABS (1 << 20) ++# define R300_FPI2_SPECIAL_LRP (1 << 21) ++# define R300_FPI2_OUTA_MAD (0 << 23) ++# define R300_FPI2_OUTA_DP4 (1 << 23) ++# define R300_FPI2_OUTA_MIN (2 << 23) ++# define R300_FPI2_OUTA_MAX (3 << 23) ++# define R300_FPI2_OUTA_CMP (6 << 23) ++# define R300_FPI2_OUTA_FRC (7 << 23) ++# define R300_FPI2_OUTA_EX2 (8 << 23) ++# define R300_FPI2_OUTA_LG2 (9 << 23) ++# define R300_FPI2_OUTA_RCP (10 << 23) ++# define R300_FPI2_OUTA_RSQ (11 << 23) ++# define R300_FPI2_OUTA_SAT (1 << 30) ++# define R300_FPI2_UNKNOWN_31 (1 << 31) ++/* END: Fragment program instruction set */ ++ ++/* Fog state and color */ ++#define R300_RE_FOG_STATE 0x4BC0 ++# define R300_FOG_ENABLE (1 << 0) ++# define R300_FOG_MODE_LINEAR (0 << 1) ++# define R300_FOG_MODE_EXP (1 << 1) ++# define R300_FOG_MODE_EXP2 (2 << 1) ++# define R300_FOG_MODE_MASK (3 << 1) ++#define R300_FOG_COLOR_R 0x4BC8 ++#define R300_FOG_COLOR_G 0x4BCC ++#define R300_FOG_COLOR_B 0x4BD0 ++ ++#define R300_PP_ALPHA_TEST 0x4BD4 ++# define R300_REF_ALPHA_MASK 0x000000ff ++# define R300_ALPHA_TEST_FAIL (0 << 8) ++# define R300_ALPHA_TEST_LESS (1 << 8) ++# define R300_ALPHA_TEST_LEQUAL (3 << 8) ++# define R300_ALPHA_TEST_EQUAL (2 << 8) ++# define R300_ALPHA_TEST_GEQUAL (6 << 8) ++# define R300_ALPHA_TEST_GREATER (4 << 8) ++# define R300_ALPHA_TEST_NEQUAL (5 << 8) ++# define R300_ALPHA_TEST_PASS (7 << 8) ++# define R300_ALPHA_TEST_OP_MASK (7 << 8) ++# define R300_ALPHA_TEST_ENABLE (1 << 11) ++ ++/* gap */ ++ ++/* Fragment program parameters in 7.16 floating point */ ++#define R300_PFS_PARAM_0_X 0x4C00 ++#define R300_PFS_PARAM_0_Y 0x4C04 ++#define R300_PFS_PARAM_0_Z 0x4C08 ++#define R300_PFS_PARAM_0_W 0x4C0C ++/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */ ++#define R300_PFS_PARAM_31_X 0x4DF0 ++#define R300_PFS_PARAM_31_Y 
0x4DF4 ++#define R300_PFS_PARAM_31_Z 0x4DF8 ++#define R300_PFS_PARAM_31_W 0x4DFC ++ ++/* Notes: ++ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in ++ * the application ++ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND ++ * are set to the same ++ * function (both registers are always set up completely in any case) ++ * - Most blend flags are simply copied from R200 and not tested yet ++ */ ++#define R300_RB3D_CBLEND 0x4E04 ++#define R300_RB3D_ABLEND 0x4E08 ++/* the following only appear in CBLEND */ ++# define R300_BLEND_ENABLE (1 << 0) ++# define R300_BLEND_UNKNOWN (3 << 1) ++# define R300_BLEND_NO_SEPARATE (1 << 3) ++/* the following are shared between CBLEND and ABLEND */ ++# define R300_FCN_MASK (3 << 12) ++# define R300_COMB_FCN_ADD_CLAMP (0 << 12) ++# define R300_COMB_FCN_ADD_NOCLAMP (1 << 12) ++# define R300_COMB_FCN_SUB_CLAMP (2 << 12) ++# define R300_COMB_FCN_SUB_NOCLAMP (3 << 12) ++# define R300_COMB_FCN_MIN (4 << 12) ++# define R300_COMB_FCN_MAX (5 << 12) ++# define R300_COMB_FCN_RSUB_CLAMP (6 << 12) ++# define R300_COMB_FCN_RSUB_NOCLAMP (7 << 12) ++# define R300_BLEND_GL_ZERO (32) ++# define R300_BLEND_GL_ONE (33) ++# define R300_BLEND_GL_SRC_COLOR (34) ++# define R300_BLEND_GL_ONE_MINUS_SRC_COLOR (35) ++# define R300_BLEND_GL_DST_COLOR (36) ++# define R300_BLEND_GL_ONE_MINUS_DST_COLOR (37) ++# define R300_BLEND_GL_SRC_ALPHA (38) ++# define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA (39) ++# define R300_BLEND_GL_DST_ALPHA (40) ++# define R300_BLEND_GL_ONE_MINUS_DST_ALPHA (41) ++# define R300_BLEND_GL_SRC_ALPHA_SATURATE (42) ++# define R300_BLEND_GL_CONST_COLOR (43) ++# define R300_BLEND_GL_ONE_MINUS_CONST_COLOR (44) ++# define R300_BLEND_GL_CONST_ALPHA (45) ++# define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA (46) ++# define R300_BLEND_MASK (63) ++# define R300_SRC_BLEND_SHIFT (16) ++# define R300_DST_BLEND_SHIFT (24) ++#define R300_RB3D_BLEND_COLOR 0x4E10 ++#define R300_RB3D_COLORMASK 0x4E0C ++# define R300_COLORMASK0_B (1<<0) ++# define R300_COLORMASK0_G (1<<1) ++# define R300_COLORMASK0_R (1<<2) ++# define R300_COLORMASK0_A (1<<3) ++ ++/* gap */ ++ ++#define R300_RB3D_COLOROFFSET0 0x4E28 ++# define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */ ++#define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */ ++#define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */ ++#define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */ ++ ++/* gap */ ++ ++/* Bit 16: Larger tiles ++ * Bit 17: 4x2 tiles ++ * Bit 18: Extremely weird tile like, but some pixels duplicated? ++ */ ++#define R300_RB3D_COLORPITCH0 0x4E38 ++# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */ ++# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */ ++# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */ ++# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */ ++# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */ ++# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */ ++# define R300_COLOR_FORMAT_RGB565 (2 << 22) ++# define R300_COLOR_FORMAT_ARGB8888 (3 << 22) ++#define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */ ++#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ ++#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ ++ ++#define R300_RB3D_AARESOLVE_CTL 0x4E88 ++/* gap */ ++ ++/* Guess by Vladimir. ++ * Set to 0A before 3D operations, set to 02 afterwards. 
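++ * A sketch of what that presumably means in practice (using the register
++ * offset that is commented out below; this is an illustration, not
++ * observed behaviour):
++ * RADEON_WRITE(R300_RB3D_DSTCACHE_CTLSTAT, R300_RB3D_DSTCACHE_UNKNOWN_0A);
++ * ... 3D operations ...
++ * RADEON_WRITE(R300_RB3D_DSTCACHE_CTLSTAT, R300_RB3D_DSTCACHE_UNKNOWN_02);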
++ */ ++/*#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C*/ ++# define R300_RB3D_DSTCACHE_UNKNOWN_02 0x00000002 ++# define R300_RB3D_DSTCACHE_UNKNOWN_0A 0x0000000A ++ ++/* gap */ ++/* There seems to be no "write only" setting, so use Z-test = ALWAYS ++ * for this. ++ * Bit (1<<8) is the "test" bit. so plain write is 6 - vd ++ */ ++#define R300_ZB_CNTL 0x4F00 ++# define R300_STENCIL_ENABLE (1 << 0) ++# define R300_Z_ENABLE (1 << 1) ++# define R300_Z_WRITE_ENABLE (1 << 2) ++# define R300_Z_SIGNED_COMPARE (1 << 3) ++# define R300_STENCIL_FRONT_BACK (1 << 4) ++ ++#define R300_ZB_ZSTENCILCNTL 0x4f04 ++ /* functions */ ++# define R300_ZS_NEVER 0 ++# define R300_ZS_LESS 1 ++# define R300_ZS_LEQUAL 2 ++# define R300_ZS_EQUAL 3 ++# define R300_ZS_GEQUAL 4 ++# define R300_ZS_GREATER 5 ++# define R300_ZS_NOTEQUAL 6 ++# define R300_ZS_ALWAYS 7 ++# define R300_ZS_MASK 7 ++ /* operations */ ++# define R300_ZS_KEEP 0 ++# define R300_ZS_ZERO 1 ++# define R300_ZS_REPLACE 2 ++# define R300_ZS_INCR 3 ++# define R300_ZS_DECR 4 ++# define R300_ZS_INVERT 5 ++# define R300_ZS_INCR_WRAP 6 ++# define R300_ZS_DECR_WRAP 7 ++# define R300_Z_FUNC_SHIFT 0 ++ /* front and back refer to operations done for front ++ and back faces, i.e. separate stencil function support */ ++# define R300_S_FRONT_FUNC_SHIFT 3 ++# define R300_S_FRONT_SFAIL_OP_SHIFT 6 ++# define R300_S_FRONT_ZPASS_OP_SHIFT 9 ++# define R300_S_FRONT_ZFAIL_OP_SHIFT 12 ++# define R300_S_BACK_FUNC_SHIFT 15 ++# define R300_S_BACK_SFAIL_OP_SHIFT 18 ++# define R300_S_BACK_ZPASS_OP_SHIFT 21 ++# define R300_S_BACK_ZFAIL_OP_SHIFT 24 ++ ++#define R300_ZB_STENCILREFMASK 0x4f08 ++# define R300_STENCILREF_SHIFT 0 ++# define R300_STENCILREF_MASK 0x000000ff ++# define R300_STENCILMASK_SHIFT 8 ++# define R300_STENCILMASK_MASK 0x0000ff00 ++# define R300_STENCILWRITEMASK_SHIFT 16 ++# define R300_STENCILWRITEMASK_MASK 0x00ff0000 ++ ++/* gap */ ++ ++#define R300_ZB_FORMAT 0x4f10 ++# define R300_DEPTHFORMAT_16BIT_INT_Z (0 << 0) ++# define R300_DEPTHFORMAT_16BIT_13E3 (1 << 0) ++# define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL (2 << 0) ++/* reserved up to (15 << 0) */ ++# define R300_INVERT_13E3_LEADING_ONES (0 << 4) ++# define R300_INVERT_13E3_LEADING_ZEROS (1 << 4) ++ ++#define R300_ZB_ZTOP 0x4F14 ++# define R300_ZTOP_DISABLE (0 << 0) ++# define R300_ZTOP_ENABLE (1 << 0) ++ ++/* gap */ ++ ++#define R300_ZB_ZCACHE_CTLSTAT 0x4f18 ++# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT (0 << 0) ++# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0) ++# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT (0 << 1) ++# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE (1 << 1) ++# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE (0 << 31) ++# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1 << 31) ++ ++#define R300_ZB_BW_CNTL 0x4f1c ++# define R300_HIZ_DISABLE (0 << 0) ++# define R300_HIZ_ENABLE (1 << 0) ++# define R300_HIZ_MIN (0 << 1) ++# define R300_HIZ_MAX (1 << 1) ++# define R300_FAST_FILL_DISABLE (0 << 2) ++# define R300_FAST_FILL_ENABLE (1 << 2) ++# define R300_RD_COMP_DISABLE (0 << 3) ++# define R300_RD_COMP_ENABLE (1 << 3) ++# define R300_WR_COMP_DISABLE (0 << 4) ++# define R300_WR_COMP_ENABLE (1 << 4) ++# define R300_ZB_CB_CLEAR_RMW (0 << 5) ++# define R300_ZB_CB_CLEAR_CACHE_LINEAR (1 << 5) ++# define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE (0 << 6) ++# define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE (1 << 6) ++ ++# define R500_ZEQUAL_OPTIMIZE_ENABLE (0 << 7) ++# define R500_ZEQUAL_OPTIMIZE_DISABLE (1 << 7) ++# define R500_SEQUAL_OPTIMIZE_ENABLE (0 << 8) ++# define 
R500_SEQUAL_OPTIMIZE_DISABLE (1 << 8) ++ ++# define R500_BMASK_ENABLE (0 << 10) ++# define R500_BMASK_DISABLE (1 << 10) ++# define R500_HIZ_EQUAL_REJECT_DISABLE (0 << 11) ++# define R500_HIZ_EQUAL_REJECT_ENABLE (1 << 11) ++# define R500_HIZ_FP_EXP_BITS_DISABLE (0 << 12) ++# define R500_HIZ_FP_EXP_BITS_1 (1 << 12) ++# define R500_HIZ_FP_EXP_BITS_2 (2 << 12) ++# define R500_HIZ_FP_EXP_BITS_3 (3 << 12) ++# define R500_HIZ_FP_EXP_BITS_4 (4 << 12) ++# define R500_HIZ_FP_EXP_BITS_5 (5 << 12) ++# define R500_HIZ_FP_INVERT_LEADING_ONES (0 << 15) ++# define R500_HIZ_FP_INVERT_LEADING_ZEROS (1 << 15) ++# define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE (0 << 16) ++# define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE (1 << 16) ++# define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE (0 << 17) ++# define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE (1 << 17) ++# define R500_PEQ_PACKING_DISABLE (0 << 18) ++# define R500_PEQ_PACKING_ENABLE (1 << 18) ++# define R500_COVERED_PTR_MASKING_DISABLE (0 << 18) ++# define R500_COVERED_PTR_MASKING_ENABLE (1 << 18) ++ ++ ++/* gap */ ++ ++/* Z Buffer Address Offset. ++ * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles. ++ */ ++#define R300_ZB_DEPTHOFFSET 0x4f20 ++ ++/* Z Buffer Pitch and Endian Control */ ++#define R300_ZB_DEPTHPITCH 0x4f24 ++# define R300_DEPTHPITCH_MASK 0x00003FFC ++# define R300_DEPTHMACROTILE_DISABLE (0 << 16) ++# define R300_DEPTHMACROTILE_ENABLE (1 << 16) ++# define R300_DEPTHMICROTILE_LINEAR (0 << 17) ++# define R300_DEPTHMICROTILE_TILED (1 << 17) ++# define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17) ++# define R300_DEPTHENDIAN_NO_SWAP (0 << 18) ++# define R300_DEPTHENDIAN_WORD_SWAP (1 << 18) ++# define R300_DEPTHENDIAN_DWORD_SWAP (2 << 18) ++# define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18) ++ ++/* Z Buffer Clear Value */ ++#define R300_ZB_DEPTHCLEARVALUE 0x4f28 ++ ++#define R300_ZB_ZMASK_OFFSET 0x4f30 ++#define R300_ZB_ZMASK_PITCH 0x4f34 ++#define R300_ZB_ZMASK_WRINDEX 0x4f38 ++#define R300_ZB_ZMASK_DWORD 0x4f3c ++#define R300_ZB_ZMASK_RDINDEX 0x4f40 ++ ++/* Hierarchical Z Memory Offset */ ++#define R300_ZB_HIZ_OFFSET 0x4f44 ++ ++/* Hierarchical Z Write Index */ ++#define R300_ZB_HIZ_WRINDEX 0x4f48 ++ ++/* Hierarchical Z Data */ ++#define R300_ZB_HIZ_DWORD 0x4f4c ++ ++/* Hierarchical Z Read Index */ ++#define R300_ZB_HIZ_RDINDEX 0x4f50 ++ ++/* Hierarchical Z Pitch */ ++#define R300_ZB_HIZ_PITCH 0x4f54 ++ ++/* Z Buffer Z Pass Counter Data */ ++#define R300_ZB_ZPASS_DATA 0x4f58 ++ ++/* Z Buffer Z Pass Counter Address */ ++#define R300_ZB_ZPASS_ADDR 0x4f5c ++ ++/* Depth buffer X and Y coordinate offset */ ++#define R300_ZB_DEPTHXY_OFFSET 0x4f60 ++# define R300_DEPTHX_OFFSET_SHIFT 1 ++# define R300_DEPTHX_OFFSET_MASK 0x000007FE ++# define R300_DEPTHY_OFFSET_SHIFT 17 ++# define R300_DEPTHY_OFFSET_MASK 0x07FE0000 ++ ++/* Sets the fifo sizes */ ++#define R500_ZB_FIFO_SIZE 0x4fd0 ++# define R500_OP_FIFO_SIZE_FULL (0 << 0) ++# define R500_OP_FIFO_SIZE_HALF (1 << 0) ++# define R500_OP_FIFO_SIZE_QUATER (2 << 0) ++# define R500_OP_FIFO_SIZE_EIGTHS (4 << 0) ++ ++/* Stencil Reference Value and Mask for backfacing quads */ ++/* R300_ZB_STENCILREFMASK handles front face */ ++#define R500_ZB_STENCILREFMASK_BF 0x4fd4 ++# define R500_STENCILREF_SHIFT 0 ++# define R500_STENCILREF_MASK 0x000000ff ++# define R500_STENCILMASK_SHIFT 8 ++# define R500_STENCILMASK_MASK 0x0000ff00 ++# define R500_STENCILWRITEMASK_SHIFT 16 ++# define R500_STENCILWRITEMASK_MASK 0x00ff0000 ++ ++/* BEGIN: Vertex program instruction set */ ++ ++/* Every instruction is four dwords 
long:
++ * DWORD 0: output and opcode
++ * DWORD 1: first argument
++ * DWORD 2: second argument
++ * DWORD 3: third argument
++ *
++ * Notes:
++ * - ABS r, a is implemented as MAX r, a, -a
++ * - MOV is implemented as ADD to zero
++ * - XPD is implemented as MUL + MAD
++ * - FLR is implemented as FRC + ADD
++ * - apparently, fglrx tries to schedule instructions so that there is
++ * at least one instruction between the write to a temporary and the
++ * first read from said temporary; however, violations of this
++ * scheduling are allowed
++ * - register indices seem to be unrelated to OpenGL aliasing to
++ * conventional state
++ * - only one attribute and one parameter can be loaded at a time;
++ * however, the same attribute/parameter can be used for more than
++ * one argument
++ * - the second software argument for POW is the third hardware argument
++ * (no idea why)
++ * - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
++ *
++ * There is some magic surrounding LIT:
++ * The single argument is replicated across all three inputs, but swizzled:
++ * First argument: xyzy
++ * Second argument: xyzx
++ * Third argument: xyzw
++ * Whenever the result is used later in the fragment program, fglrx forces
++ * x and w to be 1.0 in the input selection; I don't know whether this is
++ * strictly necessary
++ */
++#define R300_VPI_OUT_OP_DOT (1 << 0)
++#define R300_VPI_OUT_OP_MUL (2 << 0)
++#define R300_VPI_OUT_OP_ADD (3 << 0)
++#define R300_VPI_OUT_OP_MAD (4 << 0)
++#define R300_VPI_OUT_OP_DST (5 << 0)
++#define R300_VPI_OUT_OP_FRC (6 << 0)
++#define R300_VPI_OUT_OP_MAX (7 << 0)
++#define R300_VPI_OUT_OP_MIN (8 << 0)
++#define R300_VPI_OUT_OP_SGE (9 << 0)
++#define R300_VPI_OUT_OP_SLT (10 << 0)
++ /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
++#define R300_VPI_OUT_OP_UNK12 (12 << 0)
++#define R300_VPI_OUT_OP_ARL (13 << 0)
++#define R300_VPI_OUT_OP_EXP (65 << 0)
++#define R300_VPI_OUT_OP_LOG (66 << 0)
++ /* Used in fog computations, scalar(scalar) */
++#define R300_VPI_OUT_OP_UNK67 (67 << 0)
++#define R300_VPI_OUT_OP_LIT (68 << 0)
++#define R300_VPI_OUT_OP_POW (69 << 0)
++#define R300_VPI_OUT_OP_RCP (70 << 0)
++#define R300_VPI_OUT_OP_RSQ (72 << 0)
++ /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
++#define R300_VPI_OUT_OP_UNK73 (73 << 0)
++#define R300_VPI_OUT_OP_EX2 (75 << 0)
++#define R300_VPI_OUT_OP_LG2 (76 << 0)
++#define R300_VPI_OUT_OP_MAD_2 (128 << 0)
++ /* all temps, vector(scalar, vector, vector) */
++#define R300_VPI_OUT_OP_UNK129 (129 << 0)
++
++#define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8)
++#define R300_VPI_OUT_REG_CLASS_ADDR (1 << 8)
++#define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8)
++#define R300_VPI_OUT_REG_CLASS_MASK (31 << 8)
++
++#define R300_VPI_OUT_REG_INDEX_SHIFT 13
++ /* GUESS based on fglrx native limits */
++#define R300_VPI_OUT_REG_INDEX_MASK (31 << 13)
++
++#define R300_VPI_OUT_WRITE_X (1 << 20)
++#define R300_VPI_OUT_WRITE_Y (1 << 21)
++#define R300_VPI_OUT_WRITE_Z (1 << 22)
++#define R300_VPI_OUT_WRITE_W (1 << 23)
++
++#define R300_VPI_IN_REG_CLASS_TEMPORARY (0 << 0)
++#define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0)
++#define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0)
++#define R300_VPI_IN_REG_CLASS_NONE (9 << 0)
++#define R300_VPI_IN_REG_CLASS_MASK (31 << 0)
++
++#define R300_VPI_IN_REG_INDEX_SHIFT 5
++ /* GUESS based on fglrx native limits */
++#define R300_VPI_IN_REG_INDEX_MASK (255 << 5)
++
++/* The R300 can select components from the input register arbitrarily.
++ * Use the following constants, shifted by the component shift you
++ * want to select
++ */
++#define R300_VPI_IN_SELECT_X 0
++#define R300_VPI_IN_SELECT_Y 1
++#define R300_VPI_IN_SELECT_Z 2
++#define R300_VPI_IN_SELECT_W 3
++#define R300_VPI_IN_SELECT_ZERO 4
++#define R300_VPI_IN_SELECT_ONE 5
++#define R300_VPI_IN_SELECT_MASK 7
++
++#define R300_VPI_IN_X_SHIFT 13
++#define R300_VPI_IN_Y_SHIFT 16
++#define R300_VPI_IN_Z_SHIFT 19
++#define R300_VPI_IN_W_SHIFT 22
++
++#define R300_VPI_IN_NEG_X (1 << 25)
++#define R300_VPI_IN_NEG_Y (1 << 26)
++#define R300_VPI_IN_NEG_Z (1 << 27)
++#define R300_VPI_IN_NEG_W (1 << 28)
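++
++/* Worked example (an illustrative sketch, not an fglrx dump): since MOV
++ * is implemented as ADD to zero, "MOV temp1, temp0" comes out roughly as
++ * the following four dwords:
++ *
++ * dw0 = R300_VPI_OUT_OP_ADD | R300_VPI_OUT_REG_CLASS_TEMPORARY
++ * | (1 << R300_VPI_OUT_REG_INDEX_SHIFT)
++ * | R300_VPI_OUT_WRITE_X | R300_VPI_OUT_WRITE_Y
++ * | R300_VPI_OUT_WRITE_Z | R300_VPI_OUT_WRITE_W;
++ * dw1 = R300_VPI_IN_REG_CLASS_TEMPORARY
++ * | (0 << R300_VPI_IN_REG_INDEX_SHIFT)
++ * | (R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT)
++ * | (R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT)
++ * | (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT)
++ * | (R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT);
++ * dw2 = the same input selection, but with every component select set
++ * to R300_VPI_IN_SELECT_ZERO;
++ * dw3 = third argument, unused here.
++ */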
++/* END: Vertex program instruction set */
++
++/* BEGIN: Packet 3 commands */
++
++/* A primitive emission dword. */
++#define R300_PRIM_TYPE_NONE (0 << 0)
++#define R300_PRIM_TYPE_POINT (1 << 0)
++#define R300_PRIM_TYPE_LINE (2 << 0)
++#define R300_PRIM_TYPE_LINE_STRIP (3 << 0)
++#define R300_PRIM_TYPE_TRI_LIST (4 << 0)
++#define R300_PRIM_TYPE_TRI_FAN (5 << 0)
++#define R300_PRIM_TYPE_TRI_STRIP (6 << 0)
++#define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0)
++#define R300_PRIM_TYPE_RECT_LIST (8 << 0)
++#define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
++#define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
++ /* GUESS (based on r200) */
++#define R300_PRIM_TYPE_POINT_SPRITES (11 << 0)
++#define R300_PRIM_TYPE_LINE_LOOP (12 << 0)
++#define R300_PRIM_TYPE_QUADS (13 << 0)
++#define R300_PRIM_TYPE_QUAD_STRIP (14 << 0)
++#define R300_PRIM_TYPE_POLYGON (15 << 0)
++#define R300_PRIM_TYPE_MASK 0xF
++#define R300_PRIM_WALK_IND (1 << 4)
++#define R300_PRIM_WALK_LIST (2 << 4)
++#define R300_PRIM_WALK_RING (3 << 4)
++#define R300_PRIM_WALK_MASK (3 << 4)
++ /* GUESS (based on r200) */
++#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6)
++#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6)
++#define R300_PRIM_NUM_VERTICES_SHIFT 16
++#define R300_PRIM_NUM_VERTICES_MASK 0xffff
++
++/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
++ * Two parameter dwords:
++ * 0. The first parameter appears to be always 0
++ * 1. The second parameter is a standard primitive emission dword.
++ */
++#define R300_PACKET3_3D_DRAW_VBUF 0x00002800
++
++/* Specify the full set of vertex arrays as (address, stride).
++ * The first parameter is the number of vertex arrays specified.
++ * The rest of the command is a variable length list of blocks, where
++ * each block is three dwords long and specifies two arrays.
++ * The first dword of a block is split into two words, the less significant
++ * word refers to the first array, the more significant word to the second
++ * array in the block.
++ * The low byte of each word contains the size of an array entry in dwords,
++ * the high byte contains the stride of the array.
++ * The second dword of a block contains the pointer to the first array,
++ * the third dword of a block contains the pointer to the second array.
++ * Note that if the total number of arrays is odd, the third dword of
++ * the last block is omitted.
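++ *
++ * As a sketch (an illustration of the layout just described, not part of
++ * the original comment), one block describing two arrays with entry
++ * sizes size0/size1 and strides stride0/stride1, all in dwords, is:
++ * dword 0: (size0 | (stride0 << 8)) | ((size1 | (stride1 << 8)) << 16)
++ * dword 1: address of the first array
++ * dword 2: address of the second array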
++ */ ++#define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00 ++ ++#define R300_PACKET3_INDX_BUFFER 0x00003300 ++# define R300_EB_UNK1_SHIFT 24 ++# define R300_EB_UNK1 (0x80<<24) ++# define R300_EB_UNK2 0x0810 ++#define R300_PACKET3_3D_DRAW_VBUF_2 0x00003400 ++#define R300_PACKET3_3D_DRAW_INDX_2 0x00003600 ++ ++/* END: Packet 3 commands */ ++ ++ ++/* Color formats for 2d packets ++ */ ++#define R300_CP_COLOR_FORMAT_CI8 2 ++#define R300_CP_COLOR_FORMAT_ARGB1555 3 ++#define R300_CP_COLOR_FORMAT_RGB565 4 ++#define R300_CP_COLOR_FORMAT_ARGB8888 6 ++#define R300_CP_COLOR_FORMAT_RGB332 7 ++#define R300_CP_COLOR_FORMAT_RGB8 9 ++#define R300_CP_COLOR_FORMAT_ARGB4444 15 ++ ++/* ++ * CP type-3 packets ++ */ ++#define R300_CP_CMD_BITBLT_MULTI 0xC0009B00 ++ ++#define R500_VAP_INDEX_OFFSET 0x208c ++ ++#define R500_GA_US_VECTOR_INDEX 0x4250 ++#define R500_GA_US_VECTOR_DATA 0x4254 ++ ++#define R500_RS_IP_0 0x4074 ++#define R500_RS_INST_0 0x4320 ++ ++#define R500_US_CONFIG 0x4600 ++ ++#define R500_US_FC_CTRL 0x4624 ++#define R500_US_CODE_ADDR 0x4630 ++ ++#define R500_RB3D_COLOR_CLEAR_VALUE_AR 0x46c0 ++#define R500_RB3D_CONSTANT_COLOR_AR 0x4ef8 ++ ++#endif /* _R300_REG_H */ ++ ++/* *INDENT-ON* */ +diff -Nurd git/drivers/gpu/drm-tungsten/radeon_cp.c git-nokia/drivers/gpu/drm-tungsten/radeon_cp.c +--- git/drivers/gpu/drm-tungsten/radeon_cp.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/radeon_cp.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1771 @@ ++/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */ ++/* ++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California. ++ * Copyright 2007 Advanced Micro Devices, Inc. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Kevin E. 
Martin ++ * Gareth Hughes ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++#include "r300_reg.h" ++ ++#include "radeon_microcode.h" ++#define RADEON_FIFO_DEBUG 0 ++ ++static int radeon_do_cleanup_cp(struct drm_device * dev); ++static void radeon_do_cp_start(drm_radeon_private_t * dev_priv); ++ ++static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) ++{ ++ u32 ret; ++ RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff)); ++ ret = RADEON_READ(R520_MC_IND_DATA); ++ RADEON_WRITE(R520_MC_IND_INDEX, 0); ++ return ret; ++} ++ ++static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) ++{ ++ u32 ret; ++ RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff); ++ ret = RADEON_READ(RS480_NB_MC_DATA); ++ RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); ++ return ret; ++} ++ ++static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) ++{ ++ u32 ret; ++ RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK)); ++ ret = RADEON_READ(RS690_MC_DATA); ++ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK); ++ return ret; ++} ++ ++static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) ++{ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ++ return RS690_READ_MCIND(dev_priv, addr); ++ else ++ return RS480_READ_MCIND(dev_priv, addr); ++} ++ ++u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) ++{ ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ++ return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ++ return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) ++ return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); ++ else ++ return RADEON_READ(RADEON_MC_FB_LOCATION); ++} ++ ++static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) ++{ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ++ R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ++ RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) ++ R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); ++ else ++ RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc); ++} ++ ++static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc) ++{ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ++ R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ++ RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc); ++ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) ++ R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); ++ else ++ RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc); ++} ++ ++static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base) ++{ ++ u32 agp_base_hi = upper_32_bits(agp_base); ++ u32 agp_base_lo = agp_base & 0xffffffff; ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) { ++ R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo); ++ R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi); ++ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) { ++ RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo); ++ RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi); ++ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { ++ R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); ++ R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); ++ } else 
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
++ RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
++ RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
++ } else {
++ RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
++ RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
++ }
++}
++
++static int RADEON_READ_PLL(struct drm_device * dev, int addr)
++{
++ drm_radeon_private_t *dev_priv = dev->dev_private;
++
++ RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
++ return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
++}
++
++static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
++{
++ RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
++ return RADEON_READ(RADEON_PCIE_DATA);
++}
++
++#if RADEON_FIFO_DEBUG
++static void radeon_status(drm_radeon_private_t * dev_priv)
++{
++ printk("%s:\n", __FUNCTION__);
++ printk("RBBM_STATUS = 0x%08x\n",
++ (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
++ printk("CP_RB_RPTR = 0x%08x\n",
++ (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
++ printk("CP_RB_WPTR = 0x%08x\n",
++ (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
++ printk("AIC_CNTL = 0x%08x\n",
++ (unsigned int)RADEON_READ(RADEON_AIC_CNTL));
++ printk("AIC_STAT = 0x%08x\n",
++ (unsigned int)RADEON_READ(RADEON_AIC_STAT));
++ printk("AIC_PT_BASE = 0x%08x\n",
++ (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
++ printk("TLB_ADDR = 0x%08x\n",
++ (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
++ printk("TLB_DATA = 0x%08x\n",
++ (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
++}
++#endif
++
++/* ================================================================
++ * Engine, FIFO control
++ */
++
++static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
++{
++ u32 tmp;
++ int i;
++
++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
++ tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
++ tmp |= RADEON_RB3D_DC_FLUSH_ALL;
++ RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
++
++ for (i = 0; i < dev_priv->usec_timeout; i++) {
++ if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
++ & RADEON_RB3D_DC_BUSY)) {
++ return 0;
++ }
++ DRM_UDELAY(1);
++ }
++ } else {
++ /* don't flush or purge cache here or lockup */
++ return 0;
++ }
++
++#if RADEON_FIFO_DEBUG
++ DRM_ERROR("failed!\n");
++ radeon_status(dev_priv);
++#endif
++ return -EBUSY;
++}
++
++static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
++{
++ int i;
++
++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++ for (i = 0; i < dev_priv->usec_timeout; i++) {
++ int slots = (RADEON_READ(RADEON_RBBM_STATUS)
++ & RADEON_RBBM_FIFOCNT_MASK);
++ if (slots >= entries)
++ return 0;
++ DRM_UDELAY(1);
++ }
++ DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
++ RADEON_READ(RADEON_RBBM_STATUS),
++ RADEON_READ(R300_VAP_CNTL_STATUS));
++
++#if RADEON_FIFO_DEBUG
++ DRM_ERROR("failed!\n");
++ radeon_status(dev_priv);
++#endif
++ return -EBUSY;
++}
++
++static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
++{
++ int i, ret;
++
++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++ ret = radeon_do_wait_for_fifo(dev_priv, 64);
++ if (ret)
++ return ret;
++
++ for (i = 0; i < dev_priv->usec_timeout; i++) {
++ if (!(RADEON_READ(RADEON_RBBM_STATUS)
++ & RADEON_RBBM_ACTIVE)) {
++ radeon_do_pixcache_flush(dev_priv);
++ return 0;
++ }
++ DRM_UDELAY(1);
++ }
++ DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
++
RADEON_READ(RADEON_RBBM_STATUS), ++ RADEON_READ(R300_VAP_CNTL_STATUS)); ++ ++#if RADEON_FIFO_DEBUG ++ DRM_ERROR("failed!\n"); ++ radeon_status(dev_priv); ++#endif ++ return -EBUSY; ++} ++ ++static void radeon_init_pipes(drm_radeon_private_t * dev_priv) ++{ ++ uint32_t gb_tile_config, gb_pipe_sel = 0; ++ ++ /* RS4xx/RS6xx/R4xx/R5xx */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { ++ gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); ++ dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; ++ } else { ++ /* R3xx */ ++ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { ++ dev_priv->num_gb_pipes = 2; ++ } else { ++ /* R3Vxx */ ++ dev_priv->num_gb_pipes = 1; ++ } ++ } ++ DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes); ++ ++ gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/); ++ ++ switch(dev_priv->num_gb_pipes) { ++ case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break; ++ case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break; ++ case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break; ++ default: ++ case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break; ++ } ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { ++ RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4)); ++ RADEON_WRITE(R500_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1)); ++ } ++ RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config); ++ radeon_do_wait_for_idle(dev_priv); ++ RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG); ++ RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) | ++ R300_DC_AUTOFLUSH_ENABLE | ++ R300_DC_DC_DISABLE_IGNORE_PE)); ++ ++ ++} ++ ++/* ================================================================ ++ * CP control, initialization ++ */ ++ ++/* Load the microcode for the CP */ ++static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv) ++{ ++ int i; ++ DRM_DEBUG("\n"); ++ ++ radeon_do_wait_for_idle(dev_priv); ++ ++ RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0); ++ ++ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) { ++ DRM_INFO("Loading R100 Microcode\n"); ++ for (i = 0; i < 256; i++) { ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, ++ R100_cp_microcode[i][1]); ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, ++ R100_cp_microcode[i][0]); ++ } ++ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) { ++ DRM_INFO("Loading R200 Microcode\n"); ++ for (i = 0; i < 256; i++) { ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, ++ R200_cp_microcode[i][1]); ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, ++ R200_cp_microcode[i][0]); ++ } ++ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { ++ DRM_INFO("Loading R300 Microcode\n"); ++ for (i = 0; i < 256; 
i++) { ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, ++ R300_cp_microcode[i][1]); ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, ++ R300_cp_microcode[i][0]); ++ } ++ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) { ++ DRM_INFO("Loading R400 Microcode\n"); ++ for (i = 0; i < 256; i++) { ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, ++ R420_cp_microcode[i][1]); ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, ++ R420_cp_microcode[i][0]); ++ } ++ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) { ++ DRM_INFO("Loading RS690 Microcode\n"); ++ for (i = 0; i < 256; i++) { ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, ++ RS690_cp_microcode[i][1]); ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, ++ RS690_cp_microcode[i][0]); ++ } ++ } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) || ++ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) { ++ DRM_INFO("Loading R500 Microcode\n"); ++ for (i = 0; i < 256; i++) { ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, ++ R520_cp_microcode[i][1]); ++ RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, ++ R520_cp_microcode[i][0]); ++ } ++ } ++} ++ ++/* Flush any pending commands to the CP. This should only be used just ++ * prior to a wait for idle, as it informs the engine that the command ++ * stream is ending. ++ */ ++static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv) ++{ ++ DRM_DEBUG("\n"); ++#if 0 ++ u32 tmp; ++ ++ tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31); ++ RADEON_WRITE(RADEON_CP_RB_WPTR, tmp); ++#endif ++} ++ ++/* Wait for the CP to go idle. ++ */ ++int radeon_do_cp_idle(drm_radeon_private_t * dev_priv) ++{ ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(6); ++ ++ RADEON_PURGE_CACHE(); ++ RADEON_PURGE_ZCACHE(); ++ RADEON_WAIT_UNTIL_IDLE(); ++ ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ ++ return radeon_do_wait_for_idle(dev_priv); ++} ++ ++/* Start the Command Processor. ++ */ ++static void radeon_do_cp_start(drm_radeon_private_t * dev_priv) ++{ ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ radeon_do_wait_for_idle(dev_priv); ++ ++ RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode); ++ ++ dev_priv->cp_running = 1; ++ ++ BEGIN_RING(8); ++ /* isync can only be written through cp on r5xx write it here */ ++ OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0)); ++ OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D | ++ RADEON_ISYNC_ANY3D_IDLE2D | ++ RADEON_ISYNC_WAIT_IDLEGUI | ++ RADEON_ISYNC_CPSCRATCH_IDLEGUI); ++ RADEON_PURGE_CACHE(); ++ RADEON_PURGE_ZCACHE(); ++ RADEON_WAIT_UNTIL_IDLE(); ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ ++ dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED; ++} ++ ++/* Reset the Command Processor. This will not flush any pending ++ * commands, so you must wait for the CP command stream to complete ++ * before calling this routine. ++ */ ++static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv) ++{ ++ u32 cur_read_ptr; ++ DRM_DEBUG("\n"); ++ ++ cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); ++ RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); ++ SET_RING_HEAD(dev_priv, cur_read_ptr); ++ dev_priv->ring.tail = cur_read_ptr; ++} ++ ++/* Stop the Command Processor. This will not flush any pending ++ * commands, so you must flush the command stream and wait for the CP ++ * to go idle before calling this routine. 
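++ *
++ * A typical caller (a sketch that mirrors the radeon_cp_stop ioctl
++ * further down in this file) would do:
++ * radeon_do_cp_flush(dev_priv);
++ * radeon_do_cp_idle(dev_priv);
++ * radeon_do_cp_stop(dev_priv);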
++ */ ++static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv) ++{ ++ DRM_DEBUG("\n"); ++ ++ RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS); ++ ++ dev_priv->cp_running = 0; ++} ++ ++/* Reset the engine. This will stop the CP if it is running. ++ */ ++static int radeon_do_engine_reset(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset; ++ DRM_DEBUG("\n"); ++ ++ radeon_do_pixcache_flush(dev_priv); ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { ++ /* may need something similar for newer chips */ ++ clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX); ++ mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL); ++ ++ RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl | ++ RADEON_FORCEON_MCLKA | ++ RADEON_FORCEON_MCLKB | ++ RADEON_FORCEON_YCLKA | ++ RADEON_FORCEON_YCLKB | ++ RADEON_FORCEON_MC | ++ RADEON_FORCEON_AIC)); ++ } ++ ++ rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET); ++ ++ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset | ++ RADEON_SOFT_RESET_CP | ++ RADEON_SOFT_RESET_HI | ++ RADEON_SOFT_RESET_SE | ++ RADEON_SOFT_RESET_RE | ++ RADEON_SOFT_RESET_PP | ++ RADEON_SOFT_RESET_E2 | ++ RADEON_SOFT_RESET_RB)); ++ RADEON_READ(RADEON_RBBM_SOFT_RESET); ++ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset & ++ ~(RADEON_SOFT_RESET_CP | ++ RADEON_SOFT_RESET_HI | ++ RADEON_SOFT_RESET_SE | ++ RADEON_SOFT_RESET_RE | ++ RADEON_SOFT_RESET_PP | ++ RADEON_SOFT_RESET_E2 | ++ RADEON_SOFT_RESET_RB))); ++ RADEON_READ(RADEON_RBBM_SOFT_RESET); ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { ++ RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl); ++ RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index); ++ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset); ++ } ++ ++ /* setup the raster pipes */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) ++ radeon_init_pipes(dev_priv); ++ ++ /* Reset the CP ring */ ++ radeon_do_cp_reset(dev_priv); ++ ++ /* The CP is no longer running after an engine reset */ ++ dev_priv->cp_running = 0; ++ ++ /* Reset any pending vertex, indirect buffers */ ++ radeon_freelist_reset(dev); ++ ++ return 0; ++} ++ ++static void radeon_cp_init_ring_buffer(struct drm_device * dev, ++ drm_radeon_private_t * dev_priv) ++{ ++ u32 ring_start, cur_read_ptr; ++ u32 tmp; ++ ++ /* Initialize the memory controller. With new memory map, the fb location ++ * is not changed, it should have been properly initialized already. 
Part ++ * of the problem is that the code below is bogus, assuming the GART is ++ * always appended to the fb which is not necessarily the case ++ */ ++ if (!dev_priv->new_memmap) ++ radeon_write_fb_location(dev_priv, ++ ((dev_priv->gart_vm_start - 1) & 0xffff0000) ++ | (dev_priv->fb_location >> 16)); ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ radeon_write_agp_base(dev_priv, dev->agp->base); ++ ++ radeon_write_agp_location(dev_priv, ++ (((dev_priv->gart_vm_start - 1 + ++ dev_priv->gart_size) & 0xffff0000) | ++ (dev_priv->gart_vm_start >> 16))); ++ ++ ring_start = (dev_priv->cp_ring->offset ++ - dev->agp->base ++ + dev_priv->gart_vm_start); ++ } else ++#endif ++ ring_start = (dev_priv->cp_ring->offset ++ - (unsigned long)dev->sg->virtual ++ + dev_priv->gart_vm_start); ++ ++ RADEON_WRITE(RADEON_CP_RB_BASE, ring_start); ++ ++ /* Set the write pointer delay */ ++ RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0); ++ ++ /* Initialize the ring buffer's read and write pointers */ ++ cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); ++ RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); ++ SET_RING_HEAD(dev_priv, cur_read_ptr); ++ dev_priv->ring.tail = cur_read_ptr; ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, ++ dev_priv->ring_rptr->offset ++ - dev->agp->base + dev_priv->gart_vm_start); ++ } else ++#endif ++ { ++ struct drm_sg_mem *entry = dev->sg; ++ unsigned long tmp_ofs, page_ofs; ++ ++ tmp_ofs = dev_priv->ring_rptr->offset - ++ (unsigned long)dev->sg->virtual; ++ page_ofs = tmp_ofs >> PAGE_SHIFT; ++ ++ RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]); ++ DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n", ++ (unsigned long)entry->busaddr[page_ofs], ++ entry->handle + tmp_ofs); ++ } ++ ++ /* Set ring buffer size */ ++#ifdef __BIG_ENDIAN ++ RADEON_WRITE(RADEON_CP_RB_CNTL, ++ RADEON_BUF_SWAP_32BIT | ++ (dev_priv->ring.fetch_size_l2ow << 18) | ++ (dev_priv->ring.rptr_update_l2qw << 8) | ++ dev_priv->ring.size_l2qw); ++#else ++ RADEON_WRITE(RADEON_CP_RB_CNTL, ++ (dev_priv->ring.fetch_size_l2ow << 18) | ++ (dev_priv->ring.rptr_update_l2qw << 8) | ++ dev_priv->ring.size_l2qw); ++#endif ++ ++ /* Initialize the scratch register pointer. This will cause ++ * the scratch register values to be written out to memory ++ * whenever they are updated. 
++ * ++ * We simply put this behind the ring read pointer, this works ++ * with PCI GART as well as (whatever kind of) AGP GART ++ */ ++ RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR) ++ + RADEON_SCRATCH_REG_OFFSET); ++ ++ dev_priv->scratch = ((__volatile__ u32 *) ++ dev_priv->ring_rptr->handle + ++ (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); ++ ++ RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); ++ ++ /* Turn on bus mastering */ ++ tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; ++ RADEON_WRITE(RADEON_BUS_CNTL, tmp); ++ ++ dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; ++ RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); ++ ++ dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0; ++ RADEON_WRITE(RADEON_LAST_DISPATCH_REG, ++ dev_priv->sarea_priv->last_dispatch); ++ ++ dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0; ++ RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear); ++ ++ radeon_do_wait_for_idle(dev_priv); ++ ++ /* Sync everything up */ ++ RADEON_WRITE(RADEON_ISYNC_CNTL, ++ (RADEON_ISYNC_ANY2D_IDLE3D | ++ RADEON_ISYNC_ANY3D_IDLE2D | ++ RADEON_ISYNC_WAIT_IDLEGUI | ++ RADEON_ISYNC_CPSCRATCH_IDLEGUI)); ++ ++} ++ ++static void radeon_test_writeback(drm_radeon_private_t * dev_priv) ++{ ++ u32 tmp; ++ ++ /* Writeback doesn't seem to work everywhere, test it here and possibly ++ * enable it if it appears to work ++ */ ++ DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0); ++ RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef); ++ ++ for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) { ++ if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) == ++ 0xdeadbeef) ++ break; ++ DRM_UDELAY(1); ++ } ++ ++ if (tmp < dev_priv->usec_timeout) { ++ dev_priv->writeback_works = 1; ++ DRM_INFO("writeback test succeeded in %d usecs\n", tmp); ++ } else { ++ dev_priv->writeback_works = 0; ++ DRM_INFO("writeback test failed\n"); ++ } ++ if (radeon_no_wb == 1) { ++ dev_priv->writeback_works = 0; ++ DRM_INFO("writeback forced off\n"); ++ } ++ ++ if (!dev_priv->writeback_works) { ++ /* Disable writeback to avoid unnecessary bus master transfers */ ++ RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | RADEON_RB_NO_UPDATE); ++ RADEON_WRITE(RADEON_SCRATCH_UMSK, 0); ++ } ++} ++ ++/* Enable or disable IGP GART on the chip */ ++static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on) ++{ ++ u32 temp; ++ ++ if (on) { ++ DRM_DEBUG("programming igp gart %08X %08lX %08X\n", ++ dev_priv->gart_vm_start, ++ (long)dev_priv->gart_info.bus_addr, ++ dev_priv->gart_size); ++ ++ temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL); ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ++ IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN | ++ RS690_BLOCK_GFX_D3_EN)); ++ else ++ IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN); ++ ++ IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | ++ RS480_VA_SIZE_32MB)); ++ ++ temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID); ++ IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN | ++ RS480_TLB_ENABLE | ++ RS480_GTW_LAC_EN | ++ RS480_1LEVEL_GART)); ++ ++ temp = dev_priv->gart_info.bus_addr & 0xfffff000; ++ temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4; ++ IGP_WRITE_MCIND(RS480_GART_BASE, temp); ++ ++ temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL); ++ IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) | ++ RS480_REQ_TYPE_SNOOP_DIS)); ++ ++ 
radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start); ++ ++ dev_priv->gart_size = 32*1024*1024; ++ temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & ++ 0xffff0000) | (dev_priv->gart_vm_start >> 16)); ++ ++ radeon_write_agp_location(dev_priv, temp); ++ ++ temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE); ++ IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | ++ RS480_VA_SIZE_32MB)); ++ ++ do { ++ temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); ++ if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) ++ break; ++ DRM_UDELAY(1); ++ } while(1); ++ ++ IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, ++ RS480_GART_CACHE_INVALIDATE); ++ ++ do { ++ temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); ++ if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) ++ break; ++ DRM_UDELAY(1); ++ } while(1); ++ ++ IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0); ++ } else { ++ IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0); ++ } ++} ++ ++static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on) ++{ ++ u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL); ++ if (on) { ++ ++ DRM_DEBUG("programming pcie %08X %08lX %08X\n", ++ dev_priv->gart_vm_start, ++ (long)dev_priv->gart_info.bus_addr, ++ dev_priv->gart_size); ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, ++ dev_priv->gart_vm_start); ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE, ++ dev_priv->gart_info.bus_addr); ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO, ++ dev_priv->gart_vm_start); ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO, ++ dev_priv->gart_vm_start + ++ dev_priv->gart_size - 1); ++ ++ radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */ ++ ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, ++ RADEON_PCIE_TX_GART_EN); ++ } else { ++ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, ++ tmp & ~RADEON_PCIE_TX_GART_EN); ++ } ++} ++ ++/* Enable or disable PCI GART on the chip */ ++static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) ++{ ++ u32 tmp; ++ ++ if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || ++ (dev_priv->flags & RADEON_IS_IGPGART)) { ++ radeon_set_igpgart(dev_priv, on); ++ return; ++ } ++ ++ if (dev_priv->flags & RADEON_IS_PCIE) { ++ radeon_set_pciegart(dev_priv, on); ++ return; ++ } ++ ++ tmp = RADEON_READ(RADEON_AIC_CNTL); ++ ++ if (on) { ++ RADEON_WRITE(RADEON_AIC_CNTL, ++ tmp | RADEON_PCIGART_TRANSLATE_EN); ++ ++ /* set PCI GART page-table base address ++ */ ++ RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr); ++ ++ /* set address range for PCI address translate ++ */ ++ RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start); ++ RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start ++ + dev_priv->gart_size - 1); ++ ++ /* Turn off AGP aperture -- is this required for PCI GART? 
++ */ ++ radeon_write_agp_location(dev_priv, 0xffffffc0); ++ RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */ ++ } else { ++ RADEON_WRITE(RADEON_AIC_CNTL, ++ tmp & ~RADEON_PCIGART_TRANSLATE_EN); ++ } ++} ++ ++static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ ++ /* if we require new memory map but we don't have it fail */ ++ if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { ++ DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ ++ if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) ++ { ++ DRM_DEBUG("Forcing AGP card to PCI mode\n"); ++ dev_priv->flags &= ~RADEON_IS_AGP; ++ } ++ else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE)) ++ && !init->is_pci) ++ { ++ DRM_DEBUG("Restoring AGP flag\n"); ++ dev_priv->flags |= RADEON_IS_AGP; ++ } ++ ++ if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { ++ DRM_ERROR("PCI GART memory not allocated!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->usec_timeout = init->usec_timeout; ++ if (dev_priv->usec_timeout < 1 || ++ dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { ++ DRM_DEBUG("TIMEOUT problem!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ ++ /* Enable vblank on CRTC1 for older X servers ++ */ ++ dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1; ++ ++ dev_priv->do_boxes = 0; ++ dev_priv->cp_mode = init->cp_mode; ++ ++ /* We don't support anything other than bus-mastering ring mode, ++ * but the ring can be in either AGP or PCI space for the ring ++ * read pointer. ++ */ ++ if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) && ++ (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { ++ DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ ++ switch (init->fb_bpp) { ++ case 16: ++ dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565; ++ break; ++ case 32: ++ default: ++ dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888; ++ break; ++ } ++ dev_priv->front_offset = init->front_offset; ++ dev_priv->front_pitch = init->front_pitch; ++ dev_priv->back_offset = init->back_offset; ++ dev_priv->back_pitch = init->back_pitch; ++ ++ switch (init->depth_bpp) { ++ case 16: ++ dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z; ++ break; ++ case 32: ++ default: ++ dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z; ++ break; ++ } ++ dev_priv->depth_offset = init->depth_offset; ++ dev_priv->depth_pitch = init->depth_pitch; ++ ++ /* Hardware state for depth clears. Remove this if/when we no ++ * longer clear the depth buffer with a 3D rectangle. Hard-code ++ * all values to prevent unwanted 3D state from slipping through ++ * and screwing with the clear operation. ++ */ ++ dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | ++ (dev_priv->color_fmt << 10) | ++ (dev_priv->chip_family < CHIP_R200 ? 
RADEON_ZBLOCK16 : 0)); ++ ++ dev_priv->depth_clear.rb3d_zstencilcntl = ++ (dev_priv->depth_fmt | ++ RADEON_Z_TEST_ALWAYS | ++ RADEON_STENCIL_TEST_ALWAYS | ++ RADEON_STENCIL_S_FAIL_REPLACE | ++ RADEON_STENCIL_ZPASS_REPLACE | ++ RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE); ++ ++ dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW | ++ RADEON_BFACE_SOLID | ++ RADEON_FFACE_SOLID | ++ RADEON_FLAT_SHADE_VTX_LAST | ++ RADEON_DIFFUSE_SHADE_FLAT | ++ RADEON_ALPHA_SHADE_FLAT | ++ RADEON_SPECULAR_SHADE_FLAT | ++ RADEON_FOG_SHADE_FLAT | ++ RADEON_VTX_PIX_CENTER_OGL | ++ RADEON_ROUND_MODE_TRUNC | ++ RADEON_ROUND_PREC_8TH_PIX); ++ ++ ++ dev_priv->ring_offset = init->ring_offset; ++ dev_priv->ring_rptr_offset = init->ring_rptr_offset; ++ dev_priv->buffers_offset = init->buffers_offset; ++ dev_priv->gart_textures_offset = init->gart_textures_offset; ++ ++ dev_priv->sarea = drm_getsarea(dev); ++ if (!dev_priv->sarea) { ++ DRM_ERROR("could not find sarea!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ ++ dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); ++ if (!dev_priv->cp_ring) { ++ DRM_ERROR("could not find cp ring region!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); ++ if (!dev_priv->ring_rptr) { ++ DRM_ERROR("could not find ring read pointer!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ dev->agp_buffer_token = init->buffers_offset; ++ dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); ++ if (!dev->agp_buffer_map) { ++ DRM_ERROR("could not find dma buffer region!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ ++ if (init->gart_textures_offset) { ++ dev_priv->gart_textures = ++ drm_core_findmap(dev, init->gart_textures_offset); ++ if (!dev_priv->gart_textures) { ++ DRM_ERROR("could not find GART texture region!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ } ++ ++ dev_priv->sarea_priv = ++ (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle + ++ init->sarea_priv_offset); ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ drm_core_ioremap(dev_priv->cp_ring, dev); ++ drm_core_ioremap(dev_priv->ring_rptr, dev); ++ drm_core_ioremap(dev->agp_buffer_map, dev); ++ if (!dev_priv->cp_ring->handle || ++ !dev_priv->ring_rptr->handle || ++ !dev->agp_buffer_map->handle) { ++ DRM_ERROR("could not find ioremap agp regions!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ } else ++#endif ++ { ++ dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset; ++ dev_priv->ring_rptr->handle = ++ (void *)dev_priv->ring_rptr->offset; ++ dev->agp_buffer_map->handle = ++ (void *)dev->agp_buffer_map->offset; ++ ++ DRM_DEBUG("dev_priv->cp_ring->handle %p\n", ++ dev_priv->cp_ring->handle); ++ DRM_DEBUG("dev_priv->ring_rptr->handle %p\n", ++ dev_priv->ring_rptr->handle); ++ DRM_DEBUG("dev->agp_buffer_map->handle %p\n", ++ dev->agp_buffer_map->handle); ++ } ++ ++ dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16; ++ dev_priv->fb_size = ++ ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000) ++ - dev_priv->fb_location; ++ ++ dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) | ++ ((dev_priv->front_offset ++ + dev_priv->fb_location) >> 10)); ++ ++ dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) | ++ ((dev_priv->back_offset ++ + dev_priv->fb_location) >> 10)); ++ ++ dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) | ++ 
((dev_priv->depth_offset ++ + dev_priv->fb_location) >> 10)); ++ ++ dev_priv->gart_size = init->gart_size; ++ ++ /* New let's set the memory map ... */ ++ if (dev_priv->new_memmap) { ++ u32 base = 0; ++ ++ DRM_INFO("Setting GART location based on new memory map\n"); ++ ++ /* If using AGP, try to locate the AGP aperture at the same ++ * location in the card and on the bus, though we have to ++ * align it down. ++ */ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ base = dev->agp->base; ++ /* Check if valid */ ++ if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location && ++ base < (dev_priv->fb_location + dev_priv->fb_size - 1)) { ++ DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n", ++ dev->agp->base); ++ base = 0; ++ } ++ } ++#endif ++ /* If not or if AGP is at 0 (Macs), try to put it elsewhere */ ++ if (base == 0) { ++ base = dev_priv->fb_location + dev_priv->fb_size; ++ if (base < dev_priv->fb_location || ++ ((base + dev_priv->gart_size) & 0xfffffffful) < base) ++ base = dev_priv->fb_location ++ - dev_priv->gart_size; ++ } ++ dev_priv->gart_vm_start = base & 0xffc00000u; ++ if (dev_priv->gart_vm_start != base) ++ DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n", ++ base, dev_priv->gart_vm_start); ++ } else { ++ DRM_INFO("Setting GART location based on old memory map\n"); ++ dev_priv->gart_vm_start = dev_priv->fb_location + ++ RADEON_READ(RADEON_CONFIG_APER_SIZE); ++ } ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) ++ dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset ++ - dev->agp->base ++ + dev_priv->gart_vm_start); ++ else ++#endif ++ dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset ++ - (unsigned long)dev->sg->virtual ++ + dev_priv->gart_vm_start); ++ ++ DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size); ++ DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start); ++ DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n", ++ dev_priv->gart_buffers_offset); ++ ++ dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle; ++ dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle ++ + init->ring_size / sizeof(u32)); ++ dev_priv->ring.size = init->ring_size; ++ dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); ++ ++ dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; ++ dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8); ++ ++ dev_priv->ring.fetch_size = /* init->fetch_size */ 32; ++ dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); ++ ++ dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; ++ ++ dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ /* Turn off PCI GART */ ++ radeon_set_pcigart(dev_priv, 0); ++ } else ++#endif ++ { ++ dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); ++ /* if we have an offset set from userspace */ ++ if (dev_priv->pcigart_offset_set) { ++ dev_priv->gart_info.bus_addr = ++ dev_priv->pcigart_offset + dev_priv->fb_location; ++ dev_priv->gart_info.mapping.offset = ++ dev_priv->pcigart_offset + dev_priv->fb_aper_offset; ++ dev_priv->gart_info.mapping.size = ++ dev_priv->gart_info.table_size; ++ ++ drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev); ++ dev_priv->gart_info.addr = ++ dev_priv->gart_info.mapping.handle; ++ ++ if (dev_priv->flags & RADEON_IS_PCIE) ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE; ++ else ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; ++ dev_priv->gart_info.gart_table_location = ++ 
DRM_ATI_GART_FB; ++ ++ DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n", ++ dev_priv->gart_info.addr, ++ dev_priv->pcigart_offset); ++ } else { ++ if (dev_priv->flags & RADEON_IS_IGPGART) ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP; ++ else ++ dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; ++ dev_priv->gart_info.gart_table_location = ++ DRM_ATI_GART_MAIN; ++ dev_priv->gart_info.addr = NULL; ++ dev_priv->gart_info.bus_addr = 0; ++ if (dev_priv->flags & RADEON_IS_PCIE) { ++ DRM_ERROR ++ ("Cannot use PCI Express without GART in FB memory\n"); ++ radeon_do_cleanup_cp(dev); ++ return -EINVAL; ++ } ++ } ++ ++ if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { ++ DRM_ERROR("failed to init PCI GART!\n"); ++ radeon_do_cleanup_cp(dev); ++ return -ENOMEM; ++ } ++ ++ /* Turn on PCI GART */ ++ radeon_set_pcigart(dev_priv, 1); ++ } ++ ++ /* Start with assuming that writeback doesn't work */ ++ dev_priv->writeback_works = 0; ++ ++ radeon_cp_load_microcode(dev_priv); ++ radeon_cp_init_ring_buffer(dev, dev_priv); ++ ++ dev_priv->last_buf = 0; ++ ++ radeon_do_engine_reset(dev); ++ radeon_test_writeback(dev_priv); ++ ++ return 0; ++} ++ ++static int radeon_do_cleanup_cp(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ /* Make sure interrupts are disabled here because the uninstall ioctl ++ * may not have been called from userspace and after dev_private ++ * is freed, it's too late. ++ */ ++ if (dev->irq_enabled) ++ drm_irq_uninstall(dev); ++ ++#if __OS_HAS_AGP ++ if (dev_priv->flags & RADEON_IS_AGP) { ++ if (dev_priv->cp_ring != NULL) { ++ drm_core_ioremapfree(dev_priv->cp_ring, dev); ++ dev_priv->cp_ring = NULL; ++ } ++ if (dev_priv->ring_rptr != NULL) { ++ drm_core_ioremapfree(dev_priv->ring_rptr, dev); ++ dev_priv->ring_rptr = NULL; ++ } ++ if (dev->agp_buffer_map != NULL) { ++ drm_core_ioremapfree(dev->agp_buffer_map, dev); ++ dev->agp_buffer_map = NULL; ++ } ++ } else ++#endif ++ { ++ ++ if (dev_priv->gart_info.bus_addr) { ++ /* Turn off PCI GART */ ++ radeon_set_pcigart(dev_priv, 0); ++ if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) ++ DRM_ERROR("failed to cleanup PCI GART!\n"); ++ } ++ ++ if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) ++ { ++ drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); ++ dev_priv->gart_info.addr = 0; ++ } ++ } ++ /* only clear to the start of flags */ ++ memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags)); ++ ++ return 0; ++} ++ ++/* This code will reinit the Radeon CP hardware after a resume from disc. ++ * AFAIK, it would be very difficult to pickle the state at suspend time, so ++ * here we make sure that all Radeon hardware initialisation is re-done without ++ * affecting running applications. ++ * ++ * Charl P. 
Botha
++ */
++static int radeon_do_resume_cp(struct drm_device * dev)
++{
++	drm_radeon_private_t *dev_priv = dev->dev_private;
++
++	if (!dev_priv) {
++		DRM_ERROR("Called with no initialization\n");
++		return -EINVAL;
++	}
++
++	DRM_DEBUG("Starting radeon_do_resume_cp()\n");
++
++#if __OS_HAS_AGP
++	if (dev_priv->flags & RADEON_IS_AGP) {
++		/* Turn off PCI GART */
++		radeon_set_pcigart(dev_priv, 0);
++	} else
++#endif
++	{
++		/* Turn on PCI GART */
++		radeon_set_pcigart(dev_priv, 1);
++	}
++
++	radeon_cp_load_microcode(dev_priv);
++	radeon_cp_init_ring_buffer(dev, dev_priv);
++
++	radeon_do_engine_reset(dev);
++	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
++
++	DRM_DEBUG("radeon_do_resume_cp() complete\n");
++
++	return 0;
++}
++
++int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++	drm_radeon_init_t *init = data;
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++	if (init->func == RADEON_INIT_R300_CP)
++		r300_init_reg_flags(dev);
++
++	switch (init->func) {
++	case RADEON_INIT_CP:
++	case RADEON_INIT_R200_CP:
++	case RADEON_INIT_R300_CP:
++		return radeon_do_init_cp(dev, init);
++	case RADEON_CLEANUP_CP:
++		return radeon_do_cleanup_cp(dev);
++	}
++
++	return -EINVAL;
++}
++
++int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++	drm_radeon_private_t *dev_priv = dev->dev_private;
++	DRM_DEBUG("\n");
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++	if (dev_priv->cp_running) {
++		DRM_DEBUG("while CP running\n");
++		return 0;
++	}
++	if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
++		DRM_DEBUG("called with bogus CP mode (%d)\n",
++			  dev_priv->cp_mode);
++		return 0;
++	}
++
++	radeon_do_cp_start(dev_priv);
++
++	return 0;
++}
++
++/* Stop the CP. The engine must have been idled before calling this
++ * routine.
++ */
++int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++	drm_radeon_private_t *dev_priv = dev->dev_private;
++	drm_radeon_cp_stop_t *stop = data;
++	int ret;
++	DRM_DEBUG("\n");
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++	if (!dev_priv->cp_running)
++		return 0;
++
++	/* Flush any pending CP commands. This ensures any outstanding
++	 * commands are executed by the engine before we turn it off.
++	 */
++	if (stop->flush) {
++		radeon_do_cp_flush(dev_priv);
++	}
++
++	/* If we fail to make the engine go idle, we return an error
++	 * code so that the DRM ioctl wrapper can try again.
++	 */
++	if (stop->idle) {
++		ret = radeon_do_cp_idle(dev_priv);
++		if (ret)
++			return ret;
++	}
++
++	/* Finally, we can turn off the CP. If the engine isn't idle,
++	 * we will get some dropped triangles as they won't be fully
++	 * rendered before the CP is shut down.
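++	 *
++	 * (Sketch, not from the original code: the safe shutdown order is
++	 * therefore flush, then idle, then stop; from userspace, assuming
++	 * an open DRM fd, that is roughly
++	 *
++	 *	drm_radeon_cp_stop_t stop = { .flush = 1, .idle = 1 };
++	 *	ioctl(fd, DRM_IOCTL_RADEON_CP_STOP, &stop);
++	 *
++	 * and if the idle step times out the ioctl fails, so the caller
++	 * can simply retry.)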
++ */ ++ radeon_do_cp_stop(dev_priv); ++ ++ /* Reset the engine */ ++ radeon_do_engine_reset(dev); ++ ++ return 0; ++} ++ ++void radeon_do_release(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int i, ret; ++ ++ if (dev_priv) { ++ if (dev_priv->cp_running) { ++ /* Stop the cp */ ++ while ((ret = radeon_do_cp_idle(dev_priv)) != 0) { ++ DRM_DEBUG("radeon_do_cp_idle %d\n", ret); ++#ifdef __linux__ ++ schedule(); ++#else ++#if defined(__FreeBSD__) && __FreeBSD_version > 500000 ++ mtx_sleep(&ret, &dev->dev_lock, PZERO, "rdnrel", ++ 1); ++#else ++ tsleep(&ret, PZERO, "rdnrel", 1); ++#endif ++#endif ++ } ++ radeon_do_cp_stop(dev_priv); ++ radeon_do_engine_reset(dev); ++ } ++ ++ /* Disable *all* interrupts */ ++ if (dev_priv->mmio) /* remove this after permanent addmaps */ ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); ++ ++ if (dev_priv->mmio) { /* remove all surfaces */ ++ for (i = 0; i < RADEON_MAX_SURFACES; i++) { ++ RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0); ++ RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + ++ 16 * i, 0); ++ RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + ++ 16 * i, 0); ++ } ++ } ++ ++ /* Free memory heap structures */ ++ radeon_mem_takedown(&(dev_priv->gart_heap)); ++ radeon_mem_takedown(&(dev_priv->fb_heap)); ++ ++ /* deallocate kernel resources */ ++ radeon_do_cleanup_cp(dev); ++ } ++} ++ ++/* Just reset the CP ring. Called as part of an X Server engine reset. ++ */ ++int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_DEBUG("called before init done\n"); ++ return -EINVAL; ++ } ++ ++ radeon_do_cp_reset(dev_priv); ++ ++ /* The CP is no longer running after an engine reset */ ++ dev_priv->cp_running = 0; ++ ++ return 0; ++} ++ ++int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return radeon_do_cp_idle(dev_priv); ++} ++ ++/* Added by Charl P. Botha to call radeon_do_resume_cp(). ++ */ ++int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ ++ return radeon_do_resume_cp(dev); ++} ++ ++int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ return radeon_do_engine_reset(dev); ++} ++ ++/* ================================================================ ++ * Fullscreen mode ++ */ ++ ++/* KW: Deprecated to say the least: ++ */ ++int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ return 0; ++} ++ ++/* ================================================================ ++ * Freelist management ++ */ ++ ++/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through ++ * bufs until freelist code is used. Note this hides a problem with ++ * the scratch register * (used to keep track of last buffer ++ * completed) being written to before * the last buffer has actually ++ * completed rendering. ++ * ++ * KW: It's also a good way to find free buffers quickly. ++ * ++ * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't ++ * sleep. However, bugs in older versions of radeon_accel.c mean that ++ * we essentially have to do this, else old clients will break. 
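++ *
++ * (Sketch, not from the original comment: the scan below treats a
++ * buffer as reclaimable when it has no owner, or when it is pending
++ * and the CP has already retired it, i.e.
++ *
++ *	done_age = GET_SCRATCH(1);
++ *	reclaimable = (buf->file_priv == NULL) ||
++ *		      (buf->pending && buf_priv->age <= done_age);
++ *
++ * where scratch register 1 holds the age of the last buffer the
++ * engine completed.)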
++ * ++ * However, it does leave open a potential deadlock where all the ++ * buffers are held by other clients, which can't release them because ++ * they can't get the lock. ++ */ ++ ++struct drm_buf *radeon_freelist_get(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_buf_priv_t *buf_priv; ++ struct drm_buf *buf; ++ int i, t; ++ int start; ++ ++ if (++dev_priv->last_buf >= dma->buf_count) ++ dev_priv->last_buf = 0; ++ ++ start = dev_priv->last_buf; ++ ++ for (t = 0; t < dev_priv->usec_timeout; t++) { ++ u32 done_age = GET_SCRATCH(1); ++ DRM_DEBUG("done_age = %d\n", done_age); ++ for (i = start; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ buf_priv = buf->dev_private; ++ if (buf->file_priv == NULL || (buf->pending && ++ buf_priv->age <= ++ done_age)) { ++ dev_priv->stats.requested_bufs++; ++ buf->pending = 0; ++ return buf; ++ } ++ start = 0; ++ } ++ ++ if (t) { ++ DRM_UDELAY(1); ++ dev_priv->stats.freelist_loops++; ++ } ++ } ++ ++ DRM_DEBUG("returning NULL!\n"); ++ return NULL; ++} ++ ++#if 0 ++struct drm_buf *radeon_freelist_get(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_buf_priv_t *buf_priv; ++ struct drm_buf *buf; ++ int i, t; ++ int start; ++ u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)); ++ ++ if (++dev_priv->last_buf >= dma->buf_count) ++ dev_priv->last_buf = 0; ++ ++ start = dev_priv->last_buf; ++ dev_priv->stats.freelist_loops++; ++ ++ for (t = 0; t < 2; t++) { ++ for (i = start; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ buf_priv = buf->dev_private; ++ if (buf->file_priv == 0 || (buf->pending && ++ buf_priv->age <= ++ done_age)) { ++ dev_priv->stats.requested_bufs++; ++ buf->pending = 0; ++ return buf; ++ } ++ } ++ start = 0; ++ } ++ ++ return NULL; ++} ++#endif ++ ++void radeon_freelist_reset(struct drm_device * dev) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int i; ++ ++ dev_priv->last_buf = 0; ++ for (i = 0; i < dma->buf_count; i++) { ++ struct drm_buf *buf = dma->buflist[i]; ++ drm_radeon_buf_priv_t *buf_priv = buf->dev_private; ++ buf_priv->age = 0; ++ } ++} ++ ++/* ================================================================ ++ * CP command submission ++ */ ++ ++int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n) ++{ ++ drm_radeon_ring_buffer_t *ring = &dev_priv->ring; ++ int i; ++ u32 last_head = GET_RING_HEAD(dev_priv); ++ ++ for (i = 0; i < dev_priv->usec_timeout; i++) { ++ u32 head = GET_RING_HEAD(dev_priv); ++ ++ ring->space = (head - ring->tail) * sizeof(u32); ++ if (ring->space <= 0) ++ ring->space += ring->size; ++ if (ring->space > n) ++ return 0; ++ ++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ++ ++ if (head != last_head) ++ i = 0; ++ last_head = head; ++ ++ DRM_UDELAY(1); ++ } ++ ++ /* FIXME: This return value is ignored in the BEGIN_RING macro! 
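++	 *
++	 * (Note, not from the original comment: the space test above is
++	 * plain wraparound arithmetic,
++	 *
++	 *	space = (head - tail) * sizeof(u32);
++	 *	if (space <= 0)
++	 *		space += size;
++	 *
++	 * and resetting i whenever head moves means the timeout only
++	 * fires if the CP makes no forward progress at all.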
*/ ++#if RADEON_FIFO_DEBUG ++ radeon_status(dev_priv); ++ DRM_ERROR("failed!\n"); ++#endif ++ return -EBUSY; ++} ++ ++static int radeon_cp_get_buffers(struct drm_device *dev, ++ struct drm_file *file_priv, ++ struct drm_dma * d) ++{ ++ int i; ++ struct drm_buf *buf; ++ ++ for (i = d->granted_count; i < d->request_count; i++) { ++ buf = radeon_freelist_get(dev); ++ if (!buf) ++ return -EBUSY; /* NOTE: broken client */ ++ ++ buf->file_priv = file_priv; ++ ++ if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, ++ sizeof(buf->idx))) ++ return -EFAULT; ++ if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, ++ sizeof(buf->total))) ++ return -EFAULT; ++ ++ d->granted_count++; ++ } ++ return 0; ++} ++ ++int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ int ret = 0; ++ struct drm_dma *d = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* Please don't send us buffers. ++ */ ++ if (d->send_count != 0) { ++ DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", ++ DRM_CURRENTPID, d->send_count); ++ return -EINVAL; ++ } ++ ++ /* We'll send you buffers. ++ */ ++ if (d->request_count < 0 || d->request_count > dma->buf_count) { ++ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", ++ DRM_CURRENTPID, d->request_count, dma->buf_count); ++ return -EINVAL; ++ } ++ ++ d->granted_count = 0; ++ ++ if (d->request_count) { ++ ret = radeon_cp_get_buffers(dev, file_priv, d); ++ } ++ ++ return ret; ++} ++ ++int radeon_driver_load(struct drm_device *dev, unsigned long flags) ++{ ++ drm_radeon_private_t *dev_priv; ++ int ret = 0; ++ ++ dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); ++ if (dev_priv == NULL) ++ return -ENOMEM; ++ ++ memset(dev_priv, 0, sizeof(drm_radeon_private_t)); ++ dev->dev_private = (void *)dev_priv; ++ dev_priv->flags = flags; ++ ++ switch (flags & RADEON_FAMILY_MASK) { ++ case CHIP_R100: ++ case CHIP_RV200: ++ case CHIP_R200: ++ case CHIP_R300: ++ case CHIP_R350: ++ case CHIP_R420: ++ case CHIP_RV410: ++ case CHIP_RV515: ++ case CHIP_R520: ++ case CHIP_RV570: ++ case CHIP_R580: ++ dev_priv->flags |= RADEON_HAS_HIERZ; ++ break; ++ default: ++ /* all other chips have no hierarchical z buffer */ ++ break; ++ } ++ ++ dev_priv->chip_family = flags & RADEON_FAMILY_MASK; ++ if (drm_device_is_agp(dev)) ++ dev_priv->flags |= RADEON_IS_AGP; ++ else if (drm_device_is_pcie(dev)) ++ dev_priv->flags |= RADEON_IS_PCIE; ++ else ++ dev_priv->flags |= RADEON_IS_PCI; ++ ++ DRM_DEBUG("%s card detected\n", ++ ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); ++ return ret; ++} ++ ++/* Create mappings for registers and framebuffer so userland doesn't necessarily ++ * have to find them. 
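++ *
++ * (Sketch, not from the original comment: PCI resource 2 is the MMIO
++ * register BAR and resource 0 the framebuffer BAR, so the first call
++ * below amounts to
++ *
++ *	drm_addmap(dev, bar_start, bar_len, _DRM_REGISTERS,
++ *		   _DRM_READ_ONLY, &dev_priv->mmio);
++ *
++ * with the framebuffer added as _DRM_FRAME_BUFFER, write-combined;
++ * bar_start/bar_len stand in for the drm_get_resource_* calls.)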
++ */ ++int radeon_driver_firstopen(struct drm_device *dev) ++{ ++ int ret; ++ drm_local_map_t *map; ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; ++ ++ ret = drm_addmap(dev, drm_get_resource_start(dev, 2), ++ drm_get_resource_len(dev, 2), _DRM_REGISTERS, ++ _DRM_READ_ONLY, &dev_priv->mmio); ++ if (ret != 0) ++ return ret; ++ ++ dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); ++ ret = drm_addmap(dev, dev_priv->fb_aper_offset, ++ drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, ++ _DRM_WRITE_COMBINING, &map); ++ if (ret != 0) ++ return ret; ++ ++ return 0; ++} ++ ++int radeon_driver_unload(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ DRM_DEBUG("\n"); ++ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); ++ ++ dev->dev_private = NULL; ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/radeon_drm.h git-nokia/drivers/gpu/drm-tungsten/radeon_drm.h +--- git/drivers/gpu/drm-tungsten/radeon_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/radeon_drm.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,750 @@ ++/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*- ++ * ++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California. ++ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Kevin E. Martin ++ * Gareth Hughes ++ * Keith Whitwell ++ */ ++ ++#ifndef __RADEON_DRM_H__ ++#define __RADEON_DRM_H__ ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the X server file (radeon_sarea.h) ++ */ ++#ifndef __RADEON_SAREA_DEFINES__ ++#define __RADEON_SAREA_DEFINES__ ++ ++/* Old style state flags, required for sarea interface (1.1 and 1.2 ++ * clears) and 1.2 drm_vertex2 ioctl. 
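++ *
++ * (Illustration, not from the original comment: a 1.2-style client
++ * marks touched state by OR-ing these flags into the shared dirty
++ * mask, e.g.
++ *
++ *	sarea_priv->dirty |= RADEON_UPLOAD_CONTEXT | RADEON_UPLOAD_MASKS;
++ *
++ * sarea_priv being the drm_radeon_sarea_t shared page defined further
++ * down in this header.)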
++ */ ++#define RADEON_UPLOAD_CONTEXT 0x00000001 ++#define RADEON_UPLOAD_VERTFMT 0x00000002 ++#define RADEON_UPLOAD_LINE 0x00000004 ++#define RADEON_UPLOAD_BUMPMAP 0x00000008 ++#define RADEON_UPLOAD_MASKS 0x00000010 ++#define RADEON_UPLOAD_VIEWPORT 0x00000020 ++#define RADEON_UPLOAD_SETUP 0x00000040 ++#define RADEON_UPLOAD_TCL 0x00000080 ++#define RADEON_UPLOAD_MISC 0x00000100 ++#define RADEON_UPLOAD_TEX0 0x00000200 ++#define RADEON_UPLOAD_TEX1 0x00000400 ++#define RADEON_UPLOAD_TEX2 0x00000800 ++#define RADEON_UPLOAD_TEX0IMAGES 0x00001000 ++#define RADEON_UPLOAD_TEX1IMAGES 0x00002000 ++#define RADEON_UPLOAD_TEX2IMAGES 0x00004000 ++#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */ ++#define RADEON_REQUIRE_QUIESCENCE 0x00010000 ++#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */ ++#define RADEON_UPLOAD_ALL 0x003effff ++#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff ++ ++/* New style per-packet identifiers for use in cmd_buffer ioctl with ++ * the RADEON_EMIT_PACKET command. Comments relate new packets to old ++ * state bits and the packet size: ++ */ ++#define RADEON_EMIT_PP_MISC 0 /* context/7 */ ++#define RADEON_EMIT_PP_CNTL 1 /* context/3 */ ++#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */ ++#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */ ++#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */ ++#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */ ++#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */ ++#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */ ++#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */ ++#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */ ++#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */ ++#define RADEON_EMIT_RE_MISC 11 /* misc/1 */ ++#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */ ++#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */ ++#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */ ++#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */ ++#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */ ++#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */ ++#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */ ++#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */ ++#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */ ++#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */ ++#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */ ++#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */ ++#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */ ++#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */ ++#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */ ++#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */ ++#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */ ++#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */ ++#define R200_EMIT_TFACTOR_0 30 /* tf/7 */ ++#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */ ++#define R200_EMIT_VAP_CTL 32 /* vap/1 */ ++#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */ ++#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */ ++#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */ ++#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */ ++#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */ ++#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */ ++#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */ ++#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */ ++#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */ ++#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */ ++#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */ ++#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */ ++#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */ ++#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */ ++#define 
R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */ ++#define R200_EMIT_VTE_CNTL 48 /* vte/1 */ ++#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */ ++#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */ ++#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */ ++#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */ ++#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */ ++#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */ ++#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */ ++#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */ ++#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */ ++#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */ ++#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */ ++#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */ ++#define R200_EMIT_PP_CUBIC_FACES_0 61 ++#define R200_EMIT_PP_CUBIC_OFFSETS_0 62 ++#define R200_EMIT_PP_CUBIC_FACES_1 63 ++#define R200_EMIT_PP_CUBIC_OFFSETS_1 64 ++#define R200_EMIT_PP_CUBIC_FACES_2 65 ++#define R200_EMIT_PP_CUBIC_OFFSETS_2 66 ++#define R200_EMIT_PP_CUBIC_FACES_3 67 ++#define R200_EMIT_PP_CUBIC_OFFSETS_3 68 ++#define R200_EMIT_PP_CUBIC_FACES_4 69 ++#define R200_EMIT_PP_CUBIC_OFFSETS_4 70 ++#define R200_EMIT_PP_CUBIC_FACES_5 71 ++#define R200_EMIT_PP_CUBIC_OFFSETS_5 72 ++#define RADEON_EMIT_PP_TEX_SIZE_0 73 ++#define RADEON_EMIT_PP_TEX_SIZE_1 74 ++#define RADEON_EMIT_PP_TEX_SIZE_2 75 ++#define R200_EMIT_RB3D_BLENDCOLOR 76 ++#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77 ++#define RADEON_EMIT_PP_CUBIC_FACES_0 78 ++#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79 ++#define RADEON_EMIT_PP_CUBIC_FACES_1 80 ++#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81 ++#define RADEON_EMIT_PP_CUBIC_FACES_2 82 ++#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83 ++#define R200_EMIT_PP_TRI_PERF_CNTL 84 ++#define R200_EMIT_PP_AFS_0 85 ++#define R200_EMIT_PP_AFS_1 86 ++#define R200_EMIT_ATF_TFACTOR 87 ++#define R200_EMIT_PP_TXCTLALL_0 88 ++#define R200_EMIT_PP_TXCTLALL_1 89 ++#define R200_EMIT_PP_TXCTLALL_2 90 ++#define R200_EMIT_PP_TXCTLALL_3 91 ++#define R200_EMIT_PP_TXCTLALL_4 92 ++#define R200_EMIT_PP_TXCTLALL_5 93 ++#define R200_EMIT_VAP_PVS_CNTL 94 ++#define RADEON_MAX_STATE_PACKETS 95 ++ ++/* Commands understood by cmd_buffer ioctl. 
More can be added but ++ * obviously these can't be removed or changed: ++ */ ++#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */ ++#define RADEON_CMD_SCALARS 2 /* emit scalar data */ ++#define RADEON_CMD_VECTORS 3 /* emit vector data */ ++#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */ ++#define RADEON_CMD_PACKET3 5 /* emit hw packet */ ++#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */ ++#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */ ++#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note: ++ * doesn't make the cpu wait, just ++ * the graphics hardware */ ++#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */ ++ ++typedef union { ++ int i; ++ struct { ++ unsigned char cmd_type, pad0, pad1, pad2; ++ } header; ++ struct { ++ unsigned char cmd_type, packet_id, pad0, pad1; ++ } packet; ++ struct { ++ unsigned char cmd_type, offset, stride, count; ++ } scalars; ++ struct { ++ unsigned char cmd_type, offset, stride, count; ++ } vectors; ++ struct { ++ unsigned char cmd_type, addr_lo, addr_hi, count; ++ } veclinear; ++ struct { ++ unsigned char cmd_type, buf_idx, pad0, pad1; ++ } dma; ++ struct { ++ unsigned char cmd_type, flags, pad0, pad1; ++ } wait; ++} drm_radeon_cmd_header_t; ++ ++#define RADEON_WAIT_2D 0x1 ++#define RADEON_WAIT_3D 0x2 ++ ++/* Allowed parameters for R300_CMD_PACKET3 ++ */ ++#define R300_CMD_PACKET3_CLEAR 0 ++#define R300_CMD_PACKET3_RAW 1 ++ ++/* Commands understood by cmd_buffer ioctl for R300. ++ * The interface has not been stabilized, so some of these may be removed ++ * and eventually reordered before stabilization. ++ */ ++#define R300_CMD_PACKET0 1 ++#define R300_CMD_VPU 2 /* emit vertex program upload */ ++#define R300_CMD_PACKET3 3 /* emit a packet3 */ ++#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */ ++#define R300_CMD_CP_DELAY 5 ++#define R300_CMD_DMA_DISCARD 6 ++#define R300_CMD_WAIT 7 ++# define R300_WAIT_2D 0x1 ++# define R300_WAIT_3D 0x2 ++/* these two defines are DOING IT WRONG - however ++ * we have userspace which relies on using these. 
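++ *
++ * (Note, not from the original comment: the conflict is that under a
++ * bit-field reading 0x3 decodes as both waits at once and 0x4 as an
++ * unrelated third bit, i.e.
++ *
++ *	R300_WAIT_2D_CLEAN == (R300_WAIT_2D | R300_WAIT_3D)
++ *
++ * which is why the NEW_WAIT encodings below use disjoint values.)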
++ * The wait interface is backwards compat new ++ * code should use the NEW_WAIT defines below ++ * THESE ARE NOT BIT FIELDS ++ */ ++# define R300_WAIT_2D_CLEAN 0x3 ++# define R300_WAIT_3D_CLEAN 0x4 ++ ++# define R300_NEW_WAIT_2D_3D 0x3 ++# define R300_NEW_WAIT_2D_2D_CLEAN 0x4 ++# define R300_NEW_WAIT_3D_3D_CLEAN 0x6 ++# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8 ++ ++#define R300_CMD_SCRATCH 8 ++#define R300_CMD_R500FP 9 ++ ++typedef union { ++ unsigned int u; ++ struct { ++ unsigned char cmd_type, pad0, pad1, pad2; ++ } header; ++ struct { ++ unsigned char cmd_type, count, reglo, reghi; ++ } packet0; ++ struct { ++ unsigned char cmd_type, count, adrlo, adrhi; ++ } vpu; ++ struct { ++ unsigned char cmd_type, packet, pad0, pad1; ++ } packet3; ++ struct { ++ unsigned char cmd_type, packet; ++ unsigned short count; /* amount of packet2 to emit */ ++ } delay; ++ struct { ++ unsigned char cmd_type, buf_idx, pad0, pad1; ++ } dma; ++ struct { ++ unsigned char cmd_type, flags, pad0, pad1; ++ } wait; ++ struct { ++ unsigned char cmd_type, reg, n_bufs, flags; ++ } scratch; ++ struct { ++ unsigned char cmd_type, count, adrlo, adrhi_flags; ++ } r500fp; ++} drm_r300_cmd_header_t; ++ ++#define RADEON_FRONT 0x1 ++#define RADEON_BACK 0x2 ++#define RADEON_DEPTH 0x4 ++#define RADEON_STENCIL 0x8 ++#define RADEON_CLEAR_FASTZ 0x80000000 ++#define RADEON_USE_HIERZ 0x40000000 ++#define RADEON_USE_COMP_ZBUF 0x20000000 ++ ++#define R500FP_CONSTANT_TYPE (1 << 1) ++#define R500FP_CONSTANT_CLAMP (1 << 2) ++ ++/* Primitive types ++ */ ++#define RADEON_POINTS 0x1 ++#define RADEON_LINES 0x2 ++#define RADEON_LINE_STRIP 0x3 ++#define RADEON_TRIANGLES 0x4 ++#define RADEON_TRIANGLE_FAN 0x5 ++#define RADEON_TRIANGLE_STRIP 0x6 ++ ++/* Vertex/indirect buffer size ++ */ ++#define RADEON_BUFFER_SIZE 65536 ++ ++/* Byte offsets for indirect buffer data ++ */ ++#define RADEON_INDEX_PRIM_OFFSET 20 ++ ++#define RADEON_SCRATCH_REG_OFFSET 32 ++ ++#define RADEON_NR_SAREA_CLIPRECTS 12 ++ ++/* There are 2 heaps (local/GART). Each region within a heap is a ++ * minimum of 64k, and there are at most 64 of them per heap. ++ */ ++#define RADEON_LOCAL_TEX_HEAP 0 ++#define RADEON_GART_TEX_HEAP 1 ++#define RADEON_NR_TEX_HEAPS 2 ++#define RADEON_NR_TEX_REGIONS 64 ++#define RADEON_LOG_TEX_GRANULARITY 16 ++ ++#define RADEON_MAX_TEXTURE_LEVELS 12 ++#define RADEON_MAX_TEXTURE_UNITS 3 ++ ++#define RADEON_MAX_SURFACES 8 ++ ++/* Blits have strict offset rules. All blit offset must be aligned on ++ * a 1K-byte boundary. 
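++ *
++ * (Worked example, not from the original comment: with the defines
++ * below the check and the fixup are plain mask arithmetic,
++ *
++ *	if (offset & RADEON_OFFSET_MASK)
++ *		offset &= ~RADEON_OFFSET_MASK;
++ *
++ * so an offset of 0x1234 fails the test and rounds down to 0x1000.)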
++ */ ++#define RADEON_OFFSET_SHIFT 10 ++#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT) ++#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1) ++ ++#endif /* __RADEON_SAREA_DEFINES__ */ ++ ++typedef struct { ++ unsigned int red; ++ unsigned int green; ++ unsigned int blue; ++ unsigned int alpha; ++} radeon_color_regs_t; ++ ++typedef struct { ++ /* Context state */ ++ unsigned int pp_misc; /* 0x1c14 */ ++ unsigned int pp_fog_color; ++ unsigned int re_solid_color; ++ unsigned int rb3d_blendcntl; ++ unsigned int rb3d_depthoffset; ++ unsigned int rb3d_depthpitch; ++ unsigned int rb3d_zstencilcntl; ++ ++ unsigned int pp_cntl; /* 0x1c38 */ ++ unsigned int rb3d_cntl; ++ unsigned int rb3d_coloroffset; ++ unsigned int re_width_height; ++ unsigned int rb3d_colorpitch; ++ unsigned int se_cntl; ++ ++ /* Vertex format state */ ++ unsigned int se_coord_fmt; /* 0x1c50 */ ++ ++ /* Line state */ ++ unsigned int re_line_pattern; /* 0x1cd0 */ ++ unsigned int re_line_state; ++ ++ unsigned int se_line_width; /* 0x1db8 */ ++ ++ /* Bumpmap state */ ++ unsigned int pp_lum_matrix; /* 0x1d00 */ ++ ++ unsigned int pp_rot_matrix_0; /* 0x1d58 */ ++ unsigned int pp_rot_matrix_1; ++ ++ /* Mask state */ ++ unsigned int rb3d_stencilrefmask; /* 0x1d7c */ ++ unsigned int rb3d_ropcntl; ++ unsigned int rb3d_planemask; ++ ++ /* Viewport state */ ++ unsigned int se_vport_xscale; /* 0x1d98 */ ++ unsigned int se_vport_xoffset; ++ unsigned int se_vport_yscale; ++ unsigned int se_vport_yoffset; ++ unsigned int se_vport_zscale; ++ unsigned int se_vport_zoffset; ++ ++ /* Setup state */ ++ unsigned int se_cntl_status; /* 0x2140 */ ++ ++ /* Misc state */ ++ unsigned int re_top_left; /* 0x26c0 */ ++ unsigned int re_misc; ++} drm_radeon_context_regs_t; ++ ++typedef struct { ++ /* Zbias state */ ++ unsigned int se_zbias_factor; /* 0x1dac */ ++ unsigned int se_zbias_constant; ++} drm_radeon_context2_regs_t; ++ ++/* Setup registers for each texture unit ++ */ ++typedef struct { ++ unsigned int pp_txfilter; ++ unsigned int pp_txformat; ++ unsigned int pp_txoffset; ++ unsigned int pp_txcblend; ++ unsigned int pp_txablend; ++ unsigned int pp_tfactor; ++ unsigned int pp_border_color; ++} drm_radeon_texture_regs_t; ++ ++typedef struct { ++ unsigned int start; ++ unsigned int finish; ++ unsigned int prim:8; ++ unsigned int stateidx:8; ++ unsigned int numverts:16; /* overloaded as offset/64 for elt prims */ ++ unsigned int vc_format; /* vertex format */ ++} drm_radeon_prim_t; ++ ++typedef struct { ++ drm_radeon_context_regs_t context; ++ drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS]; ++ drm_radeon_context2_regs_t context2; ++ unsigned int dirty; ++} drm_radeon_state_t; ++ ++typedef struct { ++ /* The channel for communication of state information to the ++ * kernel on firing a vertex buffer with either of the ++ * obsoleted vertex/index ioctls. ++ */ ++ drm_radeon_context_regs_t context_state; ++ drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS]; ++ unsigned int dirty; ++ unsigned int vertsize; ++ unsigned int vc_format; ++ ++ /* The current cliprects, or a subset thereof. ++ */ ++ struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS]; ++ unsigned int nbox; ++ ++ /* Counters for client-side throttling of rendering clients. 
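++	 *
++	 * (Sketch, not from the original comment: the kernel writes the
++	 * stamp of the most recently retired work back here, so a client
++	 * can bound how far ahead it runs, conceptually
++	 *
++	 *	while (my_emitted_frame - sarea->last_frame > max_lag)
++	 *		usleep(1);
++	 *
++	 * my_emitted_frame and max_lag are illustrative names only, not
++	 * part of this interface.)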
++ */ ++ unsigned int last_frame; ++ unsigned int last_dispatch; ++ unsigned int last_clear; ++ ++ struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS + ++ 1]; ++ unsigned int tex_age[RADEON_NR_TEX_HEAPS]; ++ int ctx_owner; ++ int pfState; /* number of 3d windows (0,1,2ormore) */ ++ int pfCurrentPage; /* which buffer is being displayed? */ ++ int crtc2_base; /* CRTC2 frame offset */ ++ int tiling_enabled; /* set by drm, read by 2d + 3d clients */ ++} drm_radeon_sarea_t; ++ ++/* WARNING: If you change any of these defines, make sure to change the ++ * defines in the Xserver file (xf86drmRadeon.h) ++ * ++ * KW: actually it's illegal to change any of this (backwards compatibility). ++ */ ++ ++/* Radeon specific ioctls ++ * The device specific ioctl range is 0x40 to 0x79. ++ */ ++#define DRM_RADEON_CP_INIT 0x00 ++#define DRM_RADEON_CP_START 0x01 ++#define DRM_RADEON_CP_STOP 0x02 ++#define DRM_RADEON_CP_RESET 0x03 ++#define DRM_RADEON_CP_IDLE 0x04 ++#define DRM_RADEON_RESET 0x05 ++#define DRM_RADEON_FULLSCREEN 0x06 ++#define DRM_RADEON_SWAP 0x07 ++#define DRM_RADEON_CLEAR 0x08 ++#define DRM_RADEON_VERTEX 0x09 ++#define DRM_RADEON_INDICES 0x0A ++#define DRM_RADEON_NOT_USED ++#define DRM_RADEON_STIPPLE 0x0C ++#define DRM_RADEON_INDIRECT 0x0D ++#define DRM_RADEON_TEXTURE 0x0E ++#define DRM_RADEON_VERTEX2 0x0F ++#define DRM_RADEON_CMDBUF 0x10 ++#define DRM_RADEON_GETPARAM 0x11 ++#define DRM_RADEON_FLIP 0x12 ++#define DRM_RADEON_ALLOC 0x13 ++#define DRM_RADEON_FREE 0x14 ++#define DRM_RADEON_INIT_HEAP 0x15 ++#define DRM_RADEON_IRQ_EMIT 0x16 ++#define DRM_RADEON_IRQ_WAIT 0x17 ++#define DRM_RADEON_CP_RESUME 0x18 ++#define DRM_RADEON_SETPARAM 0x19 ++#define DRM_RADEON_SURF_ALLOC 0x1a ++#define DRM_RADEON_SURF_FREE 0x1b ++ ++#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) ++#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) ++#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t) ++#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET) ++#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE) ++#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET) ++#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t) ++#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP) ++#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t) ++#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t) ++#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t) ++#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t) ++#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t) ++#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t) ++#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t) ++#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t) ++#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t) ++#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP) ++#define DRM_IOCTL_RADEON_ALLOC 
DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t) ++#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t) ++#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t) ++#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t) ++#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t) ++#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME) ++#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t) ++#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t) ++#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t) ++ ++typedef struct drm_radeon_init { ++ enum { ++ RADEON_INIT_CP = 0x01, ++ RADEON_CLEANUP_CP = 0x02, ++ RADEON_INIT_R200_CP = 0x03, ++ RADEON_INIT_R300_CP = 0x04 ++ } func; ++ unsigned long sarea_priv_offset; ++ int is_pci; /* for overriding only */ ++ int cp_mode; ++ int gart_size; ++ int ring_size; ++ int usec_timeout; ++ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ ++ unsigned long fb_offset DEPRECATED; /* deprecated, driver asks hardware */ ++ unsigned long mmio_offset DEPRECATED; /* deprecated, driver asks hardware */ ++ unsigned long ring_offset; ++ unsigned long ring_rptr_offset; ++ unsigned long buffers_offset; ++ unsigned long gart_textures_offset; ++} drm_radeon_init_t; ++ ++typedef struct drm_radeon_cp_stop { ++ int flush; ++ int idle; ++} drm_radeon_cp_stop_t; ++ ++typedef struct drm_radeon_fullscreen { ++ enum { ++ RADEON_INIT_FULLSCREEN = 0x01, ++ RADEON_CLEANUP_FULLSCREEN = 0x02 ++ } func; ++} drm_radeon_fullscreen_t; ++ ++#define CLEAR_X1 0 ++#define CLEAR_Y1 1 ++#define CLEAR_X2 2 ++#define CLEAR_Y2 3 ++#define CLEAR_DEPTH 4 ++ ++typedef union drm_radeon_clear_rect { ++ float f[5]; ++ unsigned int ui[5]; ++} drm_radeon_clear_rect_t; ++ ++typedef struct drm_radeon_clear { ++ unsigned int flags; ++ unsigned int clear_color; ++ unsigned int clear_depth; ++ unsigned int color_mask; ++ unsigned int depth_mask; /* misnamed field: should be stencil */ ++ drm_radeon_clear_rect_t __user *depth_boxes; ++} drm_radeon_clear_t; ++ ++typedef struct drm_radeon_vertex { ++ int prim; ++ int idx; /* Index of vertex buffer */ ++ int count; /* Number of vertices in buffer */ ++ int discard; /* Client finished with buffer? */ ++} drm_radeon_vertex_t; ++ ++typedef struct drm_radeon_indices { ++ int prim; ++ int idx; ++ int start; ++ int end; ++ int discard; /* Client finished with buffer? */ ++} drm_radeon_indices_t; ++ ++/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices ++ * - allows multiple primitives and state changes in a single ioctl ++ * - supports driver change to emit native primitives ++ */ ++typedef struct drm_radeon_vertex2 { ++ int idx; /* Index of vertex buffer */ ++ int discard; /* Client finished with buffer? 
*/ ++ int nr_states; ++ drm_radeon_state_t __user *state; ++ int nr_prims; ++ drm_radeon_prim_t __user *prim; ++} drm_radeon_vertex2_t; ++ ++/* v1.3 - obsoletes drm_radeon_vertex2 ++ * - allows arbitarily large cliprect list ++ * - allows updating of tcl packet, vector and scalar state ++ * - allows memory-efficient description of state updates ++ * - allows state to be emitted without a primitive ++ * (for clears, ctx switches) ++ * - allows more than one dma buffer to be referenced per ioctl ++ * - supports tcl driver ++ * - may be extended in future versions with new cmd types, packets ++ */ ++typedef struct drm_radeon_cmd_buffer { ++ int bufsz; ++ char __user *buf; ++ int nbox; ++ struct drm_clip_rect __user *boxes; ++} drm_radeon_cmd_buffer_t; ++ ++typedef struct drm_radeon_tex_image { ++ unsigned int x, y; /* Blit coordinates */ ++ unsigned int width, height; ++ const void __user *data; ++} drm_radeon_tex_image_t; ++ ++typedef struct drm_radeon_texture { ++ unsigned int offset; ++ int pitch; ++ int format; ++ int width; /* Texture image coordinates */ ++ int height; ++ drm_radeon_tex_image_t __user *image; ++} drm_radeon_texture_t; ++ ++typedef struct drm_radeon_stipple { ++ unsigned int __user *mask; ++} drm_radeon_stipple_t; ++ ++typedef struct drm_radeon_indirect { ++ int idx; ++ int start; ++ int end; ++ int discard; ++} drm_radeon_indirect_t; ++ ++/* enum for card type parameters */ ++#define RADEON_CARD_PCI 0 ++#define RADEON_CARD_AGP 1 ++#define RADEON_CARD_PCIE 2 ++ ++/* 1.3: An ioctl to get parameters that aren't available to the 3d ++ * client any other way. ++ */ ++#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */ ++#define RADEON_PARAM_LAST_FRAME 2 ++#define RADEON_PARAM_LAST_DISPATCH 3 ++#define RADEON_PARAM_LAST_CLEAR 4 ++/* Added with DRM version 1.6. */ ++#define RADEON_PARAM_IRQ_NR 5 ++#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */ ++/* Added with DRM version 1.8. 
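++ *
++ * (Sketch, not from the original comment: every parameter is read
++ * through the same ioctl, e.g.
++ *
++ *	drm_radeon_getparam_t gp = { .param = RADEON_PARAM_CARD_TYPE,
++ *				     .value = &card_type };
++ *	ioctl(fd, DRM_IOCTL_RADEON_GETPARAM, &gp);
++ *
++ * fd and card_type are assumed locals; libdrm wraps this pattern in
++ * drmCommandWriteRead().)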
*/ ++#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */ ++#define RADEON_PARAM_STATUS_HANDLE 8 ++#define RADEON_PARAM_SAREA_HANDLE 9 ++#define RADEON_PARAM_GART_TEX_HANDLE 10 ++#define RADEON_PARAM_SCRATCH_OFFSET 11 ++#define RADEON_PARAM_CARD_TYPE 12 ++#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */ ++#define RADEON_PARAM_FB_LOCATION 14 /* FB location */ ++#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */ ++ ++typedef struct drm_radeon_getparam { ++ int param; ++ void __user *value; ++} drm_radeon_getparam_t; ++ ++/* 1.6: Set up a memory manager for regions of shared memory: ++ */ ++#define RADEON_MEM_REGION_GART 1 ++#define RADEON_MEM_REGION_FB 2 ++ ++typedef struct drm_radeon_mem_alloc { ++ int region; ++ int alignment; ++ int size; ++ int __user *region_offset; /* offset from start of fb or GART */ ++} drm_radeon_mem_alloc_t; ++ ++typedef struct drm_radeon_mem_free { ++ int region; ++ int region_offset; ++} drm_radeon_mem_free_t; ++ ++typedef struct drm_radeon_mem_init_heap { ++ int region; ++ int size; ++ int start; ++} drm_radeon_mem_init_heap_t; ++ ++/* 1.6: Userspace can request & wait on irq's: ++ */ ++typedef struct drm_radeon_irq_emit { ++ int __user *irq_seq; ++} drm_radeon_irq_emit_t; ++ ++typedef struct drm_radeon_irq_wait { ++ int irq_seq; ++} drm_radeon_irq_wait_t; ++ ++/* 1.10: Clients tell the DRM where they think the framebuffer is located in ++ * the card's address space, via a new generic ioctl to set parameters ++ */ ++ ++typedef struct drm_radeon_setparam { ++ unsigned int param; ++ int64_t value; ++} drm_radeon_setparam_t; ++ ++#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */ ++#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */ ++#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */ ++ ++#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */ ++#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */ ++#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */ ++/* 1.14: Clients can allocate/free a surface ++ */ ++typedef struct drm_radeon_surface_alloc { ++ unsigned int address; ++ unsigned int size; ++ unsigned int flags; ++} drm_radeon_surface_alloc_t; ++ ++typedef struct drm_radeon_surface_free { ++ unsigned int address; ++} drm_radeon_surface_free_t; ++ ++#define DRM_RADEON_VBLANK_CRTC1 1 ++#define DRM_RADEON_VBLANK_CRTC2 2 ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/radeon_drv.c git-nokia/drivers/gpu/drm-tungsten/radeon_drv.c +--- git/drivers/gpu/drm-tungsten/radeon_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/radeon_drv.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,157 @@ ++/** ++ * \file radeon_drv.c ++ * ATI Radeon driver ++ * ++ * \author Gareth Hughes ++ */ ++ ++/* ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++#include "drm_pciids.h" ++ ++int radeon_no_wb; ++ ++MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n"); ++module_param_named(no_wb, radeon_no_wb, int, 0444); ++ ++static int dri_library_name(struct drm_device * dev, char * buf) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ int family = dev_priv->flags & RADEON_FAMILY_MASK; ++ ++ return snprintf(buf, PAGE_SIZE, "%s\n", ++ (family < CHIP_R200) ? "radeon" : ++ ((family < CHIP_R300) ? "r200" : ++ "r300")); ++} ++ ++static int radeon_suspend(struct drm_device *dev, pm_message_t state) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ /* Disable *all* interrupts */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) ++ RADEON_WRITE(R500_DxMODE_INT_MASK, 0); ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); ++ return 0; ++} ++ ++static int radeon_resume(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ /* Restore interrupt registers */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) ++ RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); ++ return 0; ++} ++ ++static struct pci_device_id pciidlist[] = { ++ radeon_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | ++ DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED, ++ .dev_priv_size = sizeof(drm_radeon_buf_priv_t), ++ .load = radeon_driver_load, ++ .firstopen = radeon_driver_firstopen, ++ .open = radeon_driver_open, ++ .preclose = radeon_driver_preclose, ++ .postclose = radeon_driver_postclose, ++ .lastclose = radeon_driver_lastclose, ++ .unload = radeon_driver_unload, ++ .suspend = radeon_suspend, ++ .resume = radeon_resume, ++ .get_vblank_counter = radeon_get_vblank_counter, ++ .enable_vblank = radeon_enable_vblank, ++ .disable_vblank = radeon_disable_vblank, ++ .dri_library_name = dri_library_name, ++ .irq_preinstall = radeon_driver_irq_preinstall, ++ .irq_postinstall = radeon_driver_irq_postinstall, ++ .irq_uninstall = radeon_driver_irq_uninstall, ++ .irq_handler = radeon_driver_irq_handler, ++ .reclaim_buffers = drm_core_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = radeon_ioctls, ++ .dma_ioctl = radeon_cp_buffers, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) ++ .compat_ioctl = radeon_compat_ioctl, ++#endif ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = 
DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++static int __init radeon_init(void) ++{ ++ driver.num_ioctls = radeon_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit radeon_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(radeon_init); ++module_exit(radeon_exit); ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurd git/drivers/gpu/drm-tungsten/radeon_drv.h git-nokia/drivers/gpu/drm-tungsten/radeon_drv.h +--- git/drivers/gpu/drm-tungsten/radeon_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/radeon_drv.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1443 @@ ++/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*- ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Kevin E. Martin ++ * Gareth Hughes ++ */ ++ ++#ifndef __RADEON_DRV_H__ ++#define __RADEON_DRV_H__ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "Gareth Hughes, Keith Whitwell, others." ++ ++#define DRIVER_NAME "radeon" ++#define DRIVER_DESC "ATI Radeon" ++#define DRIVER_DATE "20080613" ++ ++/* Interface history: ++ * ++ * 1.1 - ?? ++ * 1.2 - Add vertex2 ioctl (keith) ++ * - Add stencil capability to clear ioctl (gareth, keith) ++ * - Increase MAX_TEXTURE_LEVELS (brian) ++ * 1.3 - Add cmdbuf ioctl (keith) ++ * - Add support for new radeon packets (keith) ++ * - Add getparam ioctl (keith) ++ * - Add flip-buffers ioctl, deprecate fullscreen foo (keith). ++ * 1.4 - Add scratch registers to get_param ioctl. ++ * 1.5 - Add r200 packets to cmdbuf ioctl ++ * - Add r200 function to init ioctl ++ * - Add 'scalar2' instruction to cmdbuf ++ * 1.6 - Add static GART memory manager ++ * Add irq handler (won't be turned on unless X server knows to) ++ * Add irq ioctls and irq_active getparam. ++ * Add wait command for cmdbuf ioctl ++ * Add GART offset query for getparam ++ * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5] ++ * and R200_PP_CUBIC_OFFSET_F1_[0..5]. 
++ * Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and ++ * R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian) ++ * 1.8 - Remove need to call cleanup ioctls on last client exit (keith) ++ * Add 'GET' queries for starting additional clients on different VT's. ++ * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl. ++ * Add texture rectangle support for r100. ++ * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which ++ * clients use to tell the DRM where they think the framebuffer is ++ * located in the card's address space ++ * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color ++ * and GL_EXT_blend_[func|equation]_separate on r200 ++ * 1.12- Add R300 CP microcode support - this just loads the CP on r300 ++ * (No 3D support yet - just microcode loading). ++ * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters ++ * - Add hyperz support, add hyperz flags to clear ioctl. ++ * 1.14- Add support for color tiling ++ * - Add R100/R200 surface allocation/free support ++ * 1.15- Add support for texture micro tiling ++ * - Add support for r100 cube maps ++ * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear ++ * texture filtering on r200 ++ * 1.17- Add initial support for R300 (3D). ++ * 1.18- Add support for GL_ATI_fragment_shader, new packets ++ * R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces ++ * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR ++ * (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6) ++ * 1.19- Add support for gart table in FB memory and PCIE r300 ++ * 1.20- Add support for r300 texrect ++ * 1.21- Add support for card type getparam ++ * 1.22- Add support for texture cache flushes (R300_TX_CNTL) ++ * 1.23- Add new radeon memory map work from benh ++ * 1.24- Add general-purpose packet for manipulating scratch registers (r300) ++ * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL, ++ * new packet type) ++ * 1.26- Add support for variable size PCI(E) gart aperture ++ * 1.27- Add support for IGP GART ++ * 1.28- Add support for VBL on CRTC2 ++ * 1.29- R500 3D cmd buffer support ++ */ ++ ++#define DRIVER_MAJOR 1 ++#define DRIVER_MINOR 29 ++#define DRIVER_PATCHLEVEL 0 ++ ++/* ++ * Radeon chip families ++ */ ++enum radeon_family { ++ CHIP_R100, ++ CHIP_RV100, ++ CHIP_RS100, ++ CHIP_RV200, ++ CHIP_RS200, ++ CHIP_R200, ++ CHIP_RV250, ++ CHIP_RS300, ++ CHIP_RV280, ++ CHIP_R300, ++ CHIP_R350, ++ CHIP_RV350, ++ CHIP_RV380, ++ CHIP_R420, ++ CHIP_RV410, ++ CHIP_RS400, ++ CHIP_RS480, ++ CHIP_RS690, ++ CHIP_RV515, ++ CHIP_R520, ++ CHIP_RV530, ++ CHIP_RV560, ++ CHIP_RV570, ++ CHIP_R580, ++ CHIP_LAST, ++}; ++ ++/* ++ * Chip flags ++ */ ++enum radeon_chip_flags { ++ RADEON_FAMILY_MASK = 0x0000ffffUL, ++ RADEON_FLAGS_MASK = 0xffff0000UL, ++ RADEON_IS_MOBILITY = 0x00010000UL, ++ RADEON_IS_IGP = 0x00020000UL, ++ RADEON_SINGLE_CRTC = 0x00040000UL, ++ RADEON_IS_AGP = 0x00080000UL, ++ RADEON_HAS_HIERZ = 0x00100000UL, ++ RADEON_IS_PCIE = 0x00200000UL, ++ RADEON_NEW_MEMMAP = 0x00400000UL, ++ RADEON_IS_PCI = 0x00800000UL, ++ RADEON_IS_IGPGART = 0x01000000UL, ++}; ++ ++#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? 
\
++ DRM_READ32( (dev_priv)->ring_rptr, 0 ) : RADEON_READ(RADEON_CP_RB_RPTR))
++#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
++
++typedef struct drm_radeon_freelist {
++	unsigned int age;
++	struct drm_buf *buf;
++	struct drm_radeon_freelist *next;
++	struct drm_radeon_freelist *prev;
++} drm_radeon_freelist_t;
++
++typedef struct drm_radeon_ring_buffer {
++	u32 *start;
++	u32 *end;
++	int size; /* Double Words */
++	int size_l2qw; /* log2 Quad Words */
++
++	int rptr_update; /* Double Words */
++	int rptr_update_l2qw; /* log2 Quad Words */
++
++	int fetch_size; /* Double Words */
++	int fetch_size_l2ow; /* log2 Oct Words */
++
++	u32 tail;
++	u32 tail_mask;
++	int space;
++
++	int high_mark;
++} drm_radeon_ring_buffer_t;
++
++typedef struct drm_radeon_depth_clear_t {
++	u32 rb3d_cntl;
++	u32 rb3d_zstencilcntl;
++	u32 se_cntl;
++} drm_radeon_depth_clear_t;
++
++struct drm_radeon_driver_file_fields {
++	int64_t radeon_fb_delta;
++};
++
++struct mem_block {
++	struct mem_block *next;
++	struct mem_block *prev;
++	int start;
++	int size;
++	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
++};
++
++struct radeon_surface {
++	int refcount;
++	u32 lower;
++	u32 upper;
++	u32 flags;
++};
++
++struct radeon_virt_surface {
++	int surface_index;
++	u32 lower;
++	u32 upper;
++	u32 flags;
++	struct drm_file *file_priv;
++};
++
++#define RADEON_FLUSH_EMITED (1 << 0)
++#define RADEON_PURGE_EMITED (1 << 1)
++
++typedef struct drm_radeon_private {
++
++	drm_radeon_ring_buffer_t ring;
++	drm_radeon_sarea_t *sarea_priv;
++
++	u32 fb_location;
++	u32 fb_size;
++	int new_memmap;
++
++	int gart_size;
++	u32 gart_vm_start;
++	unsigned long gart_buffers_offset;
++
++	int cp_mode;
++	int cp_running;
++
++	drm_radeon_freelist_t *head;
++	drm_radeon_freelist_t *tail;
++	int last_buf;
++	volatile u32 *scratch;
++	int writeback_works;
++
++	int usec_timeout;
++
++	struct {
++		u32 boxes;
++		int freelist_timeouts;
++		int freelist_loops;
++		int requested_bufs;
++		int last_frame_reads;
++		int last_clear_reads;
++		int clears;
++		int texture_uploads;
++	} stats;
++
++	int do_boxes;
++	int page_flipping;
++
++	u32 color_fmt;
++	unsigned int front_offset;
++	unsigned int front_pitch;
++	unsigned int back_offset;
++	unsigned int back_pitch;
++
++	u32 depth_fmt;
++	unsigned int depth_offset;
++	unsigned int depth_pitch;
++
++	u32 front_pitch_offset;
++	u32 back_pitch_offset;
++	u32 depth_pitch_offset;
++
++	drm_radeon_depth_clear_t depth_clear;
++
++	unsigned long ring_offset;
++	unsigned long ring_rptr_offset;
++	unsigned long buffers_offset;
++	unsigned long gart_textures_offset;
++
++	drm_local_map_t *sarea;
++	drm_local_map_t *mmio;
++	drm_local_map_t *cp_ring;
++	drm_local_map_t *ring_rptr;
++	drm_local_map_t *gart_textures;
++
++	struct mem_block *gart_heap;
++	struct mem_block *fb_heap;
++
++	/* SW interrupt */
++	wait_queue_head_t swi_queue;
++	atomic_t swi_emitted;
++	int vblank_crtc;
++	uint32_t irq_enable_reg;
++	int irq_enabled;
++	uint32_t r500_disp_irq_reg;
++
++	struct radeon_surface surfaces[RADEON_MAX_SURFACES];
++	struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
++
++	unsigned long pcigart_offset;
++	unsigned int pcigart_offset_set;
++	struct drm_ati_pcigart_info gart_info;
++
++	u32 scratch_ages[5];
++
++	unsigned int crtc_last_cnt;
++	unsigned int crtc2_last_cnt;
++
++	/* starting from here on, data is preserved across an open */
++	uint32_t flags; /* see radeon_chip_flags */
++	unsigned long fb_aper_offset;
++
++	int
num_gb_pipes; ++ int track_flush; ++ uint32_t chip_family; /* extract from flags */ ++} drm_radeon_private_t; ++ ++typedef struct drm_radeon_buf_priv { ++ u32 age; ++} drm_radeon_buf_priv_t; ++ ++typedef struct drm_radeon_kcmd_buffer { ++ int bufsz; ++ char *buf; ++ int nbox; ++ struct drm_clip_rect __user *boxes; ++} drm_radeon_kcmd_buffer_t; ++ ++extern int radeon_no_wb; ++extern struct drm_ioctl_desc radeon_ioctls[]; ++extern int radeon_max_ioctl; ++ ++/* Check whether the given hardware address is inside the framebuffer or the ++ * GART area. ++ */ ++static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv, ++ u64 off) ++{ ++ u32 fb_start = dev_priv->fb_location; ++ u32 fb_end = fb_start + dev_priv->fb_size - 1; ++ u32 gart_start = dev_priv->gart_vm_start; ++ u32 gart_end = gart_start + dev_priv->gart_size - 1; ++ ++ return ((off >= fb_start && off <= fb_end) || ++ (off >= gart_start && off <= gart_end)); ++} ++ ++ /* radeon_cp.c */ ++extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv); ++ ++extern void radeon_freelist_reset(struct drm_device * dev); ++extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); ++ ++extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n); ++ ++extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv); ++ ++extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern void radeon_mem_takedown(struct mem_block **heap); ++extern void radeon_mem_release(struct drm_file *file_priv, ++ struct mem_block *heap); ++ ++ /* radeon_irq.c */ ++extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state); ++extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); ++ ++extern void radeon_do_release(struct drm_device * dev); ++extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc); ++extern int radeon_enable_vblank(struct drm_device *dev, int crtc); ++extern void radeon_disable_vblank(struct drm_device *dev, int crtc); ++extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); ++extern void radeon_driver_irq_preinstall(struct drm_device * dev); ++extern int radeon_driver_irq_postinstall(struct drm_device * dev); ++extern void radeon_driver_irq_uninstall(struct drm_device * dev); ++extern int radeon_vblank_crtc_get(struct drm_device *dev); ++extern int radeon_vblank_crtc_set(struct 
drm_device *dev, int64_t value); ++ ++extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); ++extern int radeon_driver_unload(struct drm_device *dev); ++extern int radeon_driver_firstopen(struct drm_device *dev); ++extern void radeon_driver_preclose(struct drm_device * dev, ++ struct drm_file *file_priv); ++extern void radeon_driver_postclose(struct drm_device * dev, ++ struct drm_file *file_priv); ++extern void radeon_driver_lastclose(struct drm_device * dev); ++extern int radeon_driver_open(struct drm_device * dev, ++ struct drm_file * file_priv); ++extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, ++ unsigned long arg); ++ ++/* r300_cmdbuf.c */ ++extern void r300_init_reg_flags(struct drm_device *dev); ++ ++extern int r300_do_cp_cmdbuf(struct drm_device *dev, ++ struct drm_file *file_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf); ++ ++/* Flags for stats.boxes ++ */ ++#define RADEON_BOX_DMA_IDLE 0x1 ++#define RADEON_BOX_RING_FULL 0x2 ++#define RADEON_BOX_FLIP 0x4 ++#define RADEON_BOX_WAIT_IDLE 0x8 ++#define RADEON_BOX_TEXTURE_LOAD 0x10 ++ ++/* Register definitions, register access macros and drmAddMap constants ++ * for Radeon kernel driver. ++ */ ++#define RADEON_AGP_COMMAND 0x0f60 ++#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */ ++# define RADEON_AGP_ENABLE (1<<8) ++#define RADEON_AUX_SCISSOR_CNTL 0x26f0 ++# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24) ++# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25) ++# define RADEON_EXCLUSIVE_SCISSOR_2 (1 << 26) ++# define RADEON_SCISSOR_0_ENABLE (1 << 28) ++# define RADEON_SCISSOR_1_ENABLE (1 << 29) ++# define RADEON_SCISSOR_2_ENABLE (1 << 30) ++ ++#define RADEON_BUS_CNTL 0x0030 ++# define RADEON_BUS_MASTER_DIS (1 << 6) ++ ++#define RADEON_CLOCK_CNTL_DATA 0x000c ++# define RADEON_PLL_WR_EN (1 << 7) ++#define RADEON_CLOCK_CNTL_INDEX 0x0008 ++#define RADEON_CONFIG_APER_SIZE 0x0108 ++#define RADEON_CONFIG_MEMSIZE 0x00f8 ++#define RADEON_CRTC_OFFSET 0x0224 ++#define RADEON_CRTC_OFFSET_CNTL 0x0228 ++# define RADEON_CRTC_TILE_EN (1 << 15) ++# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) ++#define RADEON_CRTC2_OFFSET 0x0324 ++#define RADEON_CRTC2_OFFSET_CNTL 0x0328 ++ ++#define RADEON_PCIE_INDEX 0x0030 ++#define RADEON_PCIE_DATA 0x0034 ++#define RADEON_PCIE_TX_GART_CNTL 0x10 ++# define RADEON_PCIE_TX_GART_EN (1 << 0) ++# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1) ++# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1) ++# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1) ++# define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3) ++# define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3) ++# define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5) ++# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8) ++#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11 ++#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12 ++#define RADEON_PCIE_TX_GART_BASE 0x13 ++#define RADEON_PCIE_TX_GART_START_LO 0x14 ++#define RADEON_PCIE_TX_GART_START_HI 0x15 ++#define RADEON_PCIE_TX_GART_END_LO 0x16 ++#define RADEON_PCIE_TX_GART_END_HI 0x17 ++ ++#define RS480_NB_MC_INDEX 0x168 ++# define RS480_NB_MC_IND_WR_EN (1 << 8) ++#define RS480_NB_MC_DATA 0x16c ++ ++#define RS690_MC_INDEX 0x78 ++# define RS690_MC_INDEX_MASK 0x1ff ++# define RS690_MC_INDEX_WR_EN (1 << 9) ++# define RS690_MC_INDEX_WR_ACK 0x7f ++#define RS690_MC_DATA 0x7c ++ ++/* MC indirect registers */ ++#define RS480_MC_MISC_CNTL 0x18 ++# define RS480_DISABLE_GTW (1 << 1) ++/* switch between MCIND GART and MM GART registers. 
0 = mmgart, 1 = mcind gart */ ++# define RS480_GART_INDEX_REG_EN (1 << 12) ++# define RS690_BLOCK_GFX_D3_EN (1 << 14) ++#define RS480_K8_FB_LOCATION 0x1e ++#define RS480_GART_FEATURE_ID 0x2b ++# define RS480_HANG_EN (1 << 11) ++# define RS480_TLB_ENABLE (1 << 18) ++# define RS480_P2P_ENABLE (1 << 19) ++# define RS480_GTW_LAC_EN (1 << 25) ++# define RS480_2LEVEL_GART (0 << 30) ++# define RS480_1LEVEL_GART (1 << 30) ++# define RS480_PDC_EN (1 << 31) ++#define RS480_GART_BASE 0x2c ++#define RS480_GART_CACHE_CNTRL 0x2e ++# define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */ ++#define RS480_AGP_ADDRESS_SPACE_SIZE 0x38 ++# define RS480_GART_EN (1 << 0) ++# define RS480_VA_SIZE_32MB (0 << 1) ++# define RS480_VA_SIZE_64MB (1 << 1) ++# define RS480_VA_SIZE_128MB (2 << 1) ++# define RS480_VA_SIZE_256MB (3 << 1) ++# define RS480_VA_SIZE_512MB (4 << 1) ++# define RS480_VA_SIZE_1GB (5 << 1) ++# define RS480_VA_SIZE_2GB (6 << 1) ++#define RS480_AGP_MODE_CNTL 0x39 ++# define RS480_POST_GART_Q_SIZE (1 << 18) ++# define RS480_NONGART_SNOOP (1 << 19) ++# define RS480_AGP_RD_BUF_SIZE (1 << 20) ++# define RS480_REQ_TYPE_SNOOP_SHIFT 22 ++# define RS480_REQ_TYPE_SNOOP_MASK 0x3 ++# define RS480_REQ_TYPE_SNOOP_DIS (1 << 24) ++#define RS480_MC_MISC_UMA_CNTL 0x5f ++#define RS480_MC_MCLK_CNTL 0x7a ++#define RS480_MC_UMA_DUALCH_CNTL 0x86 ++ ++#define RS690_MC_FB_LOCATION 0x100 ++#define RS690_MC_AGP_LOCATION 0x101 ++#define RS690_MC_AGP_BASE 0x102 ++#define RS690_MC_AGP_BASE_2 0x103 ++ ++#define R520_MC_IND_INDEX 0x70 ++#define R520_MC_IND_WR_EN (1 << 24) ++#define R520_MC_IND_DATA 0x74 ++ ++#define RV515_MC_FB_LOCATION 0x01 ++#define RV515_MC_AGP_LOCATION 0x02 ++#define RV515_MC_AGP_BASE 0x03 ++#define RV515_MC_AGP_BASE_2 0x04 ++ ++#define R520_MC_FB_LOCATION 0x04 ++#define R520_MC_AGP_LOCATION 0x05 ++#define R520_MC_AGP_BASE 0x06 ++#define R520_MC_AGP_BASE_2 0x07 ++ ++#define RADEON_MPP_TB_CONFIG 0x01c0 ++#define RADEON_MEM_CNTL 0x0140 ++#define RADEON_MEM_SDRAM_MODE_REG 0x0158 ++#define RADEON_AGP_BASE_2 0x015c /* r200+ only */ ++#define RS480_AGP_BASE_2 0x0164 ++#define RADEON_AGP_BASE 0x0170 ++ ++/* pipe config regs */ ++#define R400_GB_PIPE_SELECT 0x402c ++#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */ ++#define R500_SU_REG_DEST 0x42c8 ++#define R300_GB_TILE_CONFIG 0x4018 ++# define R300_ENABLE_TILING (1 << 0) ++# define R300_PIPE_COUNT_RV350 (0 << 1) ++# define R300_PIPE_COUNT_R300 (3 << 1) ++# define R300_PIPE_COUNT_R420_3P (6 << 1) ++# define R300_PIPE_COUNT_R420 (7 << 1) ++# define R300_TILE_SIZE_8 (0 << 4) ++# define R300_TILE_SIZE_16 (1 << 4) ++# define R300_TILE_SIZE_32 (2 << 4) ++# define R300_SUBPIXEL_1_12 (0 << 16) ++# define R300_SUBPIXEL_1_16 (1 << 16) ++#define R300_DST_PIPE_CONFIG 0x170c ++# define R300_PIPE_AUTO_CONFIG (1 << 31) ++#define R300_RB2D_DSTCACHE_MODE 0x3428 ++# define R300_DC_AUTOFLUSH_ENABLE (1 << 8) ++# define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17) ++ ++#define RADEON_RB3D_COLOROFFSET 0x1c40 ++#define RADEON_RB3D_COLORPITCH 0x1c48 ++ ++#define RADEON_SRC_X_Y 0x1590 ++ ++#define RADEON_DP_GUI_MASTER_CNTL 0x146c ++# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) ++# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) ++# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4) ++# define RADEON_GMC_BRUSH_NONE (15 << 4) ++# define RADEON_GMC_DST_16BPP (4 << 8) ++# define RADEON_GMC_DST_24BPP (5 << 8) ++# define RADEON_GMC_DST_32BPP (6 << 8) ++# define RADEON_GMC_DST_DATATYPE_SHIFT 8 ++# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12) ++# define RADEON_DP_SRC_SOURCE_MEMORY (2 
<< 24) ++# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24) ++# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28) ++# define RADEON_GMC_WR_MSK_DIS (1 << 30) ++# define RADEON_ROP3_S 0x00cc0000 ++# define RADEON_ROP3_P 0x00f00000 ++#define RADEON_DP_WRITE_MASK 0x16cc ++#define RADEON_SRC_PITCH_OFFSET 0x1428 ++#define RADEON_DST_PITCH_OFFSET 0x142c ++#define RADEON_DST_PITCH_OFFSET_C 0x1c80 ++# define RADEON_DST_TILE_LINEAR (0 << 30) ++# define RADEON_DST_TILE_MACRO (1 << 30) ++# define RADEON_DST_TILE_MICRO (2 << 30) ++# define RADEON_DST_TILE_BOTH (3 << 30) ++ ++#define RADEON_SCRATCH_REG0 0x15e0 ++#define RADEON_SCRATCH_REG1 0x15e4 ++#define RADEON_SCRATCH_REG2 0x15e8 ++#define RADEON_SCRATCH_REG3 0x15ec ++#define RADEON_SCRATCH_REG4 0x15f0 ++#define RADEON_SCRATCH_REG5 0x15f4 ++#define RADEON_SCRATCH_UMSK 0x0770 ++#define RADEON_SCRATCH_ADDR 0x0774 ++ ++#define RADEON_SCRATCHOFF( x ) (RADEON_SCRATCH_REG_OFFSET + 4*(x)) ++ ++#define GET_SCRATCH( x ) (dev_priv->writeback_works \ ++ ? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \ ++ : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) ) ++ ++#define RADEON_CRTC_CRNT_FRAME 0x0214 ++#define RADEON_CRTC2_CRNT_FRAME 0x0314 ++ ++#define RADEON_CRTC_STATUS 0x005c ++#define RADEON_CRTC2_STATUS 0x03fc ++ ++#define RADEON_GEN_INT_CNTL 0x0040 ++# define RADEON_CRTC_VBLANK_MASK (1 << 0) ++# define RADEON_CRTC2_VBLANK_MASK (1 << 9) ++# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19) ++# define RADEON_SW_INT_ENABLE (1 << 25) ++ ++#define RADEON_GEN_INT_STATUS 0x0044 ++# define RADEON_CRTC_VBLANK_STAT (1 << 0) ++# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) ++# define RADEON_CRTC2_VBLANK_STAT (1 << 9) ++# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) ++# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19) ++# define RADEON_SW_INT_TEST (1 << 25) ++# define RADEON_SW_INT_TEST_ACK (1 << 25) ++# define RADEON_SW_INT_FIRE (1 << 26) ++# define R500_DISPLAY_INT_STATUS (1 << 0) ++ ++ ++#define RADEON_HOST_PATH_CNTL 0x0130 ++# define RADEON_HDP_SOFT_RESET (1 << 26) ++# define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28) ++# define RADEON_HDP_WC_TIMEOUT_28BCLK (7 << 28) ++ ++#define RADEON_ISYNC_CNTL 0x1724 ++# define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0) ++# define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1) ++# define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2) ++# define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3) ++# define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4) ++# define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5) ++ ++#define RADEON_RBBM_GUICNTL 0x172c ++# define RADEON_HOST_DATA_SWAP_NONE (0 << 0) ++# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0) ++# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0) ++# define RADEON_HOST_DATA_SWAP_HDW (3 << 0) ++ ++#define RADEON_MC_AGP_LOCATION 0x014c ++#define RADEON_MC_FB_LOCATION 0x0148 ++#define RADEON_MCLK_CNTL 0x0012 ++# define RADEON_FORCEON_MCLKA (1 << 16) ++# define RADEON_FORCEON_MCLKB (1 << 17) ++# define RADEON_FORCEON_YCLKA (1 << 18) ++# define RADEON_FORCEON_YCLKB (1 << 19) ++# define RADEON_FORCEON_MC (1 << 20) ++# define RADEON_FORCEON_AIC (1 << 21) ++ ++#define RADEON_PP_BORDER_COLOR_0 0x1d40 ++#define RADEON_PP_BORDER_COLOR_1 0x1d44 ++#define RADEON_PP_BORDER_COLOR_2 0x1d48 ++#define RADEON_PP_CNTL 0x1c38 ++# define RADEON_SCISSOR_ENABLE (1 << 1) ++#define RADEON_PP_LUM_MATRIX 0x1d00 ++#define RADEON_PP_MISC 0x1c14 ++#define RADEON_PP_ROT_MATRIX_0 0x1d58 ++#define RADEON_PP_TXFILTER_0 0x1c54 ++#define RADEON_PP_TXOFFSET_0 0x1c5c ++#define RADEON_PP_TXFILTER_1 0x1c6c ++#define RADEON_PP_TXFILTER_2 0x1c84 ++ ++#define R300_RB2D_DSTCACHE_CTLSTAT 0x342c /* use 
R300_DSTCACHE_CTLSTAT */ ++#define R300_DSTCACHE_CTLSTAT 0x1714 ++# define R300_RB2D_DC_FLUSH (3 << 0) ++# define R300_RB2D_DC_FREE (3 << 2) ++# define R300_RB2D_DC_FLUSH_ALL 0xf ++# define R300_RB2D_DC_BUSY (1 << 31) ++#define RADEON_RB3D_CNTL 0x1c3c ++# define RADEON_ALPHA_BLEND_ENABLE (1 << 0) ++# define RADEON_PLANE_MASK_ENABLE (1 << 1) ++# define RADEON_DITHER_ENABLE (1 << 2) ++# define RADEON_ROUND_ENABLE (1 << 3) ++# define RADEON_SCALE_DITHER_ENABLE (1 << 4) ++# define RADEON_DITHER_INIT (1 << 5) ++# define RADEON_ROP_ENABLE (1 << 6) ++# define RADEON_STENCIL_ENABLE (1 << 7) ++# define RADEON_Z_ENABLE (1 << 8) ++# define RADEON_ZBLOCK16 (1 << 15) ++#define RADEON_RB3D_DEPTHOFFSET 0x1c24 ++#define RADEON_RB3D_DEPTHCLEARVALUE 0x3230 ++#define RADEON_RB3D_DEPTHPITCH 0x1c28 ++#define RADEON_RB3D_PLANEMASK 0x1d84 ++#define RADEON_RB3D_STENCILREFMASK 0x1d7c ++#define RADEON_RB3D_ZCACHE_MODE 0x3250 ++#define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254 ++# define RADEON_RB3D_ZC_FLUSH (1 << 0) ++# define RADEON_RB3D_ZC_FREE (1 << 2) ++# define RADEON_RB3D_ZC_FLUSH_ALL 0x5 ++# define RADEON_RB3D_ZC_BUSY (1 << 31) ++#define R300_ZB_ZCACHE_CTLSTAT 0x4f18 ++# define R300_ZC_FLUSH (1 << 0) ++# define R300_ZC_FREE (1 << 1) ++# define R300_ZC_BUSY (1 << 31) ++#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c ++# define RADEON_RB3D_DC_FLUSH (3 << 0) ++# define RADEON_RB3D_DC_FREE (3 << 2) ++# define RADEON_RB3D_DC_FLUSH_ALL 0xf ++# define RADEON_RB3D_DC_BUSY (1 << 31) ++#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c ++# define R300_RB3D_DC_FLUSH (2 << 0) ++# define R300_RB3D_DC_FREE (2 << 2) ++# define R300_RB3D_DC_FINISH (1 << 4) ++#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c ++# define RADEON_Z_TEST_MASK (7 << 4) ++# define RADEON_Z_TEST_ALWAYS (7 << 4) ++# define RADEON_Z_HIERARCHY_ENABLE (1 << 8) ++# define RADEON_STENCIL_TEST_ALWAYS (7 << 12) ++# define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16) ++# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20) ++# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24) ++# define RADEON_Z_COMPRESSION_ENABLE (1 << 28) ++# define RADEON_FORCE_Z_DIRTY (1 << 29) ++# define RADEON_Z_WRITE_ENABLE (1 << 30) ++# define RADEON_Z_DECOMPRESSION_ENABLE (1 << 31) ++#define RADEON_RBBM_SOFT_RESET 0x00f0 ++# define RADEON_SOFT_RESET_CP (1 << 0) ++# define RADEON_SOFT_RESET_HI (1 << 1) ++# define RADEON_SOFT_RESET_SE (1 << 2) ++# define RADEON_SOFT_RESET_RE (1 << 3) ++# define RADEON_SOFT_RESET_PP (1 << 4) ++# define RADEON_SOFT_RESET_E2 (1 << 5) ++# define RADEON_SOFT_RESET_RB (1 << 6) ++# define RADEON_SOFT_RESET_HDP (1 << 7) ++/* ++ * 6:0 Available slots in the FIFO ++ * 8 Host Interface active ++ * 9 CP request active ++ * 10 FIFO request active ++ * 11 Host Interface retry active ++ * 12 CP retry active ++ * 13 FIFO retry active ++ * 14 FIFO pipeline busy ++ * 15 Event engine busy ++ * 16 CP command stream busy ++ * 17 2D engine busy ++ * 18 2D portion of render backend busy ++ * 20 3D setup engine busy ++ * 26 GA engine busy ++ * 27 CBA 2D engine busy ++ * 31 2D engine busy or 3D engine busy or FIFO not empty or CP busy or ++ * command stream queue not empty or Ring Buffer not empty ++ */ ++#define RADEON_RBBM_STATUS 0x0e40 ++/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register. 
*/ ++/* #define RADEON_RBBM_STATUS 0x1740 */ ++/* bits 6:0 are dword slots available in the cmd fifo */ ++# define RADEON_RBBM_FIFOCNT_MASK 0x007f ++# define RADEON_HIRQ_ON_RBB (1 << 8) ++# define RADEON_CPRQ_ON_RBB (1 << 9) ++# define RADEON_CFRQ_ON_RBB (1 << 10) ++# define RADEON_HIRQ_IN_RTBUF (1 << 11) ++# define RADEON_CPRQ_IN_RTBUF (1 << 12) ++# define RADEON_CFRQ_IN_RTBUF (1 << 13) ++# define RADEON_PIPE_BUSY (1 << 14) ++# define RADEON_ENG_EV_BUSY (1 << 15) ++# define RADEON_CP_CMDSTRM_BUSY (1 << 16) ++# define RADEON_E2_BUSY (1 << 17) ++# define RADEON_RB2D_BUSY (1 << 18) ++# define RADEON_RB3D_BUSY (1 << 19) /* not used on r300 */ ++# define RADEON_VAP_BUSY (1 << 20) ++# define RADEON_RE_BUSY (1 << 21) /* not used on r300 */ ++# define RADEON_TAM_BUSY (1 << 22) /* not used on r300 */ ++# define RADEON_TDM_BUSY (1 << 23) /* not used on r300 */ ++# define RADEON_PB_BUSY (1 << 24) /* not used on r300 */ ++# define RADEON_TIM_BUSY (1 << 25) /* not used on r300 */ ++# define RADEON_GA_BUSY (1 << 26) ++# define RADEON_CBA2D_BUSY (1 << 27) ++# define RADEON_RBBM_ACTIVE (1 << 31) ++#define RADEON_RE_LINE_PATTERN 0x1cd0 ++#define RADEON_RE_MISC 0x26c4 ++#define RADEON_RE_TOP_LEFT 0x26c0 ++#define RADEON_RE_WIDTH_HEIGHT 0x1c44 ++#define RADEON_RE_STIPPLE_ADDR 0x1cc8 ++#define RADEON_RE_STIPPLE_DATA 0x1ccc ++ ++#define RADEON_SCISSOR_TL_0 0x1cd8 ++#define RADEON_SCISSOR_BR_0 0x1cdc ++#define RADEON_SCISSOR_TL_1 0x1ce0 ++#define RADEON_SCISSOR_BR_1 0x1ce4 ++#define RADEON_SCISSOR_TL_2 0x1ce8 ++#define RADEON_SCISSOR_BR_2 0x1cec ++#define RADEON_SE_COORD_FMT 0x1c50 ++#define RADEON_SE_CNTL 0x1c4c ++# define RADEON_FFACE_CULL_CW (0 << 0) ++# define RADEON_BFACE_SOLID (3 << 1) ++# define RADEON_FFACE_SOLID (3 << 3) ++# define RADEON_FLAT_SHADE_VTX_LAST (3 << 6) ++# define RADEON_DIFFUSE_SHADE_FLAT (1 << 8) ++# define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8) ++# define RADEON_ALPHA_SHADE_FLAT (1 << 10) ++# define RADEON_ALPHA_SHADE_GOURAUD (2 << 10) ++# define RADEON_SPECULAR_SHADE_FLAT (1 << 12) ++# define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12) ++# define RADEON_FOG_SHADE_FLAT (1 << 14) ++# define RADEON_FOG_SHADE_GOURAUD (2 << 14) ++# define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24) ++# define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25) ++# define RADEON_VTX_PIX_CENTER_OGL (1 << 27) ++# define RADEON_ROUND_MODE_TRUNC (0 << 28) ++# define RADEON_ROUND_PREC_8TH_PIX (1 << 30) ++#define RADEON_SE_CNTL_STATUS 0x2140 ++#define RADEON_SE_LINE_WIDTH 0x1db8 ++#define RADEON_SE_VPORT_XSCALE 0x1d98 ++#define RADEON_SE_ZBIAS_FACTOR 0x1db0 ++#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210 ++#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254 ++#define RADEON_SE_TCL_VECTOR_INDX_REG 0x2200 ++# define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT 16 ++# define RADEON_VEC_INDX_DWORD_COUNT_SHIFT 28 ++#define RADEON_SE_TCL_VECTOR_DATA_REG 0x2204 ++#define RADEON_SE_TCL_SCALAR_INDX_REG 0x2208 ++# define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT 16 ++#define RADEON_SE_TCL_SCALAR_DATA_REG 0x220C ++#define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8 ++#define RADEON_SURFACE_ACCESS_CLR 0x0bfc ++#define RADEON_SURFACE_CNTL 0x0b00 ++# define RADEON_SURF_TRANSLATION_DIS (1 << 8) ++# define RADEON_NONSURF_AP0_SWP_MASK (3 << 20) ++# define RADEON_NONSURF_AP0_SWP_LITTLE (0 << 20) ++# define RADEON_NONSURF_AP0_SWP_BIG16 (1 << 20) ++# define RADEON_NONSURF_AP0_SWP_BIG32 (2 << 20) ++# define RADEON_NONSURF_AP1_SWP_MASK (3 << 22) ++# define RADEON_NONSURF_AP1_SWP_LITTLE (0 << 22) ++# define RADEON_NONSURF_AP1_SWP_BIG16 (1 << 22) ++# define 
RADEON_NONSURF_AP1_SWP_BIG32 (2 << 22) ++#define RADEON_SURFACE0_INFO 0x0b0c ++# define RADEON_SURF_PITCHSEL_MASK (0x1ff << 0) ++# define RADEON_SURF_TILE_MODE_MASK (3 << 16) ++# define RADEON_SURF_TILE_MODE_MACRO (0 << 16) ++# define RADEON_SURF_TILE_MODE_MICRO (1 << 16) ++# define RADEON_SURF_TILE_MODE_32BIT_Z (2 << 16) ++# define RADEON_SURF_TILE_MODE_16BIT_Z (3 << 16) ++#define RADEON_SURFACE0_LOWER_BOUND 0x0b04 ++#define RADEON_SURFACE0_UPPER_BOUND 0x0b08 ++# define RADEON_SURF_ADDRESS_FIXED_MASK (0x3ff << 0) ++#define RADEON_SURFACE1_INFO 0x0b1c ++#define RADEON_SURFACE1_LOWER_BOUND 0x0b14 ++#define RADEON_SURFACE1_UPPER_BOUND 0x0b18 ++#define RADEON_SURFACE2_INFO 0x0b2c ++#define RADEON_SURFACE2_LOWER_BOUND 0x0b24 ++#define RADEON_SURFACE2_UPPER_BOUND 0x0b28 ++#define RADEON_SURFACE3_INFO 0x0b3c ++#define RADEON_SURFACE3_LOWER_BOUND 0x0b34 ++#define RADEON_SURFACE3_UPPER_BOUND 0x0b38 ++#define RADEON_SURFACE4_INFO 0x0b4c ++#define RADEON_SURFACE4_LOWER_BOUND 0x0b44 ++#define RADEON_SURFACE4_UPPER_BOUND 0x0b48 ++#define RADEON_SURFACE5_INFO 0x0b5c ++#define RADEON_SURFACE5_LOWER_BOUND 0x0b54 ++#define RADEON_SURFACE5_UPPER_BOUND 0x0b58 ++#define RADEON_SURFACE6_INFO 0x0b6c ++#define RADEON_SURFACE6_LOWER_BOUND 0x0b64 ++#define RADEON_SURFACE6_UPPER_BOUND 0x0b68 ++#define RADEON_SURFACE7_INFO 0x0b7c ++#define RADEON_SURFACE7_LOWER_BOUND 0x0b74 ++#define RADEON_SURFACE7_UPPER_BOUND 0x0b78 ++#define RADEON_SW_SEMAPHORE 0x013c ++ ++#define RADEON_WAIT_UNTIL 0x1720 ++# define RADEON_WAIT_CRTC_PFLIP (1 << 0) ++# define RADEON_WAIT_2D_IDLE (1 << 14) ++# define RADEON_WAIT_3D_IDLE (1 << 15) ++# define RADEON_WAIT_2D_IDLECLEAN (1 << 16) ++# define RADEON_WAIT_3D_IDLECLEAN (1 << 17) ++# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18) ++ ++#define RADEON_RB3D_ZMASKOFFSET 0x3234 ++#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c ++# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0) ++# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0) ++ ++/* CP registers */ ++#define RADEON_CP_ME_RAM_ADDR 0x07d4 ++#define RADEON_CP_ME_RAM_RADDR 0x07d8 ++#define RADEON_CP_ME_RAM_DATAH 0x07dc ++#define RADEON_CP_ME_RAM_DATAL 0x07e0 ++ ++#define RADEON_CP_RB_BASE 0x0700 ++#define RADEON_CP_RB_CNTL 0x0704 ++# define RADEON_BUF_SWAP_32BIT (2 << 16) ++# define RADEON_RB_NO_UPDATE (1 << 27) ++#define RADEON_CP_RB_RPTR_ADDR 0x070c ++#define RADEON_CP_RB_RPTR 0x0710 ++#define RADEON_CP_RB_WPTR 0x0714 ++ ++#define RADEON_CP_RB_WPTR_DELAY 0x0718 ++# define RADEON_PRE_WRITE_TIMER_SHIFT 0 ++# define RADEON_PRE_WRITE_LIMIT_SHIFT 23 ++ ++#define RADEON_CP_IB_BASE 0x0738 ++ ++#define RADEON_CP_CSQ_CNTL 0x0740 ++# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0) ++# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28) ++# define RADEON_CSQ_PRIPIO_INDDIS (1 << 28) ++# define RADEON_CSQ_PRIBM_INDDIS (2 << 28) ++# define RADEON_CSQ_PRIPIO_INDBM (3 << 28) ++# define RADEON_CSQ_PRIBM_INDBM (4 << 28) ++# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28) ++ ++#define RADEON_AIC_CNTL 0x01d0 ++# define RADEON_PCIGART_TRANSLATE_EN (1 << 0) ++#define RADEON_AIC_STAT 0x01d4 ++#define RADEON_AIC_PT_BASE 0x01d8 ++#define RADEON_AIC_LO_ADDR 0x01dc ++#define RADEON_AIC_HI_ADDR 0x01e0 ++#define RADEON_AIC_TLB_ADDR 0x01e4 ++#define RADEON_AIC_TLB_DATA 0x01e8 ++ ++/* CP command packets */ ++#define RADEON_CP_PACKET0 0x00000000 ++# define RADEON_ONE_REG_WR (1 << 15) ++#define RADEON_CP_PACKET1 0x40000000 ++#define RADEON_CP_PACKET2 0x80000000 ++#define RADEON_CP_PACKET3 0xC0000000 ++# define RADEON_CP_NOP 0x00001000 ++# define RADEON_CP_NEXT_CHAR 0x00001900 ++# define 
RADEON_CP_PLY_NEXTSCAN 0x00001D00 ++# define RADEON_CP_SET_SCISSORS 0x00001E00 ++ /* GEN_INDX_PRIM is unsupported starting with R300 */ ++# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 ++# define RADEON_WAIT_FOR_IDLE 0x00002600 ++# define RADEON_3D_DRAW_VBUF 0x00002800 ++# define RADEON_3D_DRAW_IMMD 0x00002900 ++# define RADEON_3D_DRAW_INDX 0x00002A00 ++# define RADEON_CP_LOAD_PALETTE 0x00002C00 ++# define RADEON_3D_LOAD_VBPNTR 0x00002F00 ++# define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000 ++# define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100 ++# define RADEON_3D_CLEAR_ZMASK 0x00003200 ++# define RADEON_CP_INDX_BUFFER 0x00003300 ++# define RADEON_CP_3D_DRAW_VBUF_2 0x00003400 ++# define RADEON_CP_3D_DRAW_IMMD_2 0x00003500 ++# define RADEON_CP_3D_DRAW_INDX_2 0x00003600 ++# define RADEON_3D_CLEAR_HIZ 0x00003700 ++# define RADEON_CP_3D_CLEAR_CMASK 0x00003802 ++# define RADEON_CNTL_HOSTDATA_BLT 0x00009400 ++# define RADEON_CNTL_PAINT_MULTI 0x00009A00 ++# define RADEON_CNTL_BITBLT_MULTI 0x00009B00 ++# define RADEON_CNTL_SET_SCISSORS 0xC0001E00 ++ ++#define RADEON_CP_PACKET_MASK 0xC0000000 ++#define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000 ++#define RADEON_CP_PACKET0_REG_MASK 0x000007ff ++#define RADEON_CP_PACKET1_REG0_MASK 0x000007ff ++#define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 ++ ++#define RADEON_VTX_Z_PRESENT (1 << 31) ++#define RADEON_VTX_PKCOLOR_PRESENT (1 << 3) ++ ++#define RADEON_PRIM_TYPE_NONE (0 << 0) ++#define RADEON_PRIM_TYPE_POINT (1 << 0) ++#define RADEON_PRIM_TYPE_LINE (2 << 0) ++#define RADEON_PRIM_TYPE_LINE_STRIP (3 << 0) ++#define RADEON_PRIM_TYPE_TRI_LIST (4 << 0) ++#define RADEON_PRIM_TYPE_TRI_FAN (5 << 0) ++#define RADEON_PRIM_TYPE_TRI_STRIP (6 << 0) ++#define RADEON_PRIM_TYPE_TRI_TYPE2 (7 << 0) ++#define RADEON_PRIM_TYPE_RECT_LIST (8 << 0) ++#define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) ++#define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) ++#define RADEON_PRIM_TYPE_MASK 0xf ++#define RADEON_PRIM_WALK_IND (1 << 4) ++#define RADEON_PRIM_WALK_LIST (2 << 4) ++#define RADEON_PRIM_WALK_RING (3 << 4) ++#define RADEON_COLOR_ORDER_BGRA (0 << 6) ++#define RADEON_COLOR_ORDER_RGBA (1 << 6) ++#define RADEON_MAOS_ENABLE (1 << 7) ++#define RADEON_VTX_FMT_R128_MODE (0 << 8) ++#define RADEON_VTX_FMT_RADEON_MODE (1 << 8) ++#define RADEON_NUM_VERTICES_SHIFT 16 ++ ++#define RADEON_COLOR_FORMAT_CI8 2 ++#define RADEON_COLOR_FORMAT_ARGB1555 3 ++#define RADEON_COLOR_FORMAT_RGB565 4 ++#define RADEON_COLOR_FORMAT_ARGB8888 6 ++#define RADEON_COLOR_FORMAT_RGB332 7 ++#define RADEON_COLOR_FORMAT_RGB8 9 ++#define RADEON_COLOR_FORMAT_ARGB4444 15 ++ ++#define RADEON_TXFORMAT_I8 0 ++#define RADEON_TXFORMAT_AI88 1 ++#define RADEON_TXFORMAT_RGB332 2 ++#define RADEON_TXFORMAT_ARGB1555 3 ++#define RADEON_TXFORMAT_RGB565 4 ++#define RADEON_TXFORMAT_ARGB4444 5 ++#define RADEON_TXFORMAT_ARGB8888 6 ++#define RADEON_TXFORMAT_RGBA8888 7 ++#define RADEON_TXFORMAT_Y8 8 ++#define RADEON_TXFORMAT_VYUY422 10 ++#define RADEON_TXFORMAT_YVYU422 11 ++#define RADEON_TXFORMAT_DXT1 12 ++#define RADEON_TXFORMAT_DXT23 14 ++#define RADEON_TXFORMAT_DXT45 15 ++ ++#define R200_PP_TXCBLEND_0 0x2f00 ++#define R200_PP_TXCBLEND_1 0x2f10 ++#define R200_PP_TXCBLEND_2 0x2f20 ++#define R200_PP_TXCBLEND_3 0x2f30 ++#define R200_PP_TXCBLEND_4 0x2f40 ++#define R200_PP_TXCBLEND_5 0x2f50 ++#define R200_PP_TXCBLEND_6 0x2f60 ++#define R200_PP_TXCBLEND_7 0x2f70 ++#define R200_SE_TCL_LIGHT_MODEL_CTL_0 0x2268 ++#define R200_PP_TFACTOR_0 0x2ee0 ++#define R200_SE_VTX_FMT_0 0x2088 ++#define R200_SE_VAP_CNTL 0x2080 ++#define R200_SE_TCL_MATRIX_SEL_0 
0x2230 ++#define R200_SE_TCL_TEX_PROC_CTL_2 0x22a8 ++#define R200_SE_TCL_UCP_VERT_BLEND_CTL 0x22c0 ++#define R200_PP_TXFILTER_5 0x2ca0 ++#define R200_PP_TXFILTER_4 0x2c80 ++#define R200_PP_TXFILTER_3 0x2c60 ++#define R200_PP_TXFILTER_2 0x2c40 ++#define R200_PP_TXFILTER_1 0x2c20 ++#define R200_PP_TXFILTER_0 0x2c00 ++#define R200_PP_TXOFFSET_5 0x2d78 ++#define R200_PP_TXOFFSET_4 0x2d60 ++#define R200_PP_TXOFFSET_3 0x2d48 ++#define R200_PP_TXOFFSET_2 0x2d30 ++#define R200_PP_TXOFFSET_1 0x2d18 ++#define R200_PP_TXOFFSET_0 0x2d00 ++ ++#define R200_PP_CUBIC_FACES_0 0x2c18 ++#define R200_PP_CUBIC_FACES_1 0x2c38 ++#define R200_PP_CUBIC_FACES_2 0x2c58 ++#define R200_PP_CUBIC_FACES_3 0x2c78 ++#define R200_PP_CUBIC_FACES_4 0x2c98 ++#define R200_PP_CUBIC_FACES_5 0x2cb8 ++#define R200_PP_CUBIC_OFFSET_F1_0 0x2d04 ++#define R200_PP_CUBIC_OFFSET_F2_0 0x2d08 ++#define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c ++#define R200_PP_CUBIC_OFFSET_F4_0 0x2d10 ++#define R200_PP_CUBIC_OFFSET_F5_0 0x2d14 ++#define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c ++#define R200_PP_CUBIC_OFFSET_F2_1 0x2d20 ++#define R200_PP_CUBIC_OFFSET_F3_1 0x2d24 ++#define R200_PP_CUBIC_OFFSET_F4_1 0x2d28 ++#define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c ++#define R200_PP_CUBIC_OFFSET_F1_2 0x2d34 ++#define R200_PP_CUBIC_OFFSET_F2_2 0x2d38 ++#define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c ++#define R200_PP_CUBIC_OFFSET_F4_2 0x2d40 ++#define R200_PP_CUBIC_OFFSET_F5_2 0x2d44 ++#define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c ++#define R200_PP_CUBIC_OFFSET_F2_3 0x2d50 ++#define R200_PP_CUBIC_OFFSET_F3_3 0x2d54 ++#define R200_PP_CUBIC_OFFSET_F4_3 0x2d58 ++#define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c ++#define R200_PP_CUBIC_OFFSET_F1_4 0x2d64 ++#define R200_PP_CUBIC_OFFSET_F2_4 0x2d68 ++#define R200_PP_CUBIC_OFFSET_F3_4 0x2d6c ++#define R200_PP_CUBIC_OFFSET_F4_4 0x2d70 ++#define R200_PP_CUBIC_OFFSET_F5_4 0x2d74 ++#define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c ++#define R200_PP_CUBIC_OFFSET_F2_5 0x2d80 ++#define R200_PP_CUBIC_OFFSET_F3_5 0x2d84 ++#define R200_PP_CUBIC_OFFSET_F4_5 0x2d88 ++#define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c ++ ++#define R200_RE_AUX_SCISSOR_CNTL 0x26f0 ++#define R200_SE_VTE_CNTL 0x20b0 ++#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250 ++#define R200_PP_TAM_DEBUG3 0x2d9c ++#define R200_PP_CNTL_X 0x2cc4 ++#define R200_SE_VAP_CNTL_STATUS 0x2140 ++#define R200_RE_SCISSOR_TL_0 0x1cd8 ++#define R200_RE_SCISSOR_TL_1 0x1ce0 ++#define R200_RE_SCISSOR_TL_2 0x1ce8 ++#define R200_RB3D_DEPTHXY_OFFSET 0x1d60 ++#define R200_RE_AUX_SCISSOR_CNTL 0x26f0 ++#define R200_SE_VTX_STATE_CNTL 0x2180 ++#define R200_RE_POINTSIZE 0x2648 ++#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254 ++ ++#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */ ++#define RADEON_PP_TEX_SIZE_1 0x1d0c ++#define RADEON_PP_TEX_SIZE_2 0x1d14 ++ ++#define RADEON_PP_CUBIC_FACES_0 0x1d24 ++#define RADEON_PP_CUBIC_FACES_1 0x1d28 ++#define RADEON_PP_CUBIC_FACES_2 0x1d2c ++#define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */ ++#define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00 ++#define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14 ++ ++#define RADEON_SE_TCL_STATE_FLUSH 0x2284 ++ ++#define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001 ++#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000 ++#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012 ++#define SE_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100 ++#define SE_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200 ++#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK 0x00000001 ++#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK 0x00000002 ++#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT 0x0000000b ++#define R200_3D_DRAW_IMMD_2 0xC0003500 ++#define 
R200_SE_VTX_FMT_1 0x208c ++#define R200_RE_CNTL 0x1c50 ++ ++#define R200_RB3D_BLENDCOLOR 0x3218 ++ ++#define R200_SE_TCL_POINT_SPRITE_CNTL 0x22c4 ++ ++#define R200_PP_TRI_PERF 0x2cf8 ++ ++#define R200_PP_AFS_0 0x2f80 ++#define R200_PP_AFS_1 0x2f00 /* same as txcblend_0 */ ++ ++#define R200_VAP_PVS_CNTL_1 0x22D0 ++ ++/* MPEG settings from VHA code */ ++#define RADEON_VHA_SETTO16_1 0x2694 ++#define RADEON_VHA_SETTO16_2 0x2680 ++#define RADEON_VHA_SETTO0_1 0x1840 ++#define RADEON_VHA_FB_OFFSET 0x19e4 ++#define RADEON_VHA_SETTO1AND70S 0x19d8 ++#define RADEON_VHA_DST_PITCH 0x1408 ++ ++/* set as reference header */ ++#define RADEON_VHA_BACKFRAME0_OFF_Y 0x1840 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y 0x1844 ++#define RADEON_VHA_BACKFRAME0_OFF_U 0x1848 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U 0x184c ++#define RADEON_VHA_BACKFRAME0_OFF_V 0x1850 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V 0x1854 ++#define RADEON_VHA_FORWFRAME0_OFF_Y 0x1858 ++#define RADEON_VHA_FORWFRAME1_OFF_PITCH_Y 0x185c ++#define RADEON_VHA_FORWFRAME0_OFF_U 0x1860 ++#define RADEON_VHA_FORWFRAME1_OFF_PITCH_U 0x1864 ++#define RADEON_VHA_FORWFRAME0_OFF_V 0x1868 ++#define RADEON_VHA_FORWFRAME0_OFF_PITCH_V 0x1880 ++#define RADEON_VHA_BACKFRAME0_OFF_Y_2 0x1884 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y_2 0x1888 ++#define RADEON_VHA_BACKFRAME0_OFF_U_2 0x188c ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U_2 0x1890 ++#define RADEON_VHA_BACKFRAME0_OFF_V_2 0x1894 ++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V_2 0x1898 ++ ++#define R500_D1CRTC_STATUS 0x609c ++#define R500_D2CRTC_STATUS 0x689c ++#define R500_CRTC_V_BLANK (1<<0) ++ ++#define R500_D1CRTC_FRAME_COUNT 0x60a4 ++#define R500_D2CRTC_FRAME_COUNT 0x68a4 ++ ++#define R500_D1MODE_V_COUNTER 0x6530 ++#define R500_D2MODE_V_COUNTER 0x6d30 ++ ++#define R500_D1MODE_VBLANK_STATUS 0x6534 ++#define R500_D2MODE_VBLANK_STATUS 0x6d34 ++#define R500_VBLANK_OCCURED (1<<0) ++#define R500_VBLANK_ACK (1<<4) ++#define R500_VBLANK_STAT (1<<12) ++#define R500_VBLANK_INT (1<<16) ++ ++#define R500_DxMODE_INT_MASK 0x6540 ++#define R500_D1MODE_INT_MASK (1<<0) ++#define R500_D2MODE_INT_MASK (1<<8) ++ ++#define R500_DISP_INTERRUPT_STATUS 0x7edc ++#define R500_D1_VBLANK_INTERRUPT (1 << 4) ++#define R500_D2_VBLANK_INTERRUPT (1 << 5) ++ ++/* Constants */ ++#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ ++ ++#define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0 ++#define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1 ++#define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2 ++#define RADEON_LAST_SWI_REG RADEON_SCRATCH_REG3 ++#define RADEON_LAST_DISPATCH 1 ++ ++#define RADEON_MAX_VB_AGE 0x7fffffff ++#define RADEON_MAX_VB_VERTS (0xffff) ++ ++#define RADEON_RING_HIGH_MARK 128 ++ ++#define RADEON_PCIGART_TABLE_SIZE (32*1024) ++ ++#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) ++#define RADEON_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) ++#define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) ) ++#define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) ) ++ ++#define RADEON_WRITE_PLL( addr, val ) \ ++do { \ ++ RADEON_WRITE8( RADEON_CLOCK_CNTL_INDEX, \ ++ ((addr) & 0x1f) | RADEON_PLL_WR_EN ); \ ++ RADEON_WRITE( RADEON_CLOCK_CNTL_DATA, (val) ); \ ++} while (0) ++ ++#define RADEON_WRITE_PCIE( addr, val ) \ ++do { \ ++ RADEON_WRITE8( RADEON_PCIE_INDEX, \ ++ ((addr) & 0xff)); \ ++ RADEON_WRITE( RADEON_PCIE_DATA, (val) ); \ ++} while (0) ++ ++#define R500_WRITE_MCIND( addr, val ) \ ++do { \ ++ RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \ ++ RADEON_WRITE(R520_MC_IND_DATA, 
(val)); \ ++ RADEON_WRITE(R520_MC_IND_INDEX, 0); \ ++} while (0) ++ ++#define RS480_WRITE_MCIND( addr, val ) \ ++do { \ ++ RADEON_WRITE( RS480_NB_MC_INDEX, \ ++ ((addr) & 0xff) | RS480_NB_MC_IND_WR_EN); \ ++ RADEON_WRITE( RS480_NB_MC_DATA, (val) ); \ ++ RADEON_WRITE( RS480_NB_MC_INDEX, 0xff ); \ ++} while (0) ++ ++#define RS690_WRITE_MCIND( addr, val ) \ ++do { \ ++ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \ ++ RADEON_WRITE(RS690_MC_DATA, val); \ ++ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \ ++} while (0) ++ ++#define IGP_WRITE_MCIND( addr, val ) \ ++do { \ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \ ++ RS690_WRITE_MCIND( addr, val ); \ ++ else \ ++ RS480_WRITE_MCIND( addr, val ); \ ++} while (0) ++ ++#define CP_PACKET0( reg, n ) \ ++ (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2)) ++#define CP_PACKET0_TABLE( reg, n ) \ ++ (RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2)) ++#define CP_PACKET1( reg0, reg1 ) \ ++ (RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2)) ++#define CP_PACKET2() \ ++ (RADEON_CP_PACKET2) ++#define CP_PACKET3( pkt, n ) \ ++ (RADEON_CP_PACKET3 | (pkt) | ((n) << 16)) ++ ++/* ================================================================ ++ * Engine control helper macros ++ */ ++ ++#define RADEON_WAIT_UNTIL_2D_IDLE() do { \ ++ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ ++ OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \ ++ RADEON_WAIT_HOST_IDLECLEAN) ); \ ++} while (0) ++ ++#define RADEON_WAIT_UNTIL_3D_IDLE() do { \ ++ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ ++ OUT_RING( (RADEON_WAIT_3D_IDLECLEAN | \ ++ RADEON_WAIT_HOST_IDLECLEAN) ); \ ++} while (0) ++ ++#define RADEON_WAIT_UNTIL_IDLE() do { \ ++ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ ++ OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \ ++ RADEON_WAIT_3D_IDLECLEAN | \ ++ RADEON_WAIT_HOST_IDLECLEAN) ); \ ++} while (0) ++ ++#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do { \ ++ OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \ ++ OUT_RING( RADEON_WAIT_CRTC_PFLIP ); \ ++} while (0) ++ ++#define RADEON_FLUSH_CACHE() do { \ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ ++ OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \ ++ OUT_RING(RADEON_RB3D_DC_FLUSH); \ ++ } else { \ ++ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ ++ OUT_RING(R300_RB3D_DC_FLUSH); \ ++ } \ ++} while (0) ++ ++#define RADEON_PURGE_CACHE() do { \ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ ++ OUT_RING(CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \ ++ OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \ ++ } else { \ ++ OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ ++ OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE ); \ ++ } \ ++} while (0) ++ ++#define RADEON_FLUSH_ZCACHE() do { \ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ ++ OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \ ++ OUT_RING( RADEON_RB3D_ZC_FLUSH ); \ ++ } else { \ ++ OUT_RING( CP_PACKET0( R300_ZB_ZCACHE_CTLSTAT, 0 ) ); \ ++ OUT_RING( R300_ZC_FLUSH ); \ ++ } \ ++} while (0) ++ ++#define RADEON_PURGE_ZCACHE() do { \ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ ++ OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \ ++ OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \ ++ } else { \ ++ OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \ ++ OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \ ++ } \ ++} while (0) ++ ++/* 
================================================================ ++ * Misc helper macros ++ */ ++ ++/* Perfbox functionality only. ++ */ ++#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ ++do { \ ++ if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) { \ ++ u32 head = GET_RING_HEAD( dev_priv ); \ ++ if (head == dev_priv->ring.tail) \ ++ dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE; \ ++ } \ ++} while (0) ++ ++#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ ++do { \ ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \ ++ if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \ ++ int __ret = radeon_do_cp_idle( dev_priv ); \ ++ if ( __ret ) return __ret; \ ++ sarea_priv->last_dispatch = 0; \ ++ radeon_freelist_reset( dev ); \ ++ } \ ++} while (0) ++ ++#define RADEON_DISPATCH_AGE( age ) do { \ ++ OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) ); \ ++ OUT_RING( age ); \ ++} while (0) ++ ++#define RADEON_FRAME_AGE( age ) do { \ ++ OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) ); \ ++ OUT_RING( age ); \ ++} while (0) ++ ++#define RADEON_CLEAR_AGE( age ) do { \ ++ OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) ); \ ++ OUT_RING( age ); \ ++} while (0) ++ ++/* ================================================================ ++ * Ring control ++ */ ++ ++#define RADEON_VERBOSE 0 ++ ++#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring; ++ ++#define BEGIN_RING( n ) do { \ ++ if ( RADEON_VERBOSE ) { \ ++ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ ++ } \ ++ if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ ++ COMMIT_RING(); \ ++ radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \ ++ } \ ++ _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ ++ ring = dev_priv->ring.start; \ ++ write = dev_priv->ring.tail; \ ++ mask = dev_priv->ring.tail_mask; \ ++} while (0) ++ ++#define ADVANCE_RING() do { \ ++ if ( RADEON_VERBOSE ) { \ ++ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ ++ write, dev_priv->ring.tail ); \ ++ } \ ++ if (((dev_priv->ring.tail + _nr) & mask) != write) { \ ++ DRM_ERROR( \ ++ "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ ++ ((dev_priv->ring.tail + _nr) & mask), \ ++ write, __LINE__); \ ++ } else \ ++ dev_priv->ring.tail = write; \ ++} while (0) ++ ++#define COMMIT_RING() do { \ ++ /* Flush writes to ring */ \ ++ DRM_MEMORYBARRIER(); \ ++ GET_RING_HEAD( dev_priv ); \ ++ RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \ ++ /* read from PCI bus to ensure correct posting */ \ ++ RADEON_READ( RADEON_CP_RB_RPTR ); \ ++} while (0) ++ ++#define OUT_RING( x ) do { \ ++ if ( RADEON_VERBOSE ) { \ ++ DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \ ++ (unsigned int)(x), write ); \ ++ } \ ++ ring[write++] = (x); \ ++ write &= mask; \ ++} while (0) ++ ++#define OUT_RING_REG( reg, val ) do { \ ++ OUT_RING( CP_PACKET0( reg, 0 ) ); \ ++ OUT_RING( val ); \ ++} while (0) ++ ++#define OUT_RING_TABLE( tab, sz ) do { \ ++ int _size = (sz); \ ++ int *_tab = (int *)(tab); \ ++ \ ++ if (write + _size > mask) { \ ++ int _i = (mask+1) - write; \ ++ _size -= _i; \ ++ while (_i > 0) { \ ++ *(int *)(ring + write) = *_tab++; \ ++ write++; \ ++ _i--; \ ++ } \ ++ write = 0; \ ++ _tab += _i; \ ++ } \ ++ while (_size > 0) { \ ++ *(ring + write) = *_tab++; \ ++ write++; \ ++ _size--; \ ++ } \ ++ write &= mask; \ ++} while (0) ++ ++#endif /* __RADEON_DRV_H__ */ +diff -Nurd git/drivers/gpu/drm-tungsten/radeon_ioc32.c git-nokia/drivers/gpu/drm-tungsten/radeon_ioc32.c +--- git/drivers/gpu/drm-tungsten/radeon_ioc32.c 1970-01-01 01:00:00.000000000 +0100 ++++ 
git-nokia/drivers/gpu/drm-tungsten/radeon_ioc32.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,424 @@ ++/** ++ * \file radeon_ioc32.c ++ * ++ * 32-bit ioctl compatibility routines for the Radeon DRM. ++ * ++ * \author Paul Mackerras ++ * ++ * Copyright (C) Paul Mackerras 2005 ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS ++ * IN THE SOFTWARE. ++ */ ++#include <linux/compat.h> ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++typedef struct drm_radeon_init32 { ++ int func; ++ u32 sarea_priv_offset; ++ int is_pci; ++ int cp_mode; ++ int gart_size; ++ int ring_size; ++ int usec_timeout; ++ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ ++ u32 fb_offset; ++ u32 mmio_offset; ++ u32 ring_offset; ++ u32 ring_rptr_offset; ++ u32 buffers_offset; ++ u32 gart_textures_offset; ++} drm_radeon_init32_t; ++ ++static int compat_radeon_cp_init(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_init32_t init32; ++ drm_radeon_init_t __user *init; ++ ++ if (copy_from_user(&init32, (void __user *)arg, sizeof(init32))) ++ return -EFAULT; ++ ++ init = compat_alloc_user_space(sizeof(*init)); ++ if (!access_ok(VERIFY_WRITE, init, sizeof(*init)) ++ || __put_user(init32.func, &init->func) ++ || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset) ++ || __put_user(init32.is_pci, &init->is_pci) ++ || __put_user(init32.cp_mode, &init->cp_mode) ++ || __put_user(init32.gart_size, &init->gart_size) ++ || __put_user(init32.ring_size, &init->ring_size) ++ || __put_user(init32.usec_timeout, &init->usec_timeout) ++ || __put_user(init32.fb_bpp, &init->fb_bpp) ++ || __put_user(init32.front_offset, &init->front_offset) ++ || __put_user(init32.front_pitch, &init->front_pitch) ++ || __put_user(init32.back_offset, &init->back_offset) ++ || __put_user(init32.back_pitch, &init->back_pitch) ++ || __put_user(init32.depth_bpp, &init->depth_bpp) ++ || __put_user(init32.depth_offset, &init->depth_offset) ++ || __put_user(init32.depth_pitch, &init->depth_pitch) ++ || __put_user(init32.fb_offset, &init->fb_offset) ++ || __put_user(init32.mmio_offset, &init->mmio_offset) ++ || __put_user(init32.ring_offset, &init->ring_offset) ++ || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset) ++ || __put_user(init32.buffers_offset, &init->buffers_offset) ++ || 
__put_user(init32.gart_textures_offset, ++ &init->gart_textures_offset)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_CP_INIT, (unsigned long) init); ++} ++ ++typedef struct drm_radeon_clear32 { ++ unsigned int flags; ++ unsigned int clear_color; ++ unsigned int clear_depth; ++ unsigned int color_mask; ++ unsigned int depth_mask; /* misnamed field: should be stencil */ ++ u32 depth_boxes; ++} drm_radeon_clear32_t; ++ ++static int compat_radeon_cp_clear(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_clear32_t clr32; ++ drm_radeon_clear_t __user *clr; ++ ++ if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32))) ++ return -EFAULT; ++ ++ clr = compat_alloc_user_space(sizeof(*clr)); ++ if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr)) ++ || __put_user(clr32.flags, &clr->flags) ++ || __put_user(clr32.clear_color, &clr->clear_color) ++ || __put_user(clr32.clear_depth, &clr->clear_depth) ++ || __put_user(clr32.color_mask, &clr->color_mask) ++ || __put_user(clr32.depth_mask, &clr->depth_mask) ++ || __put_user((void __user *)(unsigned long)clr32.depth_boxes, ++ &clr->depth_boxes)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_CLEAR, (unsigned long) clr); ++} ++ ++typedef struct drm_radeon_stipple32 { ++ u32 mask; ++} drm_radeon_stipple32_t; ++ ++static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_stipple32_t __user *argp = (void __user *)arg; ++ drm_radeon_stipple_t __user *request; ++ u32 mask; ++ ++ if (get_user(mask, &argp->mask)) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user((unsigned int __user *)(unsigned long) mask, ++ &request->mask)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_STIPPLE, (unsigned long) request); ++} ++ ++typedef struct drm_radeon_tex_image32 { ++ unsigned int x, y; /* Blit coordinates */ ++ unsigned int width, height; ++ u32 data; ++} drm_radeon_tex_image32_t; ++ ++typedef struct drm_radeon_texture32 { ++ unsigned int offset; ++ int pitch; ++ int format; ++ int width; /* Texture image coordinates */ ++ int height; ++ u32 image; ++} drm_radeon_texture32_t; ++ ++static int compat_radeon_cp_texture(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_texture32_t req32; ++ drm_radeon_texture_t __user *request; ++ drm_radeon_tex_image32_t img32; ++ drm_radeon_tex_image_t __user *image; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ if (req32.image == 0) ++ return -EINVAL; ++ if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image, ++ sizeof(img32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request) + sizeof(*image)); ++ if (!access_ok(VERIFY_WRITE, request, ++ sizeof(*request) + sizeof(*image))) ++ return -EFAULT; ++ image = (drm_radeon_tex_image_t __user *) (request + 1); ++ ++ if (__put_user(req32.offset, &request->offset) ++ || __put_user(req32.pitch, &request->pitch) ++ || __put_user(req32.format, &request->format) ++ || __put_user(req32.width, &request->width) ++ || __put_user(req32.height, &request->height) ++ || __put_user(image, &request->image) ++ || __put_user(img32.x, &image->x) ++ || __put_user(img32.y, &image->y) ++ || __put_user(img32.width, &image->width) ++ || __put_user(img32.height, &image->height) ++ || 
__put_user((const void __user *)(unsigned long)img32.data, ++ &image->data)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_TEXTURE, (unsigned long) request); ++} ++ ++typedef struct drm_radeon_vertex2_32 { ++ int idx; /* Index of vertex buffer */ ++ int discard; /* Client finished with buffer? */ ++ int nr_states; ++ u32 state; ++ int nr_prims; ++ u32 prim; ++} drm_radeon_vertex2_32_t; ++ ++static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_vertex2_32_t req32; ++ drm_radeon_vertex2_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.idx, &request->idx) ++ || __put_user(req32.discard, &request->discard) ++ || __put_user(req32.nr_states, &request->nr_states) ++ || __put_user((void __user *)(unsigned long)req32.state, ++ &request->state) ++ || __put_user(req32.nr_prims, &request->nr_prims) ++ || __put_user((void __user *)(unsigned long)req32.prim, ++ &request->prim)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_VERTEX2, (unsigned long) request); ++} ++ ++typedef struct drm_radeon_cmd_buffer32 { ++ int bufsz; ++ u32 buf; ++ int nbox; ++ u32 boxes; ++} drm_radeon_cmd_buffer32_t; ++ ++static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_cmd_buffer32_t req32; ++ drm_radeon_cmd_buffer_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.bufsz, &request->bufsz) ++ || __put_user((void __user *)(unsigned long)req32.buf, ++ &request->buf) ++ || __put_user(req32.nbox, &request->nbox) ++ || __put_user((void __user *)(unsigned long)req32.boxes, ++ &request->boxes)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_CMDBUF, (unsigned long) request); ++} ++ ++typedef struct drm_radeon_getparam32 { ++ int param; ++ u32 value; ++} drm_radeon_getparam32_t; ++ ++static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_getparam32_t req32; ++ drm_radeon_getparam_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.param, &request->param) ++ || __put_user((void __user *)(unsigned long)req32.value, ++ &request->value)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_GETPARAM, (unsigned long) request); ++} ++ ++typedef struct drm_radeon_mem_alloc32 { ++ int region; ++ int alignment; ++ int size; ++ u32 region_offset; /* offset from start of fb or GART */ ++} drm_radeon_mem_alloc32_t; ++ ++static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_mem_alloc32_t req32; ++ drm_radeon_mem_alloc_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || 
__put_user(req32.region, &request->region) ++ || __put_user(req32.alignment, &request->alignment) ++ || __put_user(req32.size, &request->size) ++ || __put_user((int __user *)(unsigned long)req32.region_offset, ++ &request->region_offset)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_ALLOC, (unsigned long) request); ++} ++ ++typedef struct drm_radeon_irq_emit32 { ++ u32 irq_seq; ++} drm_radeon_irq_emit32_t; ++ ++static int compat_radeon_irq_emit(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_irq_emit32_t req32; ++ drm_radeon_irq_emit_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user((int __user *)(unsigned long)req32.irq_seq, ++ &request->irq_seq)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long) request); ++} ++ ++/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */ ++#if defined (CONFIG_X86_64) || defined(CONFIG_IA64) ++typedef struct drm_radeon_setparam32 { ++ int param; ++ u64 value; ++} __attribute__((packed)) drm_radeon_setparam32_t; ++ ++static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ drm_radeon_setparam32_t req32; ++ drm_radeon_setparam_t __user *request; ++ ++ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) ++ return -EFAULT; ++ ++ request = compat_alloc_user_space(sizeof(*request)); ++ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ++ || __put_user(req32.param, &request->param) ++ || __put_user((void __user *)(unsigned long)req32.value, ++ &request->value)) ++ return -EFAULT; ++ ++ return drm_ioctl(file->f_dentry->d_inode, file, ++ DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request); ++} ++#else ++#define compat_radeon_cp_setparam NULL ++#endif /* X86_64 || IA64 */ ++ ++drm_ioctl_compat_t *radeon_compat_ioctls[] = { ++ [DRM_RADEON_CP_INIT] = compat_radeon_cp_init, ++ [DRM_RADEON_CLEAR] = compat_radeon_cp_clear, ++ [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple, ++ [DRM_RADEON_TEXTURE] = compat_radeon_cp_texture, ++ [DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2, ++ [DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf, ++ [DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam, ++ [DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam, ++ [DRM_RADEON_ALLOC] = compat_radeon_mem_alloc, ++ [DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit, ++}; ++ ++/** ++ * Called whenever a 32-bit process running under a 64-bit kernel ++ * performs an ioctl on /dev/dri/card. ++ * ++ * \param filp file pointer. ++ * \param cmd command. ++ * \param arg user argument. ++ * \return zero on success or negative number on failure. 
++ */ ++long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ drm_ioctl_compat_t *fn = NULL; ++ int ret; ++ ++ if (nr < DRM_COMMAND_BASE) ++ return drm_compat_ioctl(filp, cmd, arg); ++ ++ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) ++ fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; ++ ++ lock_kernel(); /* XXX for now */ ++ if (fn != NULL) ++ ret = (*fn)(filp, cmd, arg); ++ else ++ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); ++ unlock_kernel(); ++ ++ return ret; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/radeon_irq.c git-nokia/drivers/gpu/drm-tungsten/radeon_irq.c +--- git/drivers/gpu/drm-tungsten/radeon_irq.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/radeon_irq.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,390 @@ ++/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */ ++/* ++ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. 
++ *
++ * Authors:
++ *    Keith Whitwell
++ *    Michel Dänzer
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++
++void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
++{
++	drm_radeon_private_t *dev_priv = dev->dev_private;
++
++	if (state)
++		dev_priv->irq_enable_reg |= mask;
++	else
++		dev_priv->irq_enable_reg &= ~mask;
++
++	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
++}
++
++static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
++{
++	drm_radeon_private_t *dev_priv = dev->dev_private;
++
++	if (state)
++		dev_priv->r500_disp_irq_reg |= mask;
++	else
++		dev_priv->r500_disp_irq_reg &= ~mask;
++
++	RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
++}
++
++int radeon_enable_vblank(struct drm_device *dev, int crtc)
++{
++	drm_radeon_private_t *dev_priv = dev->dev_private;
++
++	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
++		switch (crtc) {
++		case 0:
++			r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
++			break;
++		case 1:
++			r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
++			break;
++		default:
++			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++				  crtc);
++			return -EINVAL;
++		}
++	} else {
++		switch (crtc) {
++		case 0:
++			radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
++			break;
++		case 1:
++			radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
++			break;
++		default:
++			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++				  crtc);
++			return -EINVAL;
++		}
++	}
++
++	return 0;
++}
++
++void radeon_disable_vblank(struct drm_device *dev, int crtc)
++{
++	drm_radeon_private_t *dev_priv = dev->dev_private;
++
++	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
++		switch (crtc) {
++		case 0:
++			r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
++			break;
++		case 1:
++			r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
++			break;
++		default:
++			DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++				  crtc);
++			break;
++		}
++	} else {
++		switch (crtc) {
++		case 0:
++			radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
++			break;
++		case 1:
++			radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
++			break;
++		default:
++			DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++				  crtc);
++			break;
++		}
++	}
++}
++
++static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, u32 *r500_disp_int)
++{
++	u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
++	u32 irq_mask = RADEON_SW_INT_TEST;
++
++	*r500_disp_int = 0;
++	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
++		/* vbl interrupts in a different place */
++
++		if (irqs & R500_DISPLAY_INT_STATUS) {
++			/* if a display interrupt */
++			u32 disp_irq;
++
++			disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
++
++			*r500_disp_int = disp_irq;
++			if (disp_irq & R500_D1_VBLANK_INTERRUPT) {
++				RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
++			}
++			if (disp_irq & R500_D2_VBLANK_INTERRUPT) {
++				RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
++			}
++		}
++		irq_mask |= R500_DISPLAY_INT_STATUS;
++	} else
++		irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
++
++	irqs &= irq_mask;
++
++	if (irqs)
++		RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
++
++	return irqs;
++}
++
++/* Interrupts - Used for device synchronization and flushing in the
++ * following circumstances:
++ *
++ * - Exclusive FB access with hw idle:
++ *    - Wait for GUI Idle (?) interrupt, then do normal flush.
++ * ++ * - Frame throttling, NV_fence: ++ * - Drop marker irq's into command stream ahead of time. ++ * - Wait on irq's with lock *not held* ++ * - Check each for termination condition ++ * ++ * - Internally in cp_getbuffer, etc: ++ * - as above, but wait with lock held??? ++ * ++ * NOTE: These functions are misleadingly named -- the irq's aren't ++ * tied to dma at all, this is just a hangover from dri prehistory. ++ */ ++ ++irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) ++{ ++ struct drm_device *dev = (struct drm_device *) arg; ++ drm_radeon_private_t *dev_priv = ++ (drm_radeon_private_t *) dev->dev_private; ++ u32 stat; ++ u32 r500_disp_int; ++ ++ /* Only consider the bits we're interested in - others could be used ++ * outside the DRM ++ */ ++ stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int); ++ if (!stat) ++ return IRQ_NONE; ++ ++ stat &= dev_priv->irq_enable_reg; ++ ++ /* SW interrupt */ ++ if (stat & RADEON_SW_INT_TEST) ++ DRM_WAKEUP(&dev_priv->swi_queue); ++ ++ /* VBLANK interrupt */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { ++ if (r500_disp_int & R500_D1_VBLANK_INTERRUPT) ++ drm_handle_vblank(dev, 0); ++ if (r500_disp_int & R500_D2_VBLANK_INTERRUPT) ++ drm_handle_vblank(dev, 1); ++ } else { ++ if (stat & RADEON_CRTC_VBLANK_STAT) ++ drm_handle_vblank(dev, 0); ++ if (stat & RADEON_CRTC2_VBLANK_STAT) ++ drm_handle_vblank(dev, 1); ++ } ++ return IRQ_HANDLED; ++} ++ ++static int radeon_emit_irq(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ unsigned int ret; ++ RING_LOCALS; ++ ++ atomic_inc(&dev_priv->swi_emitted); ++ ret = atomic_read(&dev_priv->swi_emitted); ++ ++ BEGIN_RING(4); ++ OUT_RING_REG(RADEON_LAST_SWI_REG, ret); ++ OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE); ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ ++ return ret; ++} ++ ++static int radeon_wait_irq(struct drm_device * dev, int swi_nr) ++{ ++ drm_radeon_private_t *dev_priv = ++ (drm_radeon_private_t *) dev->dev_private; ++ int ret = 0; ++ ++ if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr) ++ return 0; ++ ++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ++ ++ DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ, ++ RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr); ++ ++ return ret; ++} ++ ++u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ if (crtc < 0 || crtc > 1) { ++ DRM_ERROR("Invalid crtc %d\n", crtc); ++ return -EINVAL; ++ } ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { ++ if (crtc == 0) ++ return RADEON_READ(R500_D1CRTC_FRAME_COUNT); ++ else ++ return RADEON_READ(R500_D2CRTC_FRAME_COUNT); ++ } else { ++ if (crtc == 0) ++ return RADEON_READ(RADEON_CRTC_CRNT_FRAME); ++ else ++ return RADEON_READ(RADEON_CRTC2_CRNT_FRAME); ++ } ++} ++ ++/* Needs the lock as it touches the ring. ++ */ ++int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_irq_emit_t *emit = data; ++ int result; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ result = radeon_emit_irq(dev); ++ ++ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++/* Doesn't need the hardware lock. 
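++ *
++ * Together with radeon_irq_emit() above, this gives userspace the fence
++ * pattern described at the top of this file: emit a marker into the
++ * command stream, keep queueing work, and block on the marker later.
++ * Roughly (an illustrative sketch only, with fd an open DRM device):
++ *
++ *	int seq;
++ *	drm_radeon_irq_emit_t emit = { &seq };
++ *	drm_radeon_irq_wait_t wait;
++ *
++ *	ioctl(fd, DRM_IOCTL_RADEON_IRQ_EMIT, &emit);
++ *	... queue further work ...
++ *	wait.irq_seq = seq;
++ *	ioctl(fd, DRM_IOCTL_RADEON_IRQ_WAIT, &wait);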
++ */ ++int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_irq_wait_t *irqwait = data; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ return radeon_wait_irq(dev, irqwait->irq_seq); ++} ++ ++/* drm_dma.h hooks ++*/ ++void radeon_driver_irq_preinstall(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = ++ (drm_radeon_private_t *) dev->dev_private; ++ u32 dummy; ++ ++ /* Disable *all* interrupts */ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) ++ RADEON_WRITE(R500_DxMODE_INT_MASK, 0); ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); ++ ++ /* Clear bits if they're already high */ ++ radeon_acknowledge_irqs(dev_priv, &dummy); ++} ++ ++int radeon_driver_irq_postinstall(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = ++ (drm_radeon_private_t *) dev->dev_private; ++ int ret; ++ ++ atomic_set(&dev_priv->swi_emitted, 0); ++ DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); ++ ++ ret = drm_vblank_init(dev, 2); ++ if (ret) ++ return ret; ++ ++ dev->max_vblank_count = 0x001fffff; ++ ++ radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); ++ ++ return 0; ++} ++ ++void radeon_driver_irq_uninstall(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = ++ (drm_radeon_private_t *) dev->dev_private; ++ if (!dev_priv) ++ return; ++ ++ dev_priv->irq_enabled = 0; ++ ++ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) ++ RADEON_WRITE(R500_DxMODE_INT_MASK, 0); ++ /* Disable *all* interrupts */ ++ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); ++} ++ ++ ++int radeon_vblank_crtc_get(struct drm_device *dev) ++{ ++ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; ++ ++ return dev_priv->vblank_crtc; ++} ++ ++int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value) ++{ ++ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; ++ if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { ++ DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value); ++ return -EINVAL; ++ } ++ dev_priv->vblank_crtc = (unsigned int)value; ++ return 0; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/radeon_mem.c git-nokia/drivers/gpu/drm-tungsten/radeon_mem.c +--- git/drivers/gpu/drm-tungsten/radeon_mem.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/radeon_mem.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,302 @@ ++/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */ ++/* ++ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. ++ * ++ * The Weather Channel (TM) funded Tungsten Graphics to develop the ++ * initial release of the Radeon 8500 driver under the XFree86 license. ++ * This notice must be preserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Keith Whitwell ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++/* Very simple allocator for GART memory, working on a static range ++ * already mapped into each client's address space. ++ */ ++ ++static struct mem_block *split_block(struct mem_block *p, int start, int size, ++ struct drm_file *file_priv) ++{ ++ /* Maybe cut off the start of an existing block */ ++ if (start > p->start) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); ++ if (!newblock) ++ goto out; ++ newblock->start = start; ++ newblock->size = p->size - (start - p->start); ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size -= newblock->size; ++ p = newblock; ++ } ++ ++ /* Maybe cut off the end of an existing block */ ++ if (size < p->size) { ++ struct mem_block *newblock = ++ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); ++ if (!newblock) ++ goto out; ++ newblock->start = start + size; ++ newblock->size = p->size - size; ++ newblock->file_priv = NULL; ++ newblock->next = p->next; ++ newblock->prev = p; ++ p->next->prev = newblock; ++ p->next = newblock; ++ p->size = size; ++ } ++ ++ out: ++ /* Our block is in the middle */ ++ p->file_priv = file_priv; ++ return p; ++} ++ ++static struct mem_block *alloc_block(struct mem_block *heap, int size, ++ int align2, struct drm_file *file_priv) ++{ ++ struct mem_block *p; ++ int mask = (1 << align2) - 1; ++ ++ list_for_each(p, heap) { ++ int start = (p->start + mask) & ~mask; ++ if (p->file_priv == NULL && start + size <= p->start + p->size) ++ return split_block(p, start, size, file_priv); ++ } ++ ++ return NULL; ++} ++ ++static struct mem_block *find_block(struct mem_block *heap, int start) ++{ ++ struct mem_block *p; ++ ++ list_for_each(p, heap) ++ if (p->start == start) ++ return p; ++ ++ return NULL; ++} ++ ++static void free_block(struct mem_block *p) ++{ ++ p->file_priv = NULL; ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. ++ */ ++ if (p->next->file_priv == NULL) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_BUFS); ++ } ++ ++ if (p->prev->file_priv == NULL) { ++ struct mem_block *q = p->prev; ++ q->size += p->size; ++ q->next = p->next; ++ q->next->prev = q; ++ drm_free(p, sizeof(*q), DRM_MEM_BUFS); ++ } ++} ++ ++/* Initialize. How to check for an uninitialized heap? 
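++ *
++ * (Answered below: callers treat a NULL *heap as "not initialized", and
++ * the list head allocated here gets the sentinel file_priv value -1 so
++ * that it is never handed out by alloc_block() nor merged away by
++ * free_block().)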
++ */ ++static int init_heap(struct mem_block **heap, int start, int size) ++{ ++ struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); ++ ++ if (!blocks) ++ return -ENOMEM; ++ ++ *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); ++ if (!*heap) { ++ drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); ++ return -ENOMEM; ++ } ++ ++ blocks->start = start; ++ blocks->size = size; ++ blocks->file_priv = NULL; ++ blocks->next = blocks->prev = *heap; ++ ++ memset(*heap, 0, sizeof(**heap)); ++ (*heap)->file_priv = (struct drm_file *) - 1; ++ (*heap)->next = (*heap)->prev = blocks; ++ return 0; ++} ++ ++/* Free all blocks associated with the releasing file. ++ */ ++void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap) ++{ ++ struct mem_block *p; ++ ++ if (!heap || !heap->next) ++ return; ++ ++ list_for_each(p, heap) { ++ if (p->file_priv == file_priv) ++ p->file_priv = NULL; ++ } ++ ++ /* Assumes a single contiguous range. Needs a special file_priv in ++ * 'heap' to stop it being subsumed. ++ */ ++ list_for_each(p, heap) { ++ while (p->file_priv == NULL && p->next->file_priv == NULL) { ++ struct mem_block *q = p->next; ++ p->size += q->size; ++ p->next = q->next; ++ p->next->prev = p; ++ drm_free(q, sizeof(*q), DRM_MEM_DRIVER); ++ } ++ } ++} ++ ++/* Shutdown. ++ */ ++void radeon_mem_takedown(struct mem_block **heap) ++{ ++ struct mem_block *p; ++ ++ if (!*heap) ++ return; ++ ++ for (p = (*heap)->next; p != *heap;) { ++ struct mem_block *q = p; ++ p = p->next; ++ drm_free(q, sizeof(*q), DRM_MEM_DRIVER); ++ } ++ ++ drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER); ++ *heap = NULL; ++} ++ ++/* IOCTL HANDLERS */ ++ ++static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region) ++{ ++ switch (region) { ++ case RADEON_MEM_REGION_GART: ++ return &dev_priv->gart_heap; ++ case RADEON_MEM_REGION_FB: ++ return &dev_priv->fb_heap; ++ default: ++ return NULL; ++ } ++} ++ ++int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_mem_alloc_t *alloc = data; ++ struct mem_block *block, **heap; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ heap = get_heap(dev_priv, alloc->region); ++ if (!heap || !*heap) ++ return -EFAULT; ++ ++ /* Make things easier on ourselves: all allocations at least ++ * 4k aligned. 
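++	 * Note that 'alignment' is a log2 value: clamping it to 12 below
++	 * requests 1 << 12 = 4096-byte alignment, which alloc_block()
++	 * enforces by rounding each candidate start up with
++	 * mask = (1 << align2) - 1.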
++	 */
++	if (alloc->alignment < 12)
++		alloc->alignment = 12;
++
++	block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
++
++	if (!block)
++		return -ENOMEM;
++
++	if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
++			     sizeof(int))) {
++		DRM_ERROR("copy_to_user\n");
++		return -EFAULT;
++	}
++
++	return 0;
++}
++
++int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++	drm_radeon_private_t *dev_priv = dev->dev_private;
++	drm_radeon_mem_free_t *memfree = data;
++	struct mem_block *block, **heap;
++
++	if (!dev_priv) {
++		DRM_ERROR("called with no initialization\n");
++		return -EINVAL;
++	}
++
++	heap = get_heap(dev_priv, memfree->region);
++	if (!heap || !*heap)
++		return -EFAULT;
++
++	block = find_block(*heap, memfree->region_offset);
++	if (!block)
++		return -EFAULT;
++
++	if (block->file_priv != file_priv)
++		return -EPERM;
++
++	free_block(block);
++	return 0;
++}
++
++int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++	drm_radeon_private_t *dev_priv = dev->dev_private;
++	drm_radeon_mem_init_heap_t *initheap = data;
++	struct mem_block **heap;
++
++	if (!dev_priv) {
++		DRM_ERROR("called with no initialization\n");
++		return -EINVAL;
++	}
++
++	heap = get_heap(dev_priv, initheap->region);
++	if (!heap)
++		return -EFAULT;
++
++	if (*heap) {
++		DRM_ERROR("heap already initialized?");
++		return -EFAULT;
++	}
++
++	return init_heap(heap, initheap->start, initheap->size);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_microcode.h git-nokia/drivers/gpu/drm-tungsten/radeon_microcode.h
+--- git/drivers/gpu/drm-tungsten/radeon_microcode.h	1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_microcode.h	2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1844 @@
++/*
++ * Copyright 2007 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
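++ *
++ * Each *_cp_microcode[] table below holds { DATAL, DATAH } word pairs
++ * for the CP microcode engine. A loader in the style of the driver's
++ * radeon_cp_load_microcode() streams them into the ME RAM, roughly
++ * (a sketch, assuming the usual register names from radeon_drv.h):
++ *
++ *	RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
++ *	for (i = 0; i < 256; i++) {
++ *		RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, R100_cp_microcode[i][1]);
++ *		RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, R100_cp_microcode[i][0]);
++ *	}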
++ * ++ */ ++ ++#ifndef RADEON_MICROCODE_H ++#define RADEON_MICROCODE_H ++ ++/* production radeon ucode r1xx-r6xx */ ++static const u32 R100_cp_microcode[][2]={ ++ { 0x21007000, 0000000000 }, ++ { 0x20007000, 0000000000 }, ++ { 0x000000b4, 0x00000004 }, ++ { 0x000000b8, 0x00000004 }, ++ { 0x6f5b4d4c, 0000000000 }, ++ { 0x4c4c427f, 0000000000 }, ++ { 0x5b568a92, 0000000000 }, ++ { 0x4ca09c6d, 0000000000 }, ++ { 0xad4c4c4c, 0000000000 }, ++ { 0x4ce1af3d, 0000000000 }, ++ { 0xd8afafaf, 0000000000 }, ++ { 0xd64c4cdc, 0000000000 }, ++ { 0x4cd10d10, 0000000000 }, ++ { 0x000f0000, 0x00000016 }, ++ { 0x362f242d, 0000000000 }, ++ { 0x00000012, 0x00000004 }, ++ { 0x000f0000, 0x00000016 }, ++ { 0x362f282d, 0000000000 }, ++ { 0x000380e7, 0x00000002 }, ++ { 0x04002c97, 0x00000002 }, ++ { 0x000f0001, 0x00000016 }, ++ { 0x333a3730, 0000000000 }, ++ { 0x000077ef, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000021, 0x0000001a }, ++ { 0x00004000, 0x0000001e }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000021, 0x0000001a }, ++ { 0x00004000, 0x0000001e }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000021, 0x0000001a }, ++ { 0x00004000, 0x0000001e }, ++ { 0x00000017, 0x00000004 }, ++ { 0x0003802b, 0x00000002 }, ++ { 0x040067e0, 0x00000002 }, ++ { 0x00000017, 0x00000004 }, ++ { 0x000077e0, 0x00000002 }, ++ { 0x00065000, 0x00000002 }, ++ { 0x000037e1, 0x00000002 }, ++ { 0x040067e1, 0x00000006 }, ++ { 0x000077e0, 0x00000002 }, ++ { 0x000077e1, 0x00000002 }, ++ { 0x000077e1, 0x00000006 }, ++ { 0xffffffff, 0000000000 }, ++ { 0x10000000, 0000000000 }, ++ { 0x0003802b, 0x00000002 }, ++ { 0x040067e0, 0x00000006 }, ++ { 0x00007675, 0x00000002 }, ++ { 0x00007676, 0x00000002 }, ++ { 0x00007677, 0x00000002 }, ++ { 0x00007678, 0x00000006 }, ++ { 0x0003802c, 0x00000002 }, ++ { 0x04002676, 0x00000002 }, ++ { 0x00007677, 0x00000002 }, ++ { 0x00007678, 0x00000006 }, ++ { 0x0000002f, 0x00000018 }, ++ { 0x0000002f, 0x00000018 }, ++ { 0000000000, 0x00000006 }, ++ { 0x00000030, 0x00000018 }, ++ { 0x00000030, 0x00000018 }, ++ { 0000000000, 0x00000006 }, ++ { 0x01605000, 0x00000002 }, ++ { 0x00065000, 0x00000002 }, ++ { 0x00098000, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x64c0603e, 0x00000004 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00080000, 0x00000016 }, ++ { 0000000000, 0000000000 }, ++ { 0x0400251d, 0x00000002 }, ++ { 0x00007580, 0x00000002 }, ++ { 0x00067581, 0x00000002 }, ++ { 0x04002580, 0x00000002 }, ++ { 0x00067581, 0x00000002 }, ++ { 0x00000049, 0x00000004 }, ++ { 0x00005000, 0000000000 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x0000750e, 0x00000002 }, ++ { 0x00019000, 0x00000002 }, ++ { 0x00011055, 0x00000014 }, ++ { 0x00000055, 0x00000012 }, ++ { 0x0400250f, 0x00000002 }, ++ { 0x0000504f, 0x00000004 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00007565, 0x00000002 }, ++ { 0x00007566, 0x00000002 }, ++ { 0x00000058, 0x00000004 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x01e655b4, 0x00000002 }, ++ { 0x4401b0e4, 0x00000002 }, ++ { 0x01c110e4, 0x00000002 }, ++ { 0x26667066, 0x00000018 }, ++ { 0x040c2565, 0x00000002 }, ++ { 0x00000066, 0x00000018 }, ++ { 0x04002564, 0x00000002 }, ++ { 0x00007566, 0x00000002 }, ++ { 0x0000005d, 0x00000004 }, ++ { 0x00401069, 0x00000008 }, ++ { 0x00101000, 0x00000002 }, ++ { 0x000d80ff, 0x00000002 }, ++ { 0x0080006c, 0x00000008 }, ++ { 0x000f9000, 0x00000002 }, ++ { 0x000e00ff, 0x00000002 }, ++ { 0000000000, 0x00000006 }, ++ { 
0x0000008f, 0x00000018 }, ++ { 0x0000005b, 0x00000004 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00007576, 0x00000002 }, ++ { 0x00065000, 0x00000002 }, ++ { 0x00009000, 0x00000002 }, ++ { 0x00041000, 0x00000002 }, ++ { 0x0c00350e, 0x00000002 }, ++ { 0x00049000, 0x00000002 }, ++ { 0x00051000, 0x00000002 }, ++ { 0x01e785f8, 0x00000002 }, ++ { 0x00200000, 0x00000002 }, ++ { 0x0060007e, 0x0000000c }, ++ { 0x00007563, 0x00000002 }, ++ { 0x006075f0, 0x00000021 }, ++ { 0x20007073, 0x00000004 }, ++ { 0x00005073, 0x00000004 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00007576, 0x00000002 }, ++ { 0x00007577, 0x00000002 }, ++ { 0x0000750e, 0x00000002 }, ++ { 0x0000750f, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x00600083, 0x0000000c }, ++ { 0x006075f0, 0x00000021 }, ++ { 0x000075f8, 0x00000002 }, ++ { 0x00000083, 0x00000004 }, ++ { 0x000a750e, 0x00000002 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x0020750f, 0x00000002 }, ++ { 0x00600086, 0x00000004 }, ++ { 0x00007570, 0x00000002 }, ++ { 0x00007571, 0x00000002 }, ++ { 0x00007572, 0x00000006 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00005000, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x00007568, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000095, 0x0000000c }, ++ { 0x00058000, 0x00000002 }, ++ { 0x0c607562, 0x00000002 }, ++ { 0x00000097, 0x00000004 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x00600096, 0x00000004 }, ++ { 0x400070e5, 0000000000 }, ++ { 0x000380e6, 0x00000002 }, ++ { 0x040025c5, 0x00000002 }, ++ { 0x000380e5, 0x00000002 }, ++ { 0x000000a8, 0x0000001c }, ++ { 0x000650aa, 0x00000018 }, ++ { 0x040025bb, 0x00000002 }, ++ { 0x000610ab, 0x00000018 }, ++ { 0x040075bc, 0000000000 }, ++ { 0x000075bb, 0x00000002 }, ++ { 0x000075bc, 0000000000 }, ++ { 0x00090000, 0x00000006 }, ++ { 0x00090000, 0x00000002 }, ++ { 0x000d8002, 0x00000006 }, ++ { 0x00007832, 0x00000002 }, ++ { 0x00005000, 0x00000002 }, ++ { 0x000380e7, 0x00000002 }, ++ { 0x04002c97, 0x00000002 }, ++ { 0x00007820, 0x00000002 }, ++ { 0x00007821, 0x00000002 }, ++ { 0x00007800, 0000000000 }, ++ { 0x01200000, 0x00000002 }, ++ { 0x20077000, 0x00000002 }, ++ { 0x01200000, 0x00000002 }, ++ { 0x20007000, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x0120751b, 0x00000002 }, ++ { 0x8040750a, 0x00000002 }, ++ { 0x8040750b, 0x00000002 }, ++ { 0x00110000, 0x00000002 }, ++ { 0x000380e5, 0x00000002 }, ++ { 0x000000c6, 0x0000001c }, ++ { 0x000610ab, 0x00000018 }, ++ { 0x844075bd, 0x00000002 }, ++ { 0x000610aa, 0x00000018 }, ++ { 0x840075bb, 0x00000002 }, ++ { 0x000610ab, 0x00000018 }, ++ { 0x844075bc, 0x00000002 }, ++ { 0x000000c9, 0x00000004 }, ++ { 0x804075bd, 0x00000002 }, ++ { 0x800075bb, 0x00000002 }, ++ { 0x804075bc, 0x00000002 }, ++ { 0x00108000, 0x00000002 }, ++ { 0x01400000, 0x00000002 }, ++ { 0x006000cd, 0x0000000c }, ++ { 0x20c07000, 0x00000020 }, ++ { 0x000000cf, 0x00000012 }, ++ { 0x00800000, 0x00000006 }, ++ { 0x0080751d, 0x00000006 }, ++ { 0000000000, 0000000000 }, ++ { 0x0000775c, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x00661000, 0x00000002 }, ++ { 0x0460275d, 0x00000020 }, ++ { 0x00004000, 0000000000 }, ++ { 0x01e00830, 0x00000002 }, ++ { 0x21007000, 0000000000 }, ++ { 0x6464614d, 0000000000 }, ++ { 0x69687420, 0000000000 }, ++ { 0x00000073, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x00005000, 0x00000002 }, ++ { 0x000380d0, 0x00000002 }, ++ { 0x040025e0, 0x00000002 }, ++ { 0x000075e1, 
0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000380e0, 0x00000002 }, ++ { 0x04002394, 0x00000002 }, ++ { 0x00005000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x00000008, 0000000000 }, ++ { 0x00000004, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++static const u32 R200_cp_microcode[][2]={ ++ { 0x21007000, 0000000000 }, ++ { 0x20007000, 0000000000 }, ++ { 0x000000bf, 0x00000004 }, ++ { 0x000000c3, 0x00000004 }, ++ { 0x7a685e5d, 0000000000 }, ++ { 0x5d5d5588, 0000000000 }, ++ { 0x68659197, 0000000000 }, ++ { 0x5da19f78, 0000000000 }, ++ { 0x5d5d5d5d, 0000000000 }, ++ { 0x5dee5d50, 0000000000 }, ++ { 0xf2acacac, 0000000000 }, ++ { 0xe75df9e9, 0000000000 }, ++ { 0xb1dd0e11, 0000000000 }, ++ { 0xe2afafaf, 0000000000 }, ++ { 0x000f0000, 0x00000016 }, ++ { 0x452f232d, 0000000000 }, ++ { 0x00000013, 0x00000004 }, ++ { 0x000f0000, 0x00000016 }, ++ { 0x452f272d, 0000000000 }, ++ { 0x000f0001, 0x00000016 }, ++ { 0x3e4d4a37, 0000000000 }, ++ { 0x000077ef, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000020, 0x0000001a }, ++ { 0x00004000, 0x0000001e }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000020, 0x0000001a }, ++ { 0x00004000, 0x0000001e }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000020, 0x0000001a }, ++ { 0x00004000, 0x0000001e }, ++ { 0x00000016, 0x00000004 }, ++ { 0x0003802a, 0x00000002 }, ++ { 0x040067e0, 0x00000002 }, ++ { 0x00000016, 0x00000004 }, ++ { 0x000077e0, 0x00000002 }, ++ { 0x00065000, 0x00000002 }, ++ { 0x000037e1, 0x00000002 }, ++ { 0x040067e1, 0x00000006 }, ++ { 0x000077e0, 0x00000002 }, ++ { 0x000077e1, 0x00000002 }, ++ { 0x000077e1, 0x00000006 }, ++ { 0xffffffff, 0000000000 }, ++ { 0x10000000, 0000000000 }, ++ { 0x07f007f0, 0000000000 }, ++ { 0x0003802a, 0x00000002 }, ++ { 0x040067e0, 0x00000006 }, ++ { 0x0003802c, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002743, 0x00000002 }, ++ { 0x00007675, 0x00000002 }, ++ { 0x00007676, 0x00000002 }, ++ { 0x00007677, 0x00000002 }, ++ { 0x00007678, 0x00000006 }, ++ { 0x0003802c, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002743, 0x00000002 }, ++ { 0x00007676, 0x00000002 }, ++ { 0x00007677, 0x00000002 }, ++ { 0x00007678, 0x00000006 }, ++ { 0x0003802b, 0x00000002 }, ++ { 0x04002676, 0x00000002 }, ++ { 0x00007677, 0x00000002 }, ++ { 0x0003802c, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002743, 0x00000002 }, ++ { 0x00007678, 0x00000006 }, ++ { 0x0003802c, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002741, 0x00000002 }, ++ { 0x04002743, 0x00000002 }, ++ { 0x00007678, 0x00000006 }, ++ { 0x0000002f, 0x00000018 }, ++ { 0x0000002f, 0x00000018 }, ++ { 0000000000, 0x00000006 }, ++ { 0x00000037, 0x00000018 }, ++ { 0x00000037, 0x00000018 }, ++ { 0000000000, 0x00000006 }, ++ { 0x01605000, 
0x00000002 }, ++ { 0x00065000, 0x00000002 }, ++ { 0x00098000, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x64c06051, 0x00000004 }, ++ { 0x00080000, 0x00000016 }, ++ { 0000000000, 0000000000 }, ++ { 0x0400251d, 0x00000002 }, ++ { 0x00007580, 0x00000002 }, ++ { 0x00067581, 0x00000002 }, ++ { 0x04002580, 0x00000002 }, ++ { 0x00067581, 0x00000002 }, ++ { 0x0000005a, 0x00000004 }, ++ { 0x00005000, 0000000000 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x0000750e, 0x00000002 }, ++ { 0x00019000, 0x00000002 }, ++ { 0x00011064, 0x00000014 }, ++ { 0x00000064, 0x00000012 }, ++ { 0x0400250f, 0x00000002 }, ++ { 0x0000505e, 0x00000004 }, ++ { 0x00007565, 0x00000002 }, ++ { 0x00007566, 0x00000002 }, ++ { 0x00000065, 0x00000004 }, ++ { 0x01e655b4, 0x00000002 }, ++ { 0x4401b0f0, 0x00000002 }, ++ { 0x01c110f0, 0x00000002 }, ++ { 0x26667071, 0x00000018 }, ++ { 0x040c2565, 0x00000002 }, ++ { 0x00000071, 0x00000018 }, ++ { 0x04002564, 0x00000002 }, ++ { 0x00007566, 0x00000002 }, ++ { 0x00000068, 0x00000004 }, ++ { 0x00401074, 0x00000008 }, ++ { 0x00101000, 0x00000002 }, ++ { 0x000d80ff, 0x00000002 }, ++ { 0x00800077, 0x00000008 }, ++ { 0x000f9000, 0x00000002 }, ++ { 0x000e00ff, 0x00000002 }, ++ { 0000000000, 0x00000006 }, ++ { 0x00000094, 0x00000018 }, ++ { 0x00000068, 0x00000004 }, ++ { 0x00007576, 0x00000002 }, ++ { 0x00065000, 0x00000002 }, ++ { 0x00009000, 0x00000002 }, ++ { 0x00041000, 0x00000002 }, ++ { 0x0c00350e, 0x00000002 }, ++ { 0x00049000, 0x00000002 }, ++ { 0x00051000, 0x00000002 }, ++ { 0x01e785f8, 0x00000002 }, ++ { 0x00200000, 0x00000002 }, ++ { 0x00600087, 0x0000000c }, ++ { 0x00007563, 0x00000002 }, ++ { 0x006075f0, 0x00000021 }, ++ { 0x2000707c, 0x00000004 }, ++ { 0x0000507c, 0x00000004 }, ++ { 0x00007576, 0x00000002 }, ++ { 0x00007577, 0x00000002 }, ++ { 0x0000750e, 0x00000002 }, ++ { 0x0000750f, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x0060008a, 0x0000000c }, ++ { 0x006075f0, 0x00000021 }, ++ { 0x000075f8, 0x00000002 }, ++ { 0x0000008a, 0x00000004 }, ++ { 0x000a750e, 0x00000002 }, ++ { 0x0020750f, 0x00000002 }, ++ { 0x0060008d, 0x00000004 }, ++ { 0x00007570, 0x00000002 }, ++ { 0x00007571, 0x00000002 }, ++ { 0x00007572, 0x00000006 }, ++ { 0x00005000, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x00007568, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x00000098, 0x0000000c }, ++ { 0x00058000, 0x00000002 }, ++ { 0x0c607562, 0x00000002 }, ++ { 0x0000009a, 0x00000004 }, ++ { 0x00600099, 0x00000004 }, ++ { 0x400070f1, 0000000000 }, ++ { 0x000380f1, 0x00000002 }, ++ { 0x000000a7, 0x0000001c }, ++ { 0x000650a9, 0x00000018 }, ++ { 0x040025bb, 0x00000002 }, ++ { 0x000610aa, 0x00000018 }, ++ { 0x040075bc, 0000000000 }, ++ { 0x000075bb, 0x00000002 }, ++ { 0x000075bc, 0000000000 }, ++ { 0x00090000, 0x00000006 }, ++ { 0x00090000, 0x00000002 }, ++ { 0x000d8002, 0x00000006 }, ++ { 0x00005000, 0x00000002 }, ++ { 0x00007821, 0x00000002 }, ++ { 0x00007800, 0000000000 }, ++ { 0x00007821, 0x00000002 }, ++ { 0x00007800, 0000000000 }, ++ { 0x01665000, 0x00000002 }, ++ { 0x000a0000, 0x00000002 }, ++ { 0x000671cc, 0x00000002 }, ++ { 0x0286f1cd, 0x00000002 }, ++ { 0x000000b7, 0x00000010 }, ++ { 0x21007000, 0000000000 }, ++ { 0x000000be, 0x0000001c }, ++ { 0x00065000, 0x00000002 }, ++ { 0x000a0000, 0x00000002 }, ++ { 0x00061000, 0x00000002 }, ++ { 0x000b0000, 0x00000002 }, ++ { 0x38067000, 0x00000002 }, ++ { 0x000a00ba, 0x00000004 }, ++ { 0x20007000, 0000000000 }, ++ { 0x01200000, 0x00000002 }, ++ { 0x20077000, 0x00000002 }, ++ { 0x01200000, 0x00000002 }, ++ { 0x20007000, 0000000000 }, ++ { 
0x00061000, 0x00000002 }, ++ { 0x0120751b, 0x00000002 }, ++ { 0x8040750a, 0x00000002 }, ++ { 0x8040750b, 0x00000002 }, ++ { 0x00110000, 0x00000002 }, ++ { 0x000380f1, 0x00000002 }, ++ { 0x000000d1, 0x0000001c }, ++ { 0x000610aa, 0x00000018 }, ++ { 0x844075bd, 0x00000002 }, ++ { 0x000610a9, 0x00000018 }, ++ { 0x840075bb, 0x00000002 }, ++ { 0x000610aa, 0x00000018 }, ++ { 0x844075bc, 0x00000002 }, ++ { 0x000000d4, 0x00000004 }, ++ { 0x804075bd, 0x00000002 }, ++ { 0x800075bb, 0x00000002 }, ++ { 0x804075bc, 0x00000002 }, ++ { 0x00108000, 0x00000002 }, ++ { 0x01400000, 0x00000002 }, ++ { 0x006000d8, 0x0000000c }, ++ { 0x20c07000, 0x00000020 }, ++ { 0x000000da, 0x00000012 }, ++ { 0x00800000, 0x00000006 }, ++ { 0x0080751d, 0x00000006 }, ++ { 0x000025bb, 0x00000002 }, ++ { 0x000040d4, 0x00000004 }, ++ { 0x0000775c, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x00661000, 0x00000002 }, ++ { 0x0460275d, 0x00000020 }, ++ { 0x00004000, 0000000000 }, ++ { 0x00007999, 0x00000002 }, ++ { 0x00a05000, 0x00000002 }, ++ { 0x00661000, 0x00000002 }, ++ { 0x0460299b, 0x00000020 }, ++ { 0x00004000, 0000000000 }, ++ { 0x01e00830, 0x00000002 }, ++ { 0x21007000, 0000000000 }, ++ { 0x00005000, 0x00000002 }, ++ { 0x00038056, 0x00000002 }, ++ { 0x040025e0, 0x00000002 }, ++ { 0x000075e1, 0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000380ed, 0x00000002 }, ++ { 0x04007394, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x000078c4, 0x00000002 }, ++ { 0x000078c5, 0x00000002 }, ++ { 0x000078c6, 0x00000002 }, ++ { 0x00007924, 0x00000002 }, ++ { 0x00007925, 0x00000002 }, ++ { 0x00007926, 0x00000002 }, ++ { 0x000000f2, 0x00000004 }, ++ { 0x00007924, 0x00000002 }, ++ { 0x00007925, 0x00000002 }, ++ { 0x00007926, 0x00000002 }, ++ { 0x000000f9, 0x00000004 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++static const u32 R300_cp_microcode[][2]={ ++ { 0x4200e000, 0000000000 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x000000ae, 0x00000008 }, ++ { 0x000000b2, 0x00000008 }, ++ { 0x67554b4a, 0000000000 }, ++ { 0x4a4a4475, 0000000000 }, ++ { 0x55527d83, 0000000000 }, ++ { 0x4a8c8b65, 0000000000 }, ++ { 0x4aef4af6, 0000000000 }, ++ { 0x4ae14a4a, 0000000000 }, ++ { 0xe4979797, 0000000000 }, ++ { 0xdb4aebdd, 0000000000 }, ++ { 0x9ccc4a4a, 0000000000 }, ++ { 0xd1989898, 0000000000 }, ++ { 0x4a0f9ad6, 0000000000 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000d0012, 0x00000038 }, ++ { 0x0000e8b4, 0x00000004 }, ++ { 0x000d0014, 0x00000038 }, ++ { 0x0000e8b6, 0x00000004 }, ++ { 0x000d0016, 0x00000038 }, ++ { 0x0000e854, 0x00000004 }, ++ { 0x000d0018, 0x00000038 }, ++ { 0x0000e855, 0x00000004 }, ++ { 0x000d001a, 0x00000038 }, ++ { 0x0000e856, 0x00000004 }, ++ { 0x000d001c, 0x00000038 }, ++ { 0x0000e857, 0x00000004 }, ++ { 0x000d001e, 0x00000038 }, ++ { 0x0000e824, 0x00000004 }, ++ { 0x000d0020, 0x00000038 }, ++ { 0x0000e825, 0x00000004 }, ++ { 0x000d0022, 0x00000038 }, ++ { 0x0000e830, 0x00000004 }, ++ { 0x000d0024, 0x00000038 }, ++ { 0x0000f0c0, 0x00000004 }, ++ { 0x000d0026, 0x00000038 }, ++ { 0x0000f0c1, 0x00000004 }, ++ { 0x000d0028, 0x00000038 }, ++ { 0x0000f041, 0x00000004 }, ++ { 0x000d002a, 0x00000038 }, ++ { 0x0000f184, 0x00000004 }, ++ { 0x000d002c, 0x00000038 }, ++ { 0x0000f185, 0x00000004 }, ++ { 0x000d002e, 0x00000038 }, ++ { 0x0000f186, 0x00000004 }, ++ { 0x000d0030, 0x00000038 }, ++ { 0x0000f187, 0x00000004 }, ++ { 0x000d0032, 0x00000038 }, ++ { 0x0000f180, 0x00000004 }, ++ { 0x000d0034, 0x00000038 }, ++ { 0x0000f393, 0x00000004 }, ++ { 
0x000d0036, 0x00000038 }, ++ { 0x0000f38a, 0x00000004 }, ++ { 0x000d0038, 0x00000038 }, ++ { 0x0000f38e, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000043, 0x00000018 }, ++ { 0x00cce800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x0000003a, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x2000451d, 0x00000004 }, ++ { 0x0000e580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x08004580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x00000047, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x00032000, 0x00000004 }, ++ { 0x00022051, 0x00000028 }, ++ { 0x00000051, 0x00000024 }, ++ { 0x0800450f, 0x00000004 }, ++ { 0x0000a04b, 0x00000008 }, ++ { 0x0000e565, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000052, 0x00000008 }, ++ { 0x03cca5b4, 0x00000004 }, ++ { 0x05432000, 0x00000004 }, ++ { 0x00022000, 0x00000004 }, ++ { 0x4ccce05e, 0x00000030 }, ++ { 0x08274565, 0x00000004 }, ++ { 0x0000005e, 0x00000030 }, ++ { 0x08004564, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x00802061, 0x00000010 }, ++ { 0x00202000, 0x00000004 }, ++ { 0x001b00ff, 0x00000004 }, ++ { 0x01000064, 0x00000010 }, ++ { 0x001f2000, 0x00000004 }, ++ { 0x001c00ff, 0x00000004 }, ++ { 0000000000, 0x0000000c }, ++ { 0x00000080, 0x00000030 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x0000e576, 0x00000004 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x00012000, 0x00000004 }, ++ { 0x00082000, 0x00000004 }, ++ { 0x1800650e, 0x00000004 }, ++ { 0x00092000, 0x00000004 }, ++ { 0x000a2000, 0x00000004 }, ++ { 0x000f0000, 0x00000004 }, ++ { 0x00400000, 0x00000004 }, ++ { 0x00000074, 0x00000018 }, ++ { 0x0000e563, 0x00000004 }, ++ { 0x00c0e5f9, 0x000000c2 }, ++ { 0x00000069, 0x00000008 }, ++ { 0x0000a069, 0x00000008 }, ++ { 0x0000e576, 0x00000004 }, ++ { 0x0000e577, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x0000e50f, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000077, 0x00000018 }, ++ { 0x00c0e5f9, 0x000000c2 }, ++ { 0x00000077, 0x00000008 }, ++ { 0x0014e50e, 0x00000004 }, ++ { 0x0040e50f, 0x00000004 }, ++ { 0x00c0007a, 0x00000008 }, ++ { 0x0000e570, 0x00000004 }, ++ { 0x0000e571, 0x00000004 }, ++ { 0x0000e572, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x0000e568, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00000084, 0x00000018 }, ++ { 0x000b0000, 0x00000004 }, ++ { 0x18c0e562, 0x00000004 }, ++ { 0x00000086, 0x00000008 }, ++ { 0x00c00085, 0x00000008 }, ++ { 0x000700e3, 0x00000004 }, ++ { 0x00000092, 0x00000038 }, ++ { 0x000ca094, 0x00000030 }, ++ { 0x080045bb, 0x00000004 }, ++ { 0x000c2095, 0x00000030 }, ++ { 0x0800e5bc, 0000000000 }, ++ { 0x0000e5bb, 0x00000004 }, ++ { 0x0000e5bc, 0000000000 }, ++ { 0x00120000, 0x0000000c }, ++ { 0x00120000, 0x00000004 }, ++ { 0x001b0002, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e800, 0000000000 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e82e, 0000000000 }, ++ { 0x02cca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000ce1cc, 0x00000004 }, ++ { 0x050de1cd, 0x00000004 }, ++ { 0x00400000, 0x00000004 }, ++ { 0x000000a4, 0x00000018 }, ++ { 0x00c0a000, 0x00000004 }, ++ { 0x000000a1, 0x00000008 }, ++ { 0x000000a6, 0x00000020 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x000000ad, 
0x00000038 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00160000, 0x00000004 }, ++ { 0x700ce000, 0x00000004 }, ++ { 0x001400a9, 0x00000008 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x400ee000, 0x00000004 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0240e51b, 0x00000004 }, ++ { 0x0080e50a, 0x00000005 }, ++ { 0x0080e50b, 0x00000005 }, ++ { 0x00220000, 0x00000004 }, ++ { 0x000700e3, 0x00000004 }, ++ { 0x000000c0, 0x00000038 }, ++ { 0x000c2095, 0x00000030 }, ++ { 0x0880e5bd, 0x00000005 }, ++ { 0x000c2094, 0x00000030 }, ++ { 0x0800e5bb, 0x00000005 }, ++ { 0x000c2095, 0x00000030 }, ++ { 0x0880e5bc, 0x00000005 }, ++ { 0x000000c3, 0x00000008 }, ++ { 0x0080e5bd, 0x00000005 }, ++ { 0x0000e5bb, 0x00000005 }, ++ { 0x0080e5bc, 0x00000005 }, ++ { 0x00210000, 0x00000004 }, ++ { 0x02800000, 0x00000004 }, ++ { 0x00c000c7, 0x00000018 }, ++ { 0x4180e000, 0x00000040 }, ++ { 0x000000c9, 0x00000024 }, ++ { 0x01000000, 0x0000000c }, ++ { 0x0100e51d, 0x0000000c }, ++ { 0x000045bb, 0x00000004 }, ++ { 0x000080c3, 0x00000008 }, ++ { 0x0000f3ce, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053cf, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f3d2, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053d3, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f39d, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c0539e, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x03c00830, 0x00000004 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x200045e0, 0x00000004 }, ++ { 0x0000e5e1, 0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000700e0, 0x00000004 }, ++ { 0x0800e394, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x0000e8c4, 0x00000004 }, ++ { 0x0000e8c5, 0x00000004 }, ++ { 0x0000e8c6, 0x00000004 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000e4, 0x00000008 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000eb, 0x00000008 }, ++ { 0x02c02000, 0x00000004 }, ++ { 0x00060000, 0x00000004 }, ++ { 0x000000f3, 0x00000034 }, ++ { 0x000000f0, 0x00000008 }, ++ { 0x00008000, 0x00000004 }, ++ { 0xc000e000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x001d0018, 0x00000004 }, ++ { 0x001a0001, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0x0500a04a, 0x00000008 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++static const u32 R420_cp_microcode[][2]={ ++ { 0x4200e000, 0000000000 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x00000099, 0x00000008 }, ++ { 0x0000009d, 0x00000008 }, ++ { 0x4a554b4a, 0000000000 }, ++ { 0x4a4a4467, 0000000000 }, ++ { 0x55526f75, 0000000000 }, ++ { 0x4a7e7d65, 0000000000 }, ++ { 0xd9d3dff6, 0000000000 }, ++ { 0x4ac54a4a, 0000000000 }, ++ { 0xc8828282, 0000000000 }, ++ { 0xbf4acfc1, 0000000000 }, ++ { 0x87b04a4a, 0000000000 }, ++ { 0xb5838383, 0000000000 }, ++ { 0x4a0f85ba, 0000000000 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000d0012, 0x00000038 }, ++ { 0x0000e8b4, 0x00000004 }, ++ { 0x000d0014, 0x00000038 }, ++ { 0x0000e8b6, 0x00000004 }, ++ { 0x000d0016, 0x00000038 }, ++ { 0x0000e854, 0x00000004 }, ++ { 0x000d0018, 0x00000038 }, ++ { 0x0000e855, 
0x00000004 }, ++ { 0x000d001a, 0x00000038 }, ++ { 0x0000e856, 0x00000004 }, ++ { 0x000d001c, 0x00000038 }, ++ { 0x0000e857, 0x00000004 }, ++ { 0x000d001e, 0x00000038 }, ++ { 0x0000e824, 0x00000004 }, ++ { 0x000d0020, 0x00000038 }, ++ { 0x0000e825, 0x00000004 }, ++ { 0x000d0022, 0x00000038 }, ++ { 0x0000e830, 0x00000004 }, ++ { 0x000d0024, 0x00000038 }, ++ { 0x0000f0c0, 0x00000004 }, ++ { 0x000d0026, 0x00000038 }, ++ { 0x0000f0c1, 0x00000004 }, ++ { 0x000d0028, 0x00000038 }, ++ { 0x0000f041, 0x00000004 }, ++ { 0x000d002a, 0x00000038 }, ++ { 0x0000f184, 0x00000004 }, ++ { 0x000d002c, 0x00000038 }, ++ { 0x0000f185, 0x00000004 }, ++ { 0x000d002e, 0x00000038 }, ++ { 0x0000f186, 0x00000004 }, ++ { 0x000d0030, 0x00000038 }, ++ { 0x0000f187, 0x00000004 }, ++ { 0x000d0032, 0x00000038 }, ++ { 0x0000f180, 0x00000004 }, ++ { 0x000d0034, 0x00000038 }, ++ { 0x0000f393, 0x00000004 }, ++ { 0x000d0036, 0x00000038 }, ++ { 0x0000f38a, 0x00000004 }, ++ { 0x000d0038, 0x00000038 }, ++ { 0x0000f38e, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000043, 0x00000018 }, ++ { 0x00cce800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x0000003a, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x2000451d, 0x00000004 }, ++ { 0x0000e580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x08004580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x00000047, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x00032000, 0x00000004 }, ++ { 0x00022051, 0x00000028 }, ++ { 0x00000051, 0x00000024 }, ++ { 0x0800450f, 0x00000004 }, ++ { 0x0000a04b, 0x00000008 }, ++ { 0x0000e565, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000052, 0x00000008 }, ++ { 0x03cca5b4, 0x00000004 }, ++ { 0x05432000, 0x00000004 }, ++ { 0x00022000, 0x00000004 }, ++ { 0x4ccce05e, 0x00000030 }, ++ { 0x08274565, 0x00000004 }, ++ { 0x0000005e, 0x00000030 }, ++ { 0x08004564, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x00802061, 0x00000010 }, ++ { 0x00202000, 0x00000004 }, ++ { 0x001b00ff, 0x00000004 }, ++ { 0x01000064, 0x00000010 }, ++ { 0x001f2000, 0x00000004 }, ++ { 0x001c00ff, 0x00000004 }, ++ { 0000000000, 0x0000000c }, ++ { 0x00000072, 0x00000030 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x0000e576, 0x00000004 }, ++ { 0x0000e577, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x0000e50f, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000069, 0x00000018 }, ++ { 0x00c0e5f9, 0x000000c2 }, ++ { 0x00000069, 0x00000008 }, ++ { 0x0014e50e, 0x00000004 }, ++ { 0x0040e50f, 0x00000004 }, ++ { 0x00c0006c, 0x00000008 }, ++ { 0x0000e570, 0x00000004 }, ++ { 0x0000e571, 0x00000004 }, ++ { 0x0000e572, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x0000e568, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00000076, 0x00000018 }, ++ { 0x000b0000, 0x00000004 }, ++ { 0x18c0e562, 0x00000004 }, ++ { 0x00000078, 0x00000008 }, ++ { 0x00c00077, 0x00000008 }, ++ { 0x000700c7, 0x00000004 }, ++ { 0x00000080, 0x00000038 }, ++ { 0x0000e5bb, 0x00000004 }, ++ { 0x0000e5bc, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e800, 0000000000 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e82e, 0000000000 }, ++ { 0x02cca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000ce1cc, 0x00000004 }, ++ { 
0x050de1cd, 0x00000004 }, ++ { 0x00400000, 0x00000004 }, ++ { 0x0000008f, 0x00000018 }, ++ { 0x00c0a000, 0x00000004 }, ++ { 0x0000008c, 0x00000008 }, ++ { 0x00000091, 0x00000020 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x00000098, 0x00000038 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00160000, 0x00000004 }, ++ { 0x700ce000, 0x00000004 }, ++ { 0x00140094, 0x00000008 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x400ee000, 0x00000004 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0240e51b, 0x00000004 }, ++ { 0x0080e50a, 0x00000005 }, ++ { 0x0080e50b, 0x00000005 }, ++ { 0x00220000, 0x00000004 }, ++ { 0x000700c7, 0x00000004 }, ++ { 0x000000a4, 0x00000038 }, ++ { 0x0080e5bd, 0x00000005 }, ++ { 0x0000e5bb, 0x00000005 }, ++ { 0x0080e5bc, 0x00000005 }, ++ { 0x00210000, 0x00000004 }, ++ { 0x02800000, 0x00000004 }, ++ { 0x00c000ab, 0x00000018 }, ++ { 0x4180e000, 0x00000040 }, ++ { 0x000000ad, 0x00000024 }, ++ { 0x01000000, 0x0000000c }, ++ { 0x0100e51d, 0x0000000c }, ++ { 0x000045bb, 0x00000004 }, ++ { 0x000080a7, 0x00000008 }, ++ { 0x0000f3ce, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053cf, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f3d2, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053d3, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f39d, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c0539e, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x03c00830, 0x00000004 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x200045e0, 0x00000004 }, ++ { 0x0000e5e1, 0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000700c4, 0x00000004 }, ++ { 0x0800e394, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x0000e8c4, 0x00000004 }, ++ { 0x0000e8c5, 0x00000004 }, ++ { 0x0000e8c6, 0x00000004 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000c8, 0x00000008 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000cf, 0x00000008 }, ++ { 0x02c02000, 0x00000004 }, ++ { 0x00060000, 0x00000004 }, ++ { 0x000000d7, 0x00000034 }, ++ { 0x000000d4, 0x00000008 }, ++ { 0x00008000, 0x00000004 }, ++ { 0xc000e000, 0000000000 }, ++ { 0x0000e1cc, 0x00000004 }, ++ { 0x0500e1cd, 0x00000004 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000000de, 0x00000034 }, ++ { 0x000000da, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x0019e1cc, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x0500a000, 0x00000004 }, ++ { 0x080041cd, 0x00000004 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x001d0018, 0x00000004 }, ++ { 0x001a0001, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0x0500a04a, 0x00000008 }, ++ { 0000000000, 
0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++static const u32 RS600_cp_microcode[][2]={ ++ { 0x4200e000, 0000000000 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x000000a0, 0x00000008 }, ++ { 0x000000a4, 0x00000008 }, ++ { 0x4a554b4a, 0000000000 }, ++ { 0x4a4a4467, 0000000000 }, ++ { 0x55526f75, 0000000000 }, ++ { 0x4a7e7d65, 0000000000 }, ++ { 0x4ae74af6, 0000000000 }, ++ { 0x4ad34a4a, 0000000000 }, ++ { 0xd6898989, 0000000000 }, ++ { 0xcd4addcf, 0000000000 }, ++ { 0x8ebe4ae2, 0000000000 }, ++ { 0xc38a8a8a, 0000000000 }, ++ { 0x4a0f8cc8, 0000000000 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000d0012, 0x00000038 }, ++ { 0x0000e8b4, 0x00000004 }, ++ { 0x000d0014, 0x00000038 }, ++ { 0x0000e8b6, 0x00000004 }, ++ { 0x000d0016, 0x00000038 }, ++ { 0x0000e854, 0x00000004 }, ++ { 0x000d0018, 0x00000038 }, ++ { 0x0000e855, 0x00000004 }, ++ { 0x000d001a, 0x00000038 }, ++ { 0x0000e856, 0x00000004 }, ++ { 0x000d001c, 0x00000038 }, ++ { 0x0000e857, 0x00000004 }, ++ { 0x000d001e, 0x00000038 }, ++ { 0x0000e824, 0x00000004 }, ++ { 0x000d0020, 0x00000038 }, ++ { 0x0000e825, 0x00000004 }, ++ { 0x000d0022, 0x00000038 }, ++ { 0x0000e830, 0x00000004 }, ++ { 0x000d0024, 0x00000038 }, ++ { 0x0000f0c0, 0x00000004 }, ++ { 0x000d0026, 0x00000038 }, ++ { 0x0000f0c1, 0x00000004 }, ++ { 0x000d0028, 0x00000038 }, ++ { 0x0000f041, 0x00000004 }, ++ { 0x000d002a, 0x00000038 }, ++ { 0x0000f184, 0x00000004 }, ++ { 0x000d002c, 0x00000038 }, ++ { 0x0000f185, 0x00000004 }, ++ { 0x000d002e, 0x00000038 }, ++ { 0x0000f186, 0x00000004 }, ++ { 0x000d0030, 0x00000038 }, ++ { 0x0000f187, 0x00000004 }, ++ { 0x000d0032, 0x00000038 }, ++ { 0x0000f180, 0x00000004 }, ++ { 0x000d0034, 0x00000038 }, ++ { 0x0000f393, 0x00000004 }, ++ { 0x000d0036, 0x00000038 }, ++ { 0x0000f38a, 0x00000004 }, ++ { 0x000d0038, 0x00000038 }, ++ { 0x0000f38e, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000043, 0x00000018 }, ++ { 0x00cce800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x0000003a, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x2000451d, 0x00000004 }, ++ { 0x0000e580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x08004580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x00000047, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x00032000, 0x00000004 }, ++ { 0x00022051, 0x00000028 }, ++ { 0x00000051, 0x00000024 }, ++ { 0x0800450f, 0x00000004 }, ++ { 0x0000a04b, 0x00000008 }, ++ { 0x0000e565, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000052, 0x00000008 }, ++ { 0x03cca5b4, 0x00000004 }, ++ { 0x05432000, 0x00000004 }, ++ { 0x00022000, 0x00000004 }, ++ { 0x4ccce05e, 0x00000030 }, ++ { 0x08274565, 0x00000004 }, ++ { 0x0000005e, 0x00000030 }, ++ { 0x08004564, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x00802061, 0x00000010 }, ++ { 0x00202000, 0x00000004 }, ++ { 0x001b00ff, 0x00000004 }, ++ { 0x01000064, 0x00000010 }, ++ { 0x001f2000, 0x00000004 }, ++ { 0x001c00ff, 0x00000004 }, ++ { 0000000000, 0x0000000c }, ++ { 0x00000072, 0x00000030 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x0000e576, 0x00000004 }, ++ { 0x0000e577, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x0000e50f, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000069, 0x00000018 }, ++ { 
0x00c0e5f9, 0x000000c2 }, ++ { 0x00000069, 0x00000008 }, ++ { 0x0014e50e, 0x00000004 }, ++ { 0x0040e50f, 0x00000004 }, ++ { 0x00c0006c, 0x00000008 }, ++ { 0x0000e570, 0x00000004 }, ++ { 0x0000e571, 0x00000004 }, ++ { 0x0000e572, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x0000e568, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00000076, 0x00000018 }, ++ { 0x000b0000, 0x00000004 }, ++ { 0x18c0e562, 0x00000004 }, ++ { 0x00000078, 0x00000008 }, ++ { 0x00c00077, 0x00000008 }, ++ { 0x000700d5, 0x00000004 }, ++ { 0x00000084, 0x00000038 }, ++ { 0x000ca086, 0x00000030 }, ++ { 0x080045bb, 0x00000004 }, ++ { 0x000c2087, 0x00000030 }, ++ { 0x0800e5bc, 0000000000 }, ++ { 0x0000e5bb, 0x00000004 }, ++ { 0x0000e5bc, 0000000000 }, ++ { 0x00120000, 0x0000000c }, ++ { 0x00120000, 0x00000004 }, ++ { 0x001b0002, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e800, 0000000000 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e82e, 0000000000 }, ++ { 0x02cca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000ce1cc, 0x00000004 }, ++ { 0x050de1cd, 0x00000004 }, ++ { 0x00400000, 0x00000004 }, ++ { 0x00000096, 0x00000018 }, ++ { 0x00c0a000, 0x00000004 }, ++ { 0x00000093, 0x00000008 }, ++ { 0x00000098, 0x00000020 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000009f, 0x00000038 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00160000, 0x00000004 }, ++ { 0x700ce000, 0x00000004 }, ++ { 0x0014009b, 0x00000008 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x400ee000, 0x00000004 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0240e51b, 0x00000004 }, ++ { 0x0080e50a, 0x00000005 }, ++ { 0x0080e50b, 0x00000005 }, ++ { 0x00220000, 0x00000004 }, ++ { 0x000700d5, 0x00000004 }, ++ { 0x000000b2, 0x00000038 }, ++ { 0x000c2087, 0x00000030 }, ++ { 0x0880e5bd, 0x00000005 }, ++ { 0x000c2086, 0x00000030 }, ++ { 0x0800e5bb, 0x00000005 }, ++ { 0x000c2087, 0x00000030 }, ++ { 0x0880e5bc, 0x00000005 }, ++ { 0x000000b5, 0x00000008 }, ++ { 0x0080e5bd, 0x00000005 }, ++ { 0x0000e5bb, 0x00000005 }, ++ { 0x0080e5bc, 0x00000005 }, ++ { 0x00210000, 0x00000004 }, ++ { 0x02800000, 0x00000004 }, ++ { 0x00c000b9, 0x00000018 }, ++ { 0x4180e000, 0x00000040 }, ++ { 0x000000bb, 0x00000024 }, ++ { 0x01000000, 0x0000000c }, ++ { 0x0100e51d, 0x0000000c }, ++ { 0x000045bb, 0x00000004 }, ++ { 0x000080b5, 0x00000008 }, ++ { 0x0000f3ce, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053cf, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f3d2, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053d3, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f39d, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c0539e, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x03c00830, 0x00000004 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x200045e0, 0x00000004 }, ++ { 0x0000e5e1, 0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000700d2, 0x00000004 }, ++ { 0x0800e394, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x0000e8c4, 0x00000004 }, ++ { 0x0000e8c5, 0x00000004 }, ++ { 0x0000e8c6, 0x00000004 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000d6, 0x00000008 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 
0x00000004 }, ++ { 0x000000dd, 0x00000008 }, ++ { 0x00e00116, 0000000000 }, ++ { 0x000700e1, 0x00000004 }, ++ { 0x0800401c, 0x00000004 }, ++ { 0x200050e7, 0x00000004 }, ++ { 0x0000e01d, 0x00000004 }, ++ { 0x000000e4, 0x00000008 }, ++ { 0x02c02000, 0x00000004 }, ++ { 0x00060000, 0x00000004 }, ++ { 0x000000eb, 0x00000034 }, ++ { 0x000000e8, 0x00000008 }, ++ { 0x00008000, 0x00000004 }, ++ { 0xc000e000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x001d0018, 0x00000004 }, ++ { 0x001a0001, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0x0500a04a, 0x00000008 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++static const u32 RS690_cp_microcode[][2]={ ++ { 0x000000dd, 0x00000008 }, ++ { 0x000000df, 0x00000008 }, ++ { 0x000000a0, 0x00000008 }, ++ { 0x000000a4, 0x00000008 }, ++ { 0x4a554b4a, 0000000000 }, ++ { 0x4a4a4467, 0000000000 }, ++ { 0x55526f75, 0000000000 }, ++ { 0x4a7e7d65, 0000000000 }, ++ { 0x4ad74af6, 0000000000 }, ++ { 0x4ac94a4a, 0000000000 }, ++ { 0xcc898989, 0000000000 }, ++ { 0xc34ad3c5, 0000000000 }, ++ { 0x8e4a4a4a, 0000000000 }, ++ { 0x4a8a8a8a, 0000000000 }, ++ { 0x4a0f8c4a, 0000000000 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000d0012, 0x00000038 }, ++ { 0x0000e8b4, 0x00000004 }, ++ { 0x000d0014, 0x00000038 }, ++ { 0x0000e8b6, 0x00000004 }, ++ { 0x000d0016, 0x00000038 }, ++ { 0x0000e854, 0x00000004 }, ++ { 0x000d0018, 0x00000038 }, ++ { 0x0000e855, 0x00000004 }, ++ { 0x000d001a, 0x00000038 }, ++ { 0x0000e856, 0x00000004 }, ++ { 0x000d001c, 0x00000038 }, ++ { 0x0000e857, 0x00000004 }, ++ { 0x000d001e, 0x00000038 }, ++ { 0x0000e824, 0x00000004 }, ++ { 0x000d0020, 0x00000038 }, ++ { 0x0000e825, 0x00000004 }, ++ { 0x000d0022, 0x00000038 }, ++ { 0x0000e830, 0x00000004 }, ++ { 0x000d0024, 0x00000038 }, ++ { 0x0000f0c0, 0x00000004 }, ++ { 0x000d0026, 0x00000038 }, ++ { 0x0000f0c1, 0x00000004 }, ++ { 0x000d0028, 0x00000038 }, ++ { 0x0000f041, 0x00000004 }, ++ { 0x000d002a, 0x00000038 }, ++ { 0x0000f184, 0x00000004 }, ++ { 0x000d002c, 0x00000038 }, ++ { 0x0000f185, 0x00000004 }, ++ { 0x000d002e, 0x00000038 }, ++ { 0x0000f186, 0x00000004 }, ++ { 0x000d0030, 0x00000038 }, ++ { 0x0000f187, 0x00000004 }, ++ { 0x000d0032, 0x00000038 }, ++ { 0x0000f180, 0x00000004 }, ++ { 0x000d0034, 0x00000038 }, ++ { 0x0000f393, 0x00000004 }, ++ { 0x000d0036, 0x00000038 }, ++ { 0x0000f38a, 0x00000004 }, ++ { 0x000d0038, 0x00000038 }, ++ { 0x0000f38e, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000043, 0x00000018 }, ++ { 0x00cce800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x0000003a, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x2000451d, 0x00000004 }, ++ { 0x0000e580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x08004580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x00000047, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x00032000, 0x00000004 }, ++ { 0x00022051, 0x00000028 }, ++ { 0x00000051, 0x00000024 }, ++ { 
0x0800450f, 0x00000004 }, ++ { 0x0000a04b, 0x00000008 }, ++ { 0x0000e565, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000052, 0x00000008 }, ++ { 0x03cca5b4, 0x00000004 }, ++ { 0x05432000, 0x00000004 }, ++ { 0x00022000, 0x00000004 }, ++ { 0x4ccce05e, 0x00000030 }, ++ { 0x08274565, 0x00000004 }, ++ { 0x0000005e, 0x00000030 }, ++ { 0x08004564, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x00802061, 0x00000010 }, ++ { 0x00202000, 0x00000004 }, ++ { 0x001b00ff, 0x00000004 }, ++ { 0x01000064, 0x00000010 }, ++ { 0x001f2000, 0x00000004 }, ++ { 0x001c00ff, 0x00000004 }, ++ { 0000000000, 0x0000000c }, ++ { 0x00000072, 0x00000030 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x0000e576, 0x00000004 }, ++ { 0x0000e577, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x0000e50f, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000069, 0x00000018 }, ++ { 0x00c0e5f9, 0x000000c2 }, ++ { 0x00000069, 0x00000008 }, ++ { 0x0014e50e, 0x00000004 }, ++ { 0x0040e50f, 0x00000004 }, ++ { 0x00c0006c, 0x00000008 }, ++ { 0x0000e570, 0x00000004 }, ++ { 0x0000e571, 0x00000004 }, ++ { 0x0000e572, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x0000e568, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00000076, 0x00000018 }, ++ { 0x000b0000, 0x00000004 }, ++ { 0x18c0e562, 0x00000004 }, ++ { 0x00000078, 0x00000008 }, ++ { 0x00c00077, 0x00000008 }, ++ { 0x000700cb, 0x00000004 }, ++ { 0x00000084, 0x00000038 }, ++ { 0x000ca086, 0x00000030 }, ++ { 0x080045bb, 0x00000004 }, ++ { 0x000c2087, 0x00000030 }, ++ { 0x0800e5bc, 0000000000 }, ++ { 0x0000e5bb, 0x00000004 }, ++ { 0x0000e5bc, 0000000000 }, ++ { 0x00120000, 0x0000000c }, ++ { 0x00120000, 0x00000004 }, ++ { 0x001b0002, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e800, 0000000000 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e82e, 0000000000 }, ++ { 0x02cca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000ce1cc, 0x00000004 }, ++ { 0x050de1cd, 0x00000004 }, ++ { 0x00400000, 0x00000004 }, ++ { 0x00000096, 0x00000018 }, ++ { 0x00c0a000, 0x00000004 }, ++ { 0x00000093, 0x00000008 }, ++ { 0x00000098, 0x00000020 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000009f, 0x00000038 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00160000, 0x00000004 }, ++ { 0x700ce000, 0x00000004 }, ++ { 0x0014009b, 0x00000008 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x400ee000, 0x00000004 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x00100000, 0x0000002c }, ++ { 0x00004000, 0000000000 }, ++ { 0x080045c8, 0x00000004 }, ++ { 0x00240005, 0x00000004 }, ++ { 0x08004d0b, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0240e51b, 0x00000004 }, ++ { 0x0080e50a, 0x00000005 }, ++ { 0x0080e50b, 0x00000005 }, ++ { 0x00220000, 0x00000004 }, ++ { 0x000700cb, 0x00000004 }, ++ { 0x000000b7, 0x00000038 }, ++ { 0x000c2087, 0x00000030 }, ++ { 0x0880e5bd, 0x00000005 }, ++ { 0x000c2086, 0x00000030 }, ++ { 0x0800e5bb, 0x00000005 }, ++ { 0x000c2087, 0x00000030 }, ++ { 0x0880e5bc, 0x00000005 }, ++ { 0x000000ba, 0x00000008 }, ++ { 0x0080e5bd, 0x00000005 }, ++ { 0x0000e5bb, 0x00000005 }, ++ { 0x0080e5bc, 0x00000005 }, ++ { 0x00210000, 0x00000004 }, ++ { 0x02800000, 0x00000004 }, ++ { 0x00c000be, 0x00000018 }, ++ { 0x4180e000, 0x00000040 }, ++ { 0x000000c0, 0x00000024 }, ++ { 0x01000000, 0x0000000c }, ++ { 0x0100e51d, 0x0000000c }, ++ { 0x000045bb, 0x00000004 }, ++ { 0x000080ba, 
0x00000008 }, ++ { 0x03c00830, 0x00000004 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x200045e0, 0x00000004 }, ++ { 0x0000e5e1, 0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000700c8, 0x00000004 }, ++ { 0x0800e394, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x0000e8c4, 0x00000004 }, ++ { 0x0000e8c5, 0x00000004 }, ++ { 0x0000e8c6, 0x00000004 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000cc, 0x00000008 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000d3, 0x00000008 }, ++ { 0x02c02000, 0x00000004 }, ++ { 0x00060000, 0x00000004 }, ++ { 0x000000db, 0x00000034 }, ++ { 0x000000d8, 0x00000008 }, ++ { 0x00008000, 0x00000004 }, ++ { 0xc000e000, 0000000000 }, ++ { 0x000000e1, 0x00000030 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x000000e1, 0x00000030 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x0025001b, 0x00000004 }, ++ { 0x00230000, 0x00000004 }, ++ { 0x00250005, 0x00000004 }, ++ { 0x000000e6, 0x00000034 }, ++ { 0000000000, 0x0000000c }, ++ { 0x00244000, 0x00000004 }, ++ { 0x080045c8, 0x00000004 }, ++ { 0x00240005, 0x00000004 }, ++ { 0x08004d0b, 0x0000000c }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x001d0018, 0x00000004 }, ++ { 0x001a0001, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0x0500a04a, 0x00000008 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++static const u32 R520_cp_microcode[][2]={ ++ { 0x4200e000, 0000000000 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x00000099, 0x00000008 }, ++ { 0x0000009d, 0x00000008 }, ++ { 0x4a554b4a, 0000000000 }, ++ { 0x4a4a4467, 0000000000 }, ++ { 0x55526f75, 0000000000 }, ++ { 0x4a7e7d65, 0000000000 }, ++ { 0xe0dae6f6, 0000000000 }, ++ { 0x4ac54a4a, 0000000000 }, ++ { 0xc8828282, 0000000000 }, ++ { 0xbf4acfc1, 0000000000 }, ++ { 0x87b04ad5, 0000000000 }, ++ { 0xb5838383, 0000000000 }, ++ { 0x4a0f85ba, 0000000000 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000d0012, 0x00000038 }, ++ { 0x0000e8b4, 0x00000004 }, ++ { 0x000d0014, 0x00000038 }, ++ { 0x0000e8b6, 0x00000004 }, ++ { 0x000d0016, 0x00000038 }, ++ { 0x0000e854, 0x00000004 }, ++ { 0x000d0018, 0x00000038 }, ++ { 0x0000e855, 0x00000004 }, ++ { 0x000d001a, 0x00000038 }, ++ { 0x0000e856, 0x00000004 }, ++ { 0x000d001c, 0x00000038 }, ++ { 0x0000e857, 0x00000004 }, ++ { 0x000d001e, 0x00000038 }, ++ { 0x0000e824, 0x00000004 }, ++ { 0x000d0020, 0x00000038 }, ++ { 0x0000e825, 0x00000004 }, ++ { 0x000d0022, 0x00000038 }, ++ { 0x0000e830, 0x00000004 }, ++ { 0x000d0024, 0x00000038 }, ++ { 0x0000f0c0, 0x00000004 }, ++ { 0x000d0026, 0x00000038 }, ++ { 0x0000f0c1, 0x00000004 }, ++ { 0x000d0028, 0x00000038 }, ++ { 0x0000e000, 0x00000004 }, ++ { 0x000d002a, 0x00000038 }, ++ { 0x0000e000, 0x00000004 }, ++ { 0x000d002c, 0x00000038 }, ++ { 0x0000e000, 0x00000004 }, ++ { 0x000d002e, 0x00000038 }, ++ { 0x0000e000, 0x00000004 }, ++ { 0x000d0030, 0x00000038 }, ++ { 0x0000e000, 0x00000004 }, ++ { 0x000d0032, 0x00000038 }, ++ { 0x0000f180, 0x00000004 }, ++ { 0x000d0034, 0x00000038 }, ++ { 0x0000f393, 
0x00000004 }, ++ { 0x000d0036, 0x00000038 }, ++ { 0x0000f38a, 0x00000004 }, ++ { 0x000d0038, 0x00000038 }, ++ { 0x0000f38e, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000043, 0x00000018 }, ++ { 0x00cce800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x08004800, 0x00000004 }, ++ { 0x0000003a, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x2000451d, 0x00000004 }, ++ { 0x0000e580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x08004580, 0x00000004 }, ++ { 0x000ce581, 0x00000004 }, ++ { 0x00000047, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x00032000, 0x00000004 }, ++ { 0x00022051, 0x00000028 }, ++ { 0x00000051, 0x00000024 }, ++ { 0x0800450f, 0x00000004 }, ++ { 0x0000a04b, 0x00000008 }, ++ { 0x0000e565, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000052, 0x00000008 }, ++ { 0x03cca5b4, 0x00000004 }, ++ { 0x05432000, 0x00000004 }, ++ { 0x00022000, 0x00000004 }, ++ { 0x4ccce05e, 0x00000030 }, ++ { 0x08274565, 0x00000004 }, ++ { 0x0000005e, 0x00000030 }, ++ { 0x08004564, 0x00000004 }, ++ { 0x0000e566, 0x00000004 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x00802061, 0x00000010 }, ++ { 0x00202000, 0x00000004 }, ++ { 0x001b00ff, 0x00000004 }, ++ { 0x01000064, 0x00000010 }, ++ { 0x001f2000, 0x00000004 }, ++ { 0x001c00ff, 0x00000004 }, ++ { 0000000000, 0x0000000c }, ++ { 0x00000072, 0x00000030 }, ++ { 0x00000055, 0x00000008 }, ++ { 0x0000e576, 0x00000004 }, ++ { 0x0000e577, 0x00000004 }, ++ { 0x0000e50e, 0x00000004 }, ++ { 0x0000e50f, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00000069, 0x00000018 }, ++ { 0x00c0e5f9, 0x000000c2 }, ++ { 0x00000069, 0x00000008 }, ++ { 0x0014e50e, 0x00000004 }, ++ { 0x0040e50f, 0x00000004 }, ++ { 0x00c0006c, 0x00000008 }, ++ { 0x0000e570, 0x00000004 }, ++ { 0x0000e571, 0x00000004 }, ++ { 0x0000e572, 0x0000000c }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x0000e568, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00000076, 0x00000018 }, ++ { 0x000b0000, 0x00000004 }, ++ { 0x18c0e562, 0x00000004 }, ++ { 0x00000078, 0x00000008 }, ++ { 0x00c00077, 0x00000008 }, ++ { 0x000700c7, 0x00000004 }, ++ { 0x00000080, 0x00000038 }, ++ { 0x0000e5bb, 0x00000004 }, ++ { 0x0000e5bc, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e800, 0000000000 }, ++ { 0x0000e821, 0x00000004 }, ++ { 0x0000e82e, 0000000000 }, ++ { 0x02cca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000ce1cc, 0x00000004 }, ++ { 0x050de1cd, 0x00000004 }, ++ { 0x00400000, 0x00000004 }, ++ { 0x0000008f, 0x00000018 }, ++ { 0x00c0a000, 0x00000004 }, ++ { 0x0000008c, 0x00000008 }, ++ { 0x00000091, 0x00000020 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x00000098, 0x00000038 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x00140000, 0x00000004 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x00160000, 0x00000004 }, ++ { 0x700ce000, 0x00000004 }, ++ { 0x00140094, 0x00000008 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x400ee000, 0x00000004 }, ++ { 0x02400000, 0x00000004 }, ++ { 0x4000e000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x0240e51b, 0x00000004 }, ++ { 0x0080e50a, 0x00000005 }, ++ { 0x0080e50b, 0x00000005 }, ++ { 0x00220000, 0x00000004 }, ++ { 0x000700c7, 0x00000004 }, ++ { 0x000000a4, 0x00000038 }, ++ { 0x0080e5bd, 0x00000005 }, ++ { 0x0000e5bb, 0x00000005 }, ++ { 
0x0080e5bc, 0x00000005 }, ++ { 0x00210000, 0x00000004 }, ++ { 0x02800000, 0x00000004 }, ++ { 0x00c000ab, 0x00000018 }, ++ { 0x4180e000, 0x00000040 }, ++ { 0x000000ad, 0x00000024 }, ++ { 0x01000000, 0x0000000c }, ++ { 0x0100e51d, 0x0000000c }, ++ { 0x000045bb, 0x00000004 }, ++ { 0x000080a7, 0x00000008 }, ++ { 0x0000f3ce, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053cf, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f3d2, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c053d3, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x0000f39d, 0x00000004 }, ++ { 0x0140a000, 0x00000004 }, ++ { 0x00cc2000, 0x00000004 }, ++ { 0x08c0539e, 0x00000040 }, ++ { 0x00008000, 0000000000 }, ++ { 0x03c00830, 0x00000004 }, ++ { 0x4200e000, 0000000000 }, ++ { 0x0000a000, 0x00000004 }, ++ { 0x200045e0, 0x00000004 }, ++ { 0x0000e5e1, 0000000000 }, ++ { 0x00000001, 0000000000 }, ++ { 0x000700c4, 0x00000004 }, ++ { 0x0800e394, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x0000e8c4, 0x00000004 }, ++ { 0x0000e8c5, 0x00000004 }, ++ { 0x0000e8c6, 0x00000004 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000c8, 0x00000008 }, ++ { 0x0000e928, 0x00000004 }, ++ { 0x0000e929, 0x00000004 }, ++ { 0x0000e92a, 0x00000004 }, ++ { 0x000000cf, 0x00000008 }, ++ { 0xdeadbeef, 0000000000 }, ++ { 0x00000116, 0000000000 }, ++ { 0x000700d3, 0x00000004 }, ++ { 0x080050e7, 0x00000004 }, ++ { 0x000700d4, 0x00000004 }, ++ { 0x0800401c, 0x00000004 }, ++ { 0x0000e01d, 0000000000 }, ++ { 0x02c02000, 0x00000004 }, ++ { 0x00060000, 0x00000004 }, ++ { 0x000000de, 0x00000034 }, ++ { 0x000000db, 0x00000008 }, ++ { 0x00008000, 0x00000004 }, ++ { 0xc000e000, 0000000000 }, ++ { 0x0000e1cc, 0x00000004 }, ++ { 0x0500e1cd, 0x00000004 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000000e5, 0x00000034 }, ++ { 0x000000e1, 0x00000008 }, ++ { 0x0000a000, 0000000000 }, ++ { 0x0019e1cc, 0x00000004 }, ++ { 0x001b0001, 0x00000004 }, ++ { 0x0500a000, 0x00000004 }, ++ { 0x080041cd, 0x00000004 }, ++ { 0x000ca000, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0x000c2000, 0x00000004 }, ++ { 0x001d0018, 0x00000004 }, ++ { 0x001a0001, 0x00000004 }, ++ { 0x000000fb, 0x00000034 }, ++ { 0x0000004a, 0x00000008 }, ++ { 0x0500a04a, 0x00000008 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++ { 0000000000, 0000000000 }, ++}; ++ ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/radeon_state.c git-nokia/drivers/gpu/drm-tungsten/radeon_state.c +--- git/drivers/gpu/drm-tungsten/radeon_state.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/radeon_state.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,3263 @@ ++/* radeon_state.c -- State support for Radeon -*- linux-c -*- */ ++/* ++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California. ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: ++ * Gareth Hughes ++ * Kevin E. Martin ++ */ ++ ++#include "drmP.h" ++#include "drm.h" ++#include "drm_sarea.h" ++#include "radeon_drm.h" ++#include "radeon_drv.h" ++ ++/* ================================================================ ++ * Helper functions for client state checking and fixup ++ */ ++ ++static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * ++ dev_priv, ++ struct drm_file *file_priv, ++ u32 * offset) ++{ ++ u64 off = *offset; ++ u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1; ++ struct drm_radeon_driver_file_fields *radeon_priv; ++ ++ /* Hrm ... the story of the offset ... So this function converts ++ * the various ideas of what userland clients might have for an ++ * offset in the card address space into an offset into the card ++ * address space :) So with a sane client, it should just keep ++ * the value intact and just do some boundary checking. However, ++ * not all clients are sane. Some older clients pass us 0 based ++ * offsets relative to the start of the framebuffer and some may ++ * assume the AGP aperture is appended to the framebuffer, so we ++ * try to detect those cases and fix them up. ++ * ++ * Note: It might be a good idea here to make sure the offset lands ++ * in some "allowed" area to protect things like the PCIE GART... ++ */ ++ ++ /* First, the best case, the offset already lands in either the ++ * framebuffer or the GART mapped space ++ */ ++ if (radeon_check_offset(dev_priv, off)) ++ return 0; ++ ++ /* Ok, that didn't happen...
now check if we have a zero based ++ * offset that fits in the framebuffer + gart space, apply the ++ * magic offset we get from SETPARAM or calculated from fb_location ++ */ ++ if (off < (dev_priv->fb_size + dev_priv->gart_size)) { ++ radeon_priv = file_priv->driver_priv; ++ off += radeon_priv->radeon_fb_delta; ++ } ++ ++ /* Finally, assume we aimed at a GART offset if beyond the fb */ ++ if (off > fb_end) ++ off = off - fb_end - 1 + dev_priv->gart_vm_start; ++ ++ /* Now recheck and fail if out of bounds */ ++ if (radeon_check_offset(dev_priv, off)) { ++ DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off); ++ *offset = off; ++ return 0; ++ } ++ return -EINVAL; ++} ++ ++static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * ++ dev_priv, ++ struct drm_file *file_priv, ++ int id, u32 *data) ++{ ++ switch (id) { ++ ++ case RADEON_EMIT_PP_MISC: ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { ++ DRM_ERROR("Invalid depth buffer offset\n"); ++ return -EINVAL; ++ } ++ break; ++ ++ case RADEON_EMIT_PP_CNTL: ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { ++ DRM_ERROR("Invalid colour buffer offset\n"); ++ return -EINVAL; ++ } ++ break; ++ ++ case R200_EMIT_PP_TXOFFSET_0: ++ case R200_EMIT_PP_TXOFFSET_1: ++ case R200_EMIT_PP_TXOFFSET_2: ++ case R200_EMIT_PP_TXOFFSET_3: ++ case R200_EMIT_PP_TXOFFSET_4: ++ case R200_EMIT_PP_TXOFFSET_5: ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &data[0])) { ++ DRM_ERROR("Invalid R200 texture offset\n"); ++ return -EINVAL; ++ } ++ break; ++ ++ case RADEON_EMIT_PP_TXFILTER_0: ++ case RADEON_EMIT_PP_TXFILTER_1: ++ case RADEON_EMIT_PP_TXFILTER_2: ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { ++ DRM_ERROR("Invalid R100 texture offset\n"); ++ return -EINVAL; ++ } ++ break; ++ ++ case R200_EMIT_PP_CUBIC_OFFSETS_0: ++ case R200_EMIT_PP_CUBIC_OFFSETS_1: ++ case R200_EMIT_PP_CUBIC_OFFSETS_2: ++ case R200_EMIT_PP_CUBIC_OFFSETS_3: ++ case R200_EMIT_PP_CUBIC_OFFSETS_4: ++ case R200_EMIT_PP_CUBIC_OFFSETS_5:{ ++ int i; ++ for (i = 0; i < 5; i++) { ++ if (radeon_check_and_fixup_offset(dev_priv, ++ file_priv, ++ &data[i])) { ++ DRM_ERROR ++ ("Invalid R200 cubic texture offset\n"); ++ return -EINVAL; ++ } ++ } ++ break; ++ } ++ ++ case RADEON_EMIT_PP_CUBIC_OFFSETS_T0: ++ case RADEON_EMIT_PP_CUBIC_OFFSETS_T1: ++ case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{ ++ int i; ++ for (i = 0; i < 5; i++) { ++ if (radeon_check_and_fixup_offset(dev_priv, ++ file_priv, ++ &data[i])) { ++ DRM_ERROR ++ ("Invalid R100 cubic texture offset\n"); ++ return -EINVAL; ++ } ++ } ++ } ++ break; ++ ++ case R200_EMIT_VAP_CTL: { ++ RING_LOCALS; ++ BEGIN_RING(2); ++ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); ++ ADVANCE_RING(); ++ } ++ break; ++ ++ case RADEON_EMIT_RB3D_COLORPITCH: ++ case RADEON_EMIT_RE_LINE_PATTERN: ++ case RADEON_EMIT_SE_LINE_WIDTH: ++ case RADEON_EMIT_PP_LUM_MATRIX: ++ case RADEON_EMIT_PP_ROT_MATRIX_0: ++ case RADEON_EMIT_RB3D_STENCILREFMASK: ++ case RADEON_EMIT_SE_VPORT_XSCALE: ++ case RADEON_EMIT_SE_CNTL: ++ case RADEON_EMIT_SE_CNTL_STATUS: ++ case RADEON_EMIT_RE_MISC: ++ case RADEON_EMIT_PP_BORDER_COLOR_0: ++ case RADEON_EMIT_PP_BORDER_COLOR_1: ++ case RADEON_EMIT_PP_BORDER_COLOR_2: ++ case RADEON_EMIT_SE_ZBIAS_FACTOR: ++ case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT: ++ case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED: ++ case 
R200_EMIT_PP_TXCBLEND_0: ++ case R200_EMIT_PP_TXCBLEND_1: ++ case R200_EMIT_PP_TXCBLEND_2: ++ case R200_EMIT_PP_TXCBLEND_3: ++ case R200_EMIT_PP_TXCBLEND_4: ++ case R200_EMIT_PP_TXCBLEND_5: ++ case R200_EMIT_PP_TXCBLEND_6: ++ case R200_EMIT_PP_TXCBLEND_7: ++ case R200_EMIT_TCL_LIGHT_MODEL_CTL_0: ++ case R200_EMIT_TFACTOR_0: ++ case R200_EMIT_VTX_FMT_0: ++ case R200_EMIT_MATRIX_SELECT_0: ++ case R200_EMIT_TEX_PROC_CTL_2: ++ case R200_EMIT_TCL_UCP_VERT_BLEND_CTL: ++ case R200_EMIT_PP_TXFILTER_0: ++ case R200_EMIT_PP_TXFILTER_1: ++ case R200_EMIT_PP_TXFILTER_2: ++ case R200_EMIT_PP_TXFILTER_3: ++ case R200_EMIT_PP_TXFILTER_4: ++ case R200_EMIT_PP_TXFILTER_5: ++ case R200_EMIT_VTE_CNTL: ++ case R200_EMIT_OUTPUT_VTX_COMP_SEL: ++ case R200_EMIT_PP_TAM_DEBUG3: ++ case R200_EMIT_PP_CNTL_X: ++ case R200_EMIT_RB3D_DEPTHXY_OFFSET: ++ case R200_EMIT_RE_AUX_SCISSOR_CNTL: ++ case R200_EMIT_RE_SCISSOR_TL_0: ++ case R200_EMIT_RE_SCISSOR_TL_1: ++ case R200_EMIT_RE_SCISSOR_TL_2: ++ case R200_EMIT_SE_VAP_CNTL_STATUS: ++ case R200_EMIT_SE_VTX_STATE_CNTL: ++ case R200_EMIT_RE_POINTSIZE: ++ case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0: ++ case R200_EMIT_PP_CUBIC_FACES_0: ++ case R200_EMIT_PP_CUBIC_FACES_1: ++ case R200_EMIT_PP_CUBIC_FACES_2: ++ case R200_EMIT_PP_CUBIC_FACES_3: ++ case R200_EMIT_PP_CUBIC_FACES_4: ++ case R200_EMIT_PP_CUBIC_FACES_5: ++ case RADEON_EMIT_PP_TEX_SIZE_0: ++ case RADEON_EMIT_PP_TEX_SIZE_1: ++ case RADEON_EMIT_PP_TEX_SIZE_2: ++ case R200_EMIT_RB3D_BLENDCOLOR: ++ case R200_EMIT_TCL_POINT_SPRITE_CNTL: ++ case RADEON_EMIT_PP_CUBIC_FACES_0: ++ case RADEON_EMIT_PP_CUBIC_FACES_1: ++ case RADEON_EMIT_PP_CUBIC_FACES_2: ++ case R200_EMIT_PP_TRI_PERF_CNTL: ++ case R200_EMIT_PP_AFS_0: ++ case R200_EMIT_PP_AFS_1: ++ case R200_EMIT_ATF_TFACTOR: ++ case R200_EMIT_PP_TXCTLALL_0: ++ case R200_EMIT_PP_TXCTLALL_1: ++ case R200_EMIT_PP_TXCTLALL_2: ++ case R200_EMIT_PP_TXCTLALL_3: ++ case R200_EMIT_PP_TXCTLALL_4: ++ case R200_EMIT_PP_TXCTLALL_5: ++ case R200_EMIT_VAP_PVS_CNTL: ++ /* These packets don't contain memory offsets */ ++ break; ++ ++ default: ++ DRM_ERROR("Unknown state packet ID %d\n", id); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * ++ dev_priv, ++ struct drm_file *file_priv, ++ drm_radeon_kcmd_buffer_t * ++ cmdbuf, ++ unsigned int *cmdsz) ++{ ++ u32 *cmd = (u32 *) cmdbuf->buf; ++ u32 offset, narrays; ++ int count, i, k; ++ ++ *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16); ++ ++ if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { ++ DRM_ERROR("Not a type 3 packet\n"); ++ return -EINVAL; ++ } ++ ++ if (4 * *cmdsz > cmdbuf->bufsz) { ++ DRM_ERROR("Packet size larger than size of data provided\n"); ++ return -EINVAL; ++ } ++ ++ switch(cmd[0] & 0xff00) { ++ /* XXX Are there old drivers needing other packets? */ ++ ++ case RADEON_3D_DRAW_IMMD: ++ case RADEON_3D_DRAW_VBUF: ++ case RADEON_3D_DRAW_INDX: ++ case RADEON_WAIT_FOR_IDLE: ++ case RADEON_CP_NOP: ++ case RADEON_3D_CLEAR_ZMASK: ++/* case RADEON_CP_NEXT_CHAR: ++ case RADEON_CP_PLY_NEXTSCAN: ++ case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? 
*/ ++ /* these packets are safe */ ++ break; ++ ++ case RADEON_CP_3D_DRAW_IMMD_2: ++ case RADEON_CP_3D_DRAW_VBUF_2: ++ case RADEON_CP_3D_DRAW_INDX_2: ++ case RADEON_3D_CLEAR_HIZ: ++ /* safe but r200 only */ ++ if ((dev_priv->chip_family < CHIP_R200) || ++ (dev_priv->chip_family > CHIP_RV280)) { ++ DRM_ERROR("Invalid 3d packet for non r200-class chip\n"); ++ return -EINVAL; ++ } ++ break; ++ ++ case RADEON_3D_LOAD_VBPNTR: ++ count = (cmd[0] >> 16) & 0x3fff; ++ ++ if (count > 18) { /* 12 arrays max */ ++ DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", ++ count); ++ return -EINVAL; ++ } ++ ++ /* carefully check packet contents */ ++ narrays = cmd[1] & ~0xc000; ++ k = 0; ++ i = 2; ++ while ((k < narrays) && (i < (count + 2))) { ++ i++; /* skip attribute field */ ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &cmd[i])) { ++ DRM_ERROR ++ ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", ++ k, i); ++ return -EINVAL; ++ } ++ k++; ++ i++; ++ if (k == narrays) ++ break; ++ /* have one more to process, they come in pairs */ ++ if (radeon_check_and_fixup_offset(dev_priv, ++ file_priv, &cmd[i])) ++ { ++ DRM_ERROR ++ ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", ++ k, i); ++ return -EINVAL; ++ } ++ k++; ++ i++; ++ } ++ /* do the counts match what we expect ? */ ++ if ((k != narrays) || (i != (count + 2))) { ++ DRM_ERROR ++ ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", ++ k, i, narrays, count + 1); ++ return -EINVAL; ++ } ++ break; ++ ++ case RADEON_3D_RNDR_GEN_INDX_PRIM: ++ if (dev_priv->chip_family > CHIP_RS200) { ++ DRM_ERROR("Invalid 3d packet for non-r100-class chip\n"); ++ return -EINVAL; ++ } ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) { ++ DRM_ERROR("Invalid rndr_gen_indx offset\n"); ++ return -EINVAL; ++ } ++ break; ++ ++ case RADEON_CP_INDX_BUFFER: ++ /* safe but r200 only */ ++ if ((dev_priv->chip_family < CHIP_R200) || ++ (dev_priv->chip_family > CHIP_RV280)) { ++ DRM_ERROR("Invalid 3d packet for non-r200-class chip\n"); ++ return -EINVAL; ++ } ++ if ((cmd[1] & 0x8000ffff) != 0x80000810) { ++ DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); ++ return -EINVAL; ++ } ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) { ++ DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); ++ return -EINVAL; ++ } ++ break; ++ ++ case RADEON_CNTL_HOSTDATA_BLT: ++ case RADEON_CNTL_PAINT_MULTI: ++ case RADEON_CNTL_BITBLT_MULTI: ++ /* MSB of opcode: next DWORD GUI_CNTL */ ++ if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL ++ | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { ++ offset = cmd[2] << 10; ++ if (radeon_check_and_fixup_offset ++ (dev_priv, file_priv, &offset)) { ++ DRM_ERROR("Invalid first packet offset\n"); ++ return -EINVAL; ++ } ++ cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; ++ } ++ ++ if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && ++ (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { ++ offset = cmd[3] << 10; ++ if (radeon_check_and_fixup_offset ++ (dev_priv, file_priv, &offset)) { ++ DRM_ERROR("Invalid second packet offset\n"); ++ return -EINVAL; ++ } ++ cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; ++ } ++ break; ++ ++ default: ++ DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* ================================================================ ++ * CP hardware state programming functions ++ */ ++ ++static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv, ++ struct drm_clip_rect * box) ++{ ++ 
RING_LOCALS; ++ ++ DRM_DEBUG(" box: x1=%d y1=%d x2=%d y2=%d\n", ++ box->x1, box->y1, box->x2, box->y2); ++ ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0)); ++ OUT_RING((box->y1 << 16) | box->x1); ++ OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0)); ++ OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1)); ++ ADVANCE_RING(); ++} ++ ++/* Emit 1.1 state ++ */ ++static int radeon_emit_state(drm_radeon_private_t * dev_priv, ++ struct drm_file *file_priv, ++ drm_radeon_context_regs_t * ctx, ++ drm_radeon_texture_regs_t * tex, ++ unsigned int dirty) ++{ ++ RING_LOCALS; ++ DRM_DEBUG("dirty=0x%08x\n", dirty); ++ ++ if (dirty & RADEON_UPLOAD_CONTEXT) { ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &ctx->rb3d_depthoffset)) { ++ DRM_ERROR("Invalid depth buffer offset\n"); ++ return -EINVAL; ++ } ++ ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &ctx->rb3d_coloroffset)) { ++ DRM_ERROR("Invalid colour buffer offset\n"); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(14); ++ OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6)); ++ OUT_RING(ctx->pp_misc); ++ OUT_RING(ctx->pp_fog_color); ++ OUT_RING(ctx->re_solid_color); ++ OUT_RING(ctx->rb3d_blendcntl); ++ OUT_RING(ctx->rb3d_depthoffset); ++ OUT_RING(ctx->rb3d_depthpitch); ++ OUT_RING(ctx->rb3d_zstencilcntl); ++ OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2)); ++ OUT_RING(ctx->pp_cntl); ++ OUT_RING(ctx->rb3d_cntl); ++ OUT_RING(ctx->rb3d_coloroffset); ++ OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0)); ++ OUT_RING(ctx->rb3d_colorpitch); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_VERTFMT) { ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0)); ++ OUT_RING(ctx->se_coord_fmt); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_LINE) { ++ BEGIN_RING(5); ++ OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1)); ++ OUT_RING(ctx->re_line_pattern); ++ OUT_RING(ctx->re_line_state); ++ OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0)); ++ OUT_RING(ctx->se_line_width); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_BUMPMAP) { ++ BEGIN_RING(5); ++ OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0)); ++ OUT_RING(ctx->pp_lum_matrix); ++ OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1)); ++ OUT_RING(ctx->pp_rot_matrix_0); ++ OUT_RING(ctx->pp_rot_matrix_1); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_MASKS) { ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2)); ++ OUT_RING(ctx->rb3d_stencilrefmask); ++ OUT_RING(ctx->rb3d_ropcntl); ++ OUT_RING(ctx->rb3d_planemask); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_VIEWPORT) { ++ BEGIN_RING(7); ++ OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5)); ++ OUT_RING(ctx->se_vport_xscale); ++ OUT_RING(ctx->se_vport_xoffset); ++ OUT_RING(ctx->se_vport_yscale); ++ OUT_RING(ctx->se_vport_yoffset); ++ OUT_RING(ctx->se_vport_zscale); ++ OUT_RING(ctx->se_vport_zoffset); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_SETUP) { ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0)); ++ OUT_RING(ctx->se_cntl); ++ OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0)); ++ OUT_RING(ctx->se_cntl_status); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_MISC) { ++ BEGIN_RING(2); ++ OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0)); ++ OUT_RING(ctx->re_misc); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_TEX0) { ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &tex[0].pp_txoffset)) { ++ DRM_ERROR("Invalid texture offset for unit 0\n"); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(9); ++ OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5)); ++
OUT_RING(tex[0].pp_txfilter); ++ OUT_RING(tex[0].pp_txformat); ++ OUT_RING(tex[0].pp_txoffset); ++ OUT_RING(tex[0].pp_txcblend); ++ OUT_RING(tex[0].pp_txablend); ++ OUT_RING(tex[0].pp_tfactor); ++ OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0)); ++ OUT_RING(tex[0].pp_border_color); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_TEX1) { ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &tex[1].pp_txoffset)) { ++ DRM_ERROR("Invalid texture offset for unit 1\n"); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(9); ++ OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5)); ++ OUT_RING(tex[1].pp_txfilter); ++ OUT_RING(tex[1].pp_txformat); ++ OUT_RING(tex[1].pp_txoffset); ++ OUT_RING(tex[1].pp_txcblend); ++ OUT_RING(tex[1].pp_txablend); ++ OUT_RING(tex[1].pp_tfactor); ++ OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0)); ++ OUT_RING(tex[1].pp_border_color); ++ ADVANCE_RING(); ++ } ++ ++ if (dirty & RADEON_UPLOAD_TEX2) { ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, ++ &tex[2].pp_txoffset)) { ++ DRM_ERROR("Invalid texture offset for unit 2\n"); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(9); ++ OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5)); ++ OUT_RING(tex[2].pp_txfilter); ++ OUT_RING(tex[2].pp_txformat); ++ OUT_RING(tex[2].pp_txoffset); ++ OUT_RING(tex[2].pp_txcblend); ++ OUT_RING(tex[2].pp_txablend); ++ OUT_RING(tex[2].pp_tfactor); ++ OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0)); ++ OUT_RING(tex[2].pp_border_color); ++ ADVANCE_RING(); ++ } ++ ++ return 0; ++} ++ ++/* Emit 1.2 state ++ */ ++static int radeon_emit_state2(drm_radeon_private_t * dev_priv, ++ struct drm_file *file_priv, ++ drm_radeon_state_t * state) ++{ ++ RING_LOCALS; ++ ++ if (state->dirty & RADEON_UPLOAD_ZBIAS) { ++ BEGIN_RING(3); ++ OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1)); ++ OUT_RING(state->context2.se_zbias_factor); ++ OUT_RING(state->context2.se_zbias_constant); ++ ADVANCE_RING(); ++ } ++ ++ return radeon_emit_state(dev_priv, file_priv, &state->context, ++ state->tex, state->dirty); ++} ++ ++/* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in ++ * 1.3 cmdbuffers allow all previous state to be updated as well as ++ * the tcl scalar and vector areas. 
++ */ ++static struct { ++ int start; ++ int len; ++ const char *name; ++} packet[RADEON_MAX_STATE_PACKETS] = { ++ {RADEON_PP_MISC, 7, "RADEON_PP_MISC"}, ++ {RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"}, ++ {RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"}, ++ {RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"}, ++ {RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"}, ++ {RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"}, ++ {RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"}, ++ {RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"}, ++ {RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"}, ++ {RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"}, ++ {RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"}, ++ {RADEON_RE_MISC, 1, "RADEON_RE_MISC"}, ++ {RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"}, ++ {RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"}, ++ {RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"}, ++ {RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"}, ++ {RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"}, ++ {RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"}, ++ {RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"}, ++ {RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"}, ++ {RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17, ++ "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"}, ++ {R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"}, ++ {R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"}, ++ {R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"}, ++ {R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"}, ++ {R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"}, ++ {R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"}, ++ {R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"}, ++ {R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"}, ++ {R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"}, ++ {R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"}, ++ {R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"}, ++ {R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"}, ++ {R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"}, ++ {R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"}, ++ {R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"}, ++ {R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"}, ++ {R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"}, ++ {R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"}, ++ {R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"}, ++ {R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"}, ++ {R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"}, ++ {R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"}, ++ {R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"}, ++ {R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"}, ++ {R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"}, ++ {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"}, ++ {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"}, ++ {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"}, ++ {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, ++ "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"}, ++ {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"}, ++ {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"}, ++ {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"}, ++ {R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"}, ++ {R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"}, ++ {R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"}, ++ {R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"}, ++ {R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"}, ++ {R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"}, ++ {R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"}, ++ {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, ++ "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"}, ++ {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */ ++ 
{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */ ++ {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"}, ++ {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"}, ++ {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"}, ++ {R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"}, ++ {R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"}, ++ {R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"}, ++ {R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"}, ++ {R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"}, ++ {R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"}, ++ {R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"}, ++ {RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"}, ++ {RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"}, ++ {RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"}, ++ {R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"}, ++ {R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"}, ++ {RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"}, ++ {RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"}, ++ {RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"}, ++ {RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"}, ++ {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"}, ++ {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"}, ++ {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"}, ++ {R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */ ++ {R200_PP_AFS_1, 32, "R200_PP_AFS_1"}, ++ {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"}, ++ {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"}, ++ {R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"}, ++ {R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"}, ++ {R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"}, ++ {R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"}, ++ {R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"}, ++ {R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"}, ++}; ++ ++/* ================================================================ ++ * Performance monitoring functions ++ */ ++ ++static void radeon_clear_box(drm_radeon_private_t * dev_priv, ++ int x, int y, int w, int h, int r, int g, int b) ++{ ++ u32 color; ++ RING_LOCALS; ++ ++ x += dev_priv->sarea_priv->boxes[0].x1; ++ y += dev_priv->sarea_priv->boxes[0].y1; ++ ++ switch (dev_priv->color_fmt) { ++ case RADEON_COLOR_FORMAT_RGB565: ++ color = (((r & 0xf8) << 8) | ++ ((g & 0xfc) << 3) | ((b & 0xf8) >> 3)); ++ break; ++ case RADEON_COLOR_FORMAT_ARGB8888: ++ default: ++ color = (((0xff) << 24) | (r << 16) | (g << 8) | b); ++ break; ++ } ++ ++ BEGIN_RING(4); ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0)); ++ OUT_RING(0xffffffff); ++ ADVANCE_RING(); ++ ++ BEGIN_RING(6); ++ ++ OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL | ++ RADEON_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv->color_fmt << 8) | ++ RADEON_GMC_SRC_DATATYPE_COLOR | ++ RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS); ++ ++ if (dev_priv->sarea_priv->pfCurrentPage == 1) { ++ OUT_RING(dev_priv->front_pitch_offset); ++ } else { ++ OUT_RING(dev_priv->back_pitch_offset); ++ } ++ ++ OUT_RING(color); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++} ++ ++static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv) ++{ ++ /* Collapse various things into a wait flag -- trying to ++ * guess if userspace slept -- better just to have them tell us.
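++ * (In other words: a client that re-read the frame age more than once,
++ * read the clear age past the clear count, or looped the freelist
++ * almost certainly stalled on the hardware, so each of those cases
++ * lights the WAIT_IDLE box below.)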
++ */ ++ if (dev_priv->stats.last_frame_reads > 1 || ++ dev_priv->stats.last_clear_reads > dev_priv->stats.clears) { ++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ++ } ++ ++ if (dev_priv->stats.freelist_loops) { ++ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; ++ } ++ ++ /* Purple box for page flipping ++ */ ++ if (dev_priv->stats.boxes & RADEON_BOX_FLIP) ++ radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255); ++ ++ /* Red box if we have to wait for idle at any point ++ */ ++ if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE) ++ radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0); ++ ++ /* Blue box: lost context? ++ */ ++ ++ /* Yellow box for texture swaps ++ */ ++ if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD) ++ radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0); ++ ++ /* Green box if hardware never idles (as far as we can tell) ++ */ ++ if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) ++ radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); ++ ++ /* Draw bars indicating number of buffers allocated ++ * (not a great measure, easily confused) ++ */ ++ if (dev_priv->stats.requested_bufs) { ++ if (dev_priv->stats.requested_bufs > 100) ++ dev_priv->stats.requested_bufs = 100; ++ ++ radeon_clear_box(dev_priv, 4, 16, ++ dev_priv->stats.requested_bufs, 4, ++ 196, 128, 128); ++ } ++ ++ memset(&dev_priv->stats, 0, sizeof(dev_priv->stats)); ++ ++} ++ ++/* ================================================================ ++ * CP command dispatch functions ++ */ ++ ++static void radeon_cp_dispatch_clear(struct drm_device * dev, ++ drm_radeon_clear_t * clear, ++ drm_radeon_clear_rect_t * depth_boxes) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ unsigned int flags = clear->flags; ++ u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("flags = 0x%x\n", flags); ++ ++ dev_priv->stats.clears++; ++ ++ if (dev_priv->sarea_priv->pfCurrentPage == 1) { ++ unsigned int tmp = flags; ++ ++ flags &= ~(RADEON_FRONT | RADEON_BACK); ++ if (tmp & RADEON_FRONT) ++ flags |= RADEON_BACK; ++ if (tmp & RADEON_BACK) ++ flags |= RADEON_FRONT; ++ } ++ ++ if (flags & (RADEON_FRONT | RADEON_BACK)) { ++ ++ BEGIN_RING(4); ++ ++ /* Ensure the 3D stream is idle before doing a ++ * 2D fill to clear the front or back buffer. ++ */ ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ++ OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0)); ++ OUT_RING(clear->color_mask); ++ ++ ADVANCE_RING(); ++ ++ /* Make sure we restore the 3D state next time. 
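++ * (ctx_owner in the sarea names the context that last emitted 3D
++ * state; zeroing it below forces the next rendering client to
++ * re-emit its state rather than trust what this 2D fill left behind.)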
++ */ ++ dev_priv->sarea_priv->ctx_owner = 0; ++ ++ for (i = 0; i < nbox; i++) { ++ int x = pbox[i].x1; ++ int y = pbox[i].y1; ++ int w = pbox[i].x2 - x; ++ int h = pbox[i].y2 - y; ++ ++ DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n", ++ x, y, w, h, flags); ++ ++ if (flags & RADEON_FRONT) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CP_PACKET3 ++ (RADEON_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL | ++ RADEON_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv-> ++ color_fmt << 8) | ++ RADEON_GMC_SRC_DATATYPE_COLOR | ++ RADEON_ROP3_P | ++ RADEON_GMC_CLR_CMP_CNTL_DIS); ++ ++ OUT_RING(dev_priv->front_pitch_offset); ++ OUT_RING(clear->clear_color); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ ++ if (flags & RADEON_BACK) { ++ BEGIN_RING(6); ++ ++ OUT_RING(CP_PACKET3 ++ (RADEON_CNTL_PAINT_MULTI, 4)); ++ OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL | ++ RADEON_GMC_BRUSH_SOLID_COLOR | ++ (dev_priv-> ++ color_fmt << 8) | ++ RADEON_GMC_SRC_DATATYPE_COLOR | ++ RADEON_ROP3_P | ++ RADEON_GMC_CLR_CMP_CNTL_DIS); ++ ++ OUT_RING(dev_priv->back_pitch_offset); ++ OUT_RING(clear->clear_color); ++ ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ } ++ } ++ ++ /* hyper z clear */ ++ /* no docs available, based on reverse engineering by Stephane Marchesin */ ++ if ((flags & (RADEON_DEPTH | RADEON_STENCIL)) ++ && (flags & RADEON_CLEAR_FASTZ)) { ++ ++ int i; ++ int depthpixperline = ++ dev_priv->depth_fmt == ++ RADEON_DEPTH_FORMAT_16BIT_INT_Z ? (dev_priv->depth_pitch / ++ 2) : (dev_priv-> ++ depth_pitch / 4); ++ ++ u32 clearmask; ++ ++ u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth | ++ ((clear->depth_mask & 0xff) << 24); ++ ++ /* Make sure we restore the 3D state next time. ++ * we haven't touched any "normal" state - still need this? ++ */ ++ dev_priv->sarea_priv->ctx_owner = 0; ++ ++ if ((dev_priv->flags & RADEON_HAS_HIERZ) ++ && (flags & RADEON_USE_HIERZ)) { ++ /* FIXME : reverse engineer that for Rx00 cards */ ++ /* FIXME : the mask supposedly contains low-res z values. So can't set ++ just to the max (0xff? or actually 0x3fff?), need to take z clear ++ value into account? */ ++ /* pattern seems to work for r100, though get slight ++ rendering errors with glxgears. If hierz is not enabled for r100, ++ only 4 bits which indicate clear (15,16,31,32, all zero) matter, the ++ other ones are ignored, and the same clear mask can be used. That's ++ very different behaviour than R200 which needs different clear mask ++ and different number of tiles to clear if hierz is enabled or not !?! ++ */ ++ clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f; ++ } else { ++ /* clear mask : chooses the clearing pattern. ++ rv250: could be used to clear only parts of macrotiles ++ (but that would get really complicated...)? ++ bit 0 and 1 (either or both of them ?!?!) are used to ++ not clear tile (or maybe one of the bits indicates if the tile is ++ compressed or not), bit 2 and 3 to not clear tile 1,...,. ++ Pattern is as follows: ++ | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29| ++ bits ------------------------------------------------- ++ | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31| ++ rv100: clearmask covers 2x8 4x1 tiles, but one clear still ++ covers 256 pixels ?!? ++ */ ++ clearmask = 0x0; ++ } ++ ++ BEGIN_RING(8); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE, ++ tempRB3D_DEPTHCLEARVALUE); ++ /* what offset is this exactly ?
*/ ++ OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0); ++ /* need ctlstat, otherwise get some strange black flickering */ ++ OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT, ++ RADEON_RB3D_ZC_FLUSH_ALL); ++ ADVANCE_RING(); ++ ++ for (i = 0; i < nbox; i++) { ++ int tileoffset, nrtilesx, nrtilesy, j; ++ /* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */ ++ if ((dev_priv->flags & RADEON_HAS_HIERZ) ++ && (dev_priv->chip_family < CHIP_R200)) { ++ /* FIXME : figure this out for r200 (when hierz is enabled). Or ++ maybe r200 actually doesn't need to put the low-res z value into ++ the tile cache like r100, but just needs to clear the hi-level z-buffer? ++ Works for R100, both with hierz and without. ++ R100 seems to operate on 2x1 8x8 tiles, but... ++ odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially ++ problematic with resolutions which are not 64 pix aligned? */ ++ tileoffset = ++ ((pbox[i].y1 >> 3) * depthpixperline + ++ pbox[i].x1) >> 6; ++ nrtilesx = ++ ((pbox[i].x2 & ~63) - ++ (pbox[i].x1 & ~63)) >> 4; ++ nrtilesy = ++ (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3); ++ for (j = 0; j <= nrtilesy; j++) { ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET3 ++ (RADEON_3D_CLEAR_ZMASK, 2)); ++ /* first tile */ ++ OUT_RING(tileoffset * 8); ++ /* the number of tiles to clear */ ++ OUT_RING(nrtilesx + 4); ++ /* clear mask : chooses the clearing pattern. */ ++ OUT_RING(clearmask); ++ ADVANCE_RING(); ++ tileoffset += depthpixperline >> 6; ++ } ++ } else if ((dev_priv->chip_family >= CHIP_R200) && ++ (dev_priv->chip_family <= CHIP_RV280)) { ++ /* works for rv250. */ ++ /* find first macro tile (8x2 4x4 z-pixels on rv250) */ ++ tileoffset = ++ ((pbox[i].y1 >> 3) * depthpixperline + ++ pbox[i].x1) >> 5; ++ nrtilesx = ++ (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5); ++ nrtilesy = ++ (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3); ++ for (j = 0; j <= nrtilesy; j++) { ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET3 ++ (RADEON_3D_CLEAR_ZMASK, 2)); ++ /* first tile */ ++ /* judging by the first tile offset needed, could possibly ++ directly address/clear 4x4 tiles instead of 8x2 * 4x4 ++ macro tiles, though would still need clear mask for ++ right/bottom if truly 4x4 granularity is desired ? */ ++ OUT_RING(tileoffset * 16); ++ /* the number of tiles to clear */ ++ OUT_RING(nrtilesx + 1); ++ /* clear mask : chooses the clearing pattern. */ ++ OUT_RING(clearmask); ++ ADVANCE_RING(); ++ tileoffset += depthpixperline >> 5; ++ } ++ } else { /* rv 100 */ ++ /* rv100 might not need 64 pix alignment, who knows */ ++ /* offsets are, hmm, weird */ ++ tileoffset = ++ ((pbox[i].y1 >> 4) * depthpixperline + ++ pbox[i].x1) >> 6; ++ nrtilesx = ++ ((pbox[i].x2 & ~63) - ++ (pbox[i].x1 & ~63)) >> 4; ++ nrtilesy = ++ (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4); ++ for (j = 0; j <= nrtilesy; j++) { ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET3 ++ (RADEON_3D_CLEAR_ZMASK, 2)); ++ OUT_RING(tileoffset * 128); ++ /* the number of tiles to clear */ ++ OUT_RING(nrtilesx + 4); ++ /* clear mask : chooses the clearing pattern. */ ++ OUT_RING(clearmask); ++ ADVANCE_RING(); ++ tileoffset += depthpixperline >> 6; ++ } ++ } ++ } ++ ++ /* TODO don't always clear all hi-level z tiles */ ++ if ((dev_priv->flags & RADEON_HAS_HIERZ) ++ && ((dev_priv->chip_family >= CHIP_R200) && ++ (dev_priv->chip_family <= CHIP_RV280)) ++ && (flags & RADEON_USE_HIERZ)) ++ /* r100 and cards without hierarchical z-buffer have no high-level z-buffer */ ++ /* FIXME : the mask supposedly contains low-res z values. So can't set ++ just to the max (0xff?
or actually 0x3fff?), need to take z clear ++ value into account? */ ++ { ++ BEGIN_RING(4); ++ OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2)); ++ OUT_RING(0x0); /* First tile */ ++ OUT_RING(0x3cc0); ++ OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f); ++ ADVANCE_RING(); ++ } ++ } ++ ++ /* We have to clear the depth and/or stencil buffers by ++ * rendering a quad into just those buffers. Thus, we have to ++ * make sure the 3D engine is configured correctly. ++ */ ++ else if ((dev_priv->chip_family >= CHIP_R200) && ++ (dev_priv->chip_family <= CHIP_RV280) && ++ (flags & (RADEON_DEPTH | RADEON_STENCIL))) { ++ ++ int tempPP_CNTL; ++ int tempRE_CNTL; ++ int tempRB3D_CNTL; ++ int tempRB3D_ZSTENCILCNTL; ++ int tempRB3D_STENCILREFMASK; ++ int tempRB3D_PLANEMASK; ++ int tempSE_CNTL; ++ int tempSE_VTE_CNTL; ++ int tempSE_VTX_FMT_0; ++ int tempSE_VTX_FMT_1; ++ int tempSE_VAP_CNTL; ++ int tempRE_AUX_SCISSOR_CNTL; ++ ++ tempPP_CNTL = 0; ++ tempRE_CNTL = 0; ++ ++ tempRB3D_CNTL = depth_clear->rb3d_cntl; ++ ++ tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl; ++ tempRB3D_STENCILREFMASK = 0x0; ++ ++ tempSE_CNTL = depth_clear->se_cntl; ++ ++ /* Disable TCL */ ++ ++ tempSE_VAP_CNTL = ( /* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */ ++ (0x9 << ++ SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT)); ++ ++ tempRB3D_PLANEMASK = 0x0; ++ ++ tempRE_AUX_SCISSOR_CNTL = 0x0; ++ ++ tempSE_VTE_CNTL = ++ SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK; ++ ++ /* Vertex format (X, Y, Z, W) */ ++ tempSE_VTX_FMT_0 = ++ SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK | ++ SE_VTX_FMT_0__VTX_W0_PRESENT_MASK; ++ tempSE_VTX_FMT_1 = 0x0; ++ ++ /* ++ * Depth buffer specific enables ++ */ ++ if (flags & RADEON_DEPTH) { ++ /* Enable depth buffer */ ++ tempRB3D_CNTL |= RADEON_Z_ENABLE; ++ } else { ++ /* Disable depth buffer */ ++ tempRB3D_CNTL &= ~RADEON_Z_ENABLE; ++ } ++ ++ /* ++ * Stencil buffer specific enables ++ */ ++ if (flags & RADEON_STENCIL) { ++ tempRB3D_CNTL |= RADEON_STENCIL_ENABLE; ++ tempRB3D_STENCILREFMASK = clear->depth_mask; ++ } else { ++ tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE; ++ tempRB3D_STENCILREFMASK = 0x00000000; ++ } ++ ++ if (flags & RADEON_USE_COMP_ZBUF) { ++ tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE | ++ RADEON_Z_DECOMPRESSION_ENABLE; ++ } ++ if (flags & RADEON_USE_HIERZ) { ++ tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE; ++ } ++ ++ BEGIN_RING(26); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ++ OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL); ++ OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL); ++ OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL); ++ OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL); ++ OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, ++ tempRB3D_STENCILREFMASK); ++ OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK); ++ OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL); ++ OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL); ++ OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0); ++ OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1); ++ OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL); ++ OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL); ++ ADVANCE_RING(); ++ ++ /* Make sure we restore the 3D state next time. ++ */ ++ dev_priv->sarea_priv->ctx_owner = 0; ++ ++ for (i = 0; i < nbox; i++) { ++ ++ /* Funny that this should be required -- ++ * sets top-left? 
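++ * (radeon_emit_clip_rect() above writes RADEON_RE_TOP_LEFT and
++ * RADEON_RE_WIDTH_HEIGHT, so the call below does re-program the
++ * scissor rectangle for every box.)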
++ */ ++ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); ++ ++ BEGIN_RING(14); ++ OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12)); ++ OUT_RING((RADEON_PRIM_TYPE_RECT_LIST | ++ RADEON_PRIM_WALK_RING | ++ (3 << RADEON_NUM_VERTICES_SHIFT))); ++ OUT_RING(depth_boxes[i].ui[CLEAR_X1]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_Y1]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]); ++ OUT_RING(0x3f800000); ++ OUT_RING(depth_boxes[i].ui[CLEAR_X1]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]); ++ OUT_RING(0x3f800000); ++ OUT_RING(depth_boxes[i].ui[CLEAR_X2]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]); ++ OUT_RING(0x3f800000); ++ ADVANCE_RING(); ++ } ++ } else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) { ++ ++ int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl; ++ ++ rb3d_cntl = depth_clear->rb3d_cntl; ++ ++ if (flags & RADEON_DEPTH) { ++ rb3d_cntl |= RADEON_Z_ENABLE; ++ } else { ++ rb3d_cntl &= ~RADEON_Z_ENABLE; ++ } ++ ++ if (flags & RADEON_STENCIL) { ++ rb3d_cntl |= RADEON_STENCIL_ENABLE; ++ rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */ ++ } else { ++ rb3d_cntl &= ~RADEON_STENCIL_ENABLE; ++ rb3d_stencilrefmask = 0x00000000; ++ } ++ ++ if (flags & RADEON_USE_COMP_ZBUF) { ++ tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE | ++ RADEON_Z_DECOMPRESSION_ENABLE; ++ } ++ if (flags & RADEON_USE_HIERZ) { ++ tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE; ++ } ++ ++ BEGIN_RING(13); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ++ OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1)); ++ OUT_RING(0x00000000); ++ OUT_RING(rb3d_cntl); ++ ++ OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL); ++ OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask); ++ OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000); ++ OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl); ++ ADVANCE_RING(); ++ ++ /* Make sure we restore the 3D state next time. ++ */ ++ dev_priv->sarea_priv->ctx_owner = 0; ++ ++ for (i = 0; i < nbox; i++) { ++ ++ /* Funny that this should be required -- ++ * sets top-left? ++ */ ++ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); ++ ++ BEGIN_RING(15); ++ ++ OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13)); ++ OUT_RING(RADEON_VTX_Z_PRESENT | ++ RADEON_VTX_PKCOLOR_PRESENT); ++ OUT_RING((RADEON_PRIM_TYPE_RECT_LIST | ++ RADEON_PRIM_WALK_RING | ++ RADEON_MAOS_ENABLE | ++ RADEON_VTX_FMT_RADEON_MODE | ++ (3 << RADEON_NUM_VERTICES_SHIFT))); ++ ++ OUT_RING(depth_boxes[i].ui[CLEAR_X1]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_Y1]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]); ++ OUT_RING(0x0); ++ ++ OUT_RING(depth_boxes[i].ui[CLEAR_X1]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]); ++ OUT_RING(0x0); ++ ++ OUT_RING(depth_boxes[i].ui[CLEAR_X2]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_Y2]); ++ OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]); ++ OUT_RING(0x0); ++ ++ ADVANCE_RING(); ++ } ++ } ++ ++ /* Increment the clear counter. The client-side 3D driver must ++ * wait on this value before performing the clear ioctl. We ++ * need this because the card's so damned fast... 
++ */ ++ dev_priv->sarea_priv->last_clear++; ++ ++ BEGIN_RING(4); ++ ++ RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear); ++ RADEON_WAIT_UNTIL_IDLE(); ++ ++ ADVANCE_RING(); ++} ++ ++static void radeon_cp_dispatch_swap(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int nbox = sarea_priv->nbox; ++ struct drm_clip_rect *pbox = sarea_priv->boxes; ++ int i; ++ RING_LOCALS; ++ DRM_DEBUG("\n"); ++ ++ /* Do some trivial performance monitoring... ++ */ ++ if (dev_priv->do_boxes) ++ radeon_cp_performance_boxes(dev_priv); ++ ++ /* Wait for the 3D stream to idle before dispatching the bitblt. ++ * This will prevent data corruption between the two streams. ++ */ ++ BEGIN_RING(2); ++ ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ++ ADVANCE_RING(); ++ ++ for (i = 0; i < nbox; i++) { ++ int x = pbox[i].x1; ++ int y = pbox[i].y1; ++ int w = pbox[i].x2 - x; ++ int h = pbox[i].y2 - y; ++ ++ DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h); ++ ++ BEGIN_RING(9); ++ ++ OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0)); ++ OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL | ++ RADEON_GMC_DST_PITCH_OFFSET_CNTL | ++ RADEON_GMC_BRUSH_NONE | ++ (dev_priv->color_fmt << 8) | ++ RADEON_GMC_SRC_DATATYPE_COLOR | ++ RADEON_ROP3_S | ++ RADEON_DP_SRC_SOURCE_MEMORY | ++ RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS); ++ ++ /* Make this work even if front & back are flipped: ++ */ ++ OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1)); ++ if (dev_priv->sarea_priv->pfCurrentPage == 0) { ++ OUT_RING(dev_priv->back_pitch_offset); ++ OUT_RING(dev_priv->front_pitch_offset); ++ } else { ++ OUT_RING(dev_priv->front_pitch_offset); ++ OUT_RING(dev_priv->back_pitch_offset); ++ } ++ ++ OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2)); ++ OUT_RING((x << 16) | y); ++ OUT_RING((x << 16) | y); ++ OUT_RING((w << 16) | h); ++ ++ ADVANCE_RING(); ++ } ++ ++ /* Increment the frame counter. The client-side 3D driver must ++ * throttle the framerate by waiting for this value before ++ * performing the swapbuffer ioctl. ++ */ ++ dev_priv->sarea_priv->last_frame++; ++ ++ BEGIN_RING(4); ++ ++ RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ++ ADVANCE_RING(); ++} ++ ++static void radeon_cp_dispatch_flip(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle; ++ int offset = (dev_priv->sarea_priv->pfCurrentPage == 1) ++ ? dev_priv->front_offset : dev_priv->back_offset; ++ RING_LOCALS; ++ DRM_DEBUG("pfCurrentPage=%d\n", ++ dev_priv->sarea_priv->pfCurrentPage); ++ ++ /* Do some trivial performance monitoring... ++ */ ++ if (dev_priv->do_boxes) { ++ dev_priv->stats.boxes |= RADEON_BOX_FLIP; ++ radeon_cp_performance_boxes(dev_priv); ++ } ++ ++ /* Update the frame offsets for both CRTCs ++ */ ++ BEGIN_RING(6); ++ ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ OUT_RING_REG(RADEON_CRTC_OFFSET, ++ ((sarea->frame.y * dev_priv->front_pitch + ++ sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7) ++ + offset); ++ OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base ++ + offset); ++ ++ ADVANCE_RING(); ++ ++ /* Increment the frame counter. The client-side 3D driver must ++ * throttle the framerate by waiting for this value before ++ * performing the swapbuffer ioctl. 
++ */ ++ dev_priv->sarea_priv->last_frame++; ++ dev_priv->sarea_priv->pfCurrentPage = ++ 1 - dev_priv->sarea_priv->pfCurrentPage; ++ ++ BEGIN_RING(2); ++ ++ RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame); ++ ++ ADVANCE_RING(); ++} ++ ++static int bad_prim_vertex_nr(int primitive, int nr) ++{ ++ switch (primitive & RADEON_PRIM_TYPE_MASK) { ++ case RADEON_PRIM_TYPE_NONE: ++ case RADEON_PRIM_TYPE_POINT: ++ return nr < 1; ++ case RADEON_PRIM_TYPE_LINE: ++ return (nr & 1) || nr == 0; ++ case RADEON_PRIM_TYPE_LINE_STRIP: ++ return nr < 2; ++ case RADEON_PRIM_TYPE_TRI_LIST: ++ case RADEON_PRIM_TYPE_3VRT_POINT_LIST: ++ case RADEON_PRIM_TYPE_3VRT_LINE_LIST: ++ case RADEON_PRIM_TYPE_RECT_LIST: ++ return nr % 3 || nr == 0; ++ case RADEON_PRIM_TYPE_TRI_FAN: ++ case RADEON_PRIM_TYPE_TRI_STRIP: ++ return nr < 3; ++ default: ++ return 1; ++ } ++} ++ ++typedef struct { ++ unsigned int start; ++ unsigned int finish; ++ unsigned int prim; ++ unsigned int numverts; ++ unsigned int offset; ++ unsigned int vc_format; ++} drm_radeon_tcl_prim_t; ++ ++static void radeon_cp_dispatch_vertex(struct drm_device * dev, ++ struct drm_buf * buf, ++ drm_radeon_tcl_prim_t * prim) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start; ++ int numverts = (int)prim->numverts; ++ int nbox = sarea_priv->nbox; ++ int i = 0; ++ RING_LOCALS; ++ ++ DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n", ++ prim->prim, ++ prim->vc_format, prim->start, prim->finish, prim->numverts); ++ ++ if (bad_prim_vertex_nr(prim->prim, prim->numverts)) { ++ DRM_ERROR("bad prim %x numverts %d\n", ++ prim->prim, prim->numverts); ++ return; ++ } ++ ++ do { ++ /* Emit the next cliprect */ ++ if (i < nbox) { ++ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); ++ } ++ ++ /* Emit the vertex buffer rendering commands */ ++ BEGIN_RING(5); ++ ++ OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3)); ++ OUT_RING(offset); ++ OUT_RING(numverts); ++ OUT_RING(prim->vc_format); ++ OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST | ++ RADEON_COLOR_ORDER_RGBA | ++ RADEON_VTX_FMT_RADEON_MODE | ++ (numverts << RADEON_NUM_VERTICES_SHIFT)); ++ ++ ADVANCE_RING(); ++ ++ i++; ++ } while (i < nbox); ++} ++ ++static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_buf_priv_t *buf_priv = buf->dev_private; ++ RING_LOCALS; ++ ++ buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; ++ ++ /* Emit the vertex buffer age */ ++ BEGIN_RING(2); ++ RADEON_DISPATCH_AGE(buf_priv->age); ++ ADVANCE_RING(); ++ ++ buf->pending = 1; ++ buf->used = 0; ++} ++ ++static void radeon_cp_dispatch_indirect(struct drm_device * dev, ++ struct drm_buf * buf, int start, int end) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end); ++ ++ if (start != end) { ++ int offset = (dev_priv->gart_buffers_offset ++ + buf->offset + start); ++ int dwords = (end - start + 3) / sizeof(u32); ++ ++ /* Indirect buffer data must be an even number of ++ * dwords, so if we've been given an odd number we must ++ * pad the data with a Type-2 CP packet. 
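++		 * (RADEON_CP_PACKET2 appears to be a one-dword NOP/filler
++		 * packet that the CP skips, so the padding is harmless.)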
++ */ ++ if (dwords & 1) { ++ u32 *data = (u32 *) ++ ((char *)dev->agp_buffer_map->handle ++ + buf->offset + start); ++ data[dwords++] = RADEON_CP_PACKET2; ++ } ++ ++ /* Fire off the indirect buffer */ ++ BEGIN_RING(3); ++ ++ OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1)); ++ OUT_RING(offset); ++ OUT_RING(dwords); ++ ++ ADVANCE_RING(); ++ } ++} ++ ++static void radeon_cp_dispatch_indices(struct drm_device * dev, ++ struct drm_buf * elt_buf, ++ drm_radeon_tcl_prim_t * prim) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ int offset = dev_priv->gart_buffers_offset + prim->offset; ++ u32 *data; ++ int dwords; ++ int i = 0; ++ int start = prim->start + RADEON_INDEX_PRIM_OFFSET; ++ int count = (prim->finish - start) / sizeof(u16); ++ int nbox = sarea_priv->nbox; ++ ++ DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n", ++ prim->prim, ++ prim->vc_format, ++ prim->start, prim->finish, prim->offset, prim->numverts); ++ ++ if (bad_prim_vertex_nr(prim->prim, count)) { ++ DRM_ERROR("bad prim %x count %d\n", prim->prim, count); ++ return; ++ } ++ ++ if (start >= prim->finish || (prim->start & 0x7)) { ++ DRM_ERROR("buffer prim %d\n", prim->prim); ++ return; ++ } ++ ++ dwords = (prim->finish - prim->start + 3) / sizeof(u32); ++ ++ data = (u32 *) ((char *)dev->agp_buffer_map->handle + ++ elt_buf->offset + prim->start); ++ ++ data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2); ++ data[1] = offset; ++ data[2] = prim->numverts; ++ data[3] = prim->vc_format; ++ data[4] = (prim->prim | ++ RADEON_PRIM_WALK_IND | ++ RADEON_COLOR_ORDER_RGBA | ++ RADEON_VTX_FMT_RADEON_MODE | ++ (count << RADEON_NUM_VERTICES_SHIFT)); ++ ++ do { ++ if (i < nbox) ++ radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); ++ ++ radeon_cp_dispatch_indirect(dev, elt_buf, ++ prim->start, prim->finish); ++ ++ i++; ++ } while (i < nbox); ++ ++} ++ ++#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE ++ ++static int radeon_cp_dispatch_texture(struct drm_device * dev, ++ struct drm_file *file_priv, ++ drm_radeon_texture_t * tex, ++ drm_radeon_tex_image_t * image) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_buf *buf; ++ u32 format; ++ u32 *buffer; ++ const u8 __user *data; ++ int size, dwords, tex_width, blit_width, spitch; ++ u32 height; ++ int i; ++ u32 texpitch, microtile; ++ u32 offset, byte_offset; ++ RING_LOCALS; ++ ++ if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) { ++ DRM_ERROR("Invalid destination offset\n"); ++ return -EINVAL; ++ } ++ ++ dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD; ++ ++ /* Flush the pixel cache. This ensures no pixel data gets mixed ++ * up with the texture data from the host data blit, otherwise ++ * part of the texture image may be corrupted. ++ */ ++ BEGIN_RING(4); ++ RADEON_FLUSH_CACHE(); ++ RADEON_WAIT_UNTIL_IDLE(); ++ ADVANCE_RING(); ++ ++ /* The compiler won't optimize away a division by a variable, ++ * even if the only legal values are powers of two. Thus, we'll ++ * use a shift instead. 
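++	 * (E.g. "spitch = blit_width >> 6" below stands in for dividing
++	 * by the blitter's 64-byte pitch granularity.)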
++ */ ++ switch (tex->format) { ++ case RADEON_TXFORMAT_ARGB8888: ++ case RADEON_TXFORMAT_RGBA8888: ++ format = RADEON_COLOR_FORMAT_ARGB8888; ++ tex_width = tex->width * 4; ++ blit_width = image->width * 4; ++ break; ++ case RADEON_TXFORMAT_AI88: ++ case RADEON_TXFORMAT_ARGB1555: ++ case RADEON_TXFORMAT_RGB565: ++ case RADEON_TXFORMAT_ARGB4444: ++ case RADEON_TXFORMAT_VYUY422: ++ case RADEON_TXFORMAT_YVYU422: ++ format = RADEON_COLOR_FORMAT_RGB565; ++ tex_width = tex->width * 2; ++ blit_width = image->width * 2; ++ break; ++ case RADEON_TXFORMAT_I8: ++ case RADEON_TXFORMAT_RGB332: ++ format = RADEON_COLOR_FORMAT_CI8; ++ tex_width = tex->width * 1; ++ blit_width = image->width * 1; ++ break; ++ default: ++ DRM_ERROR("invalid texture format %d\n", tex->format); ++ return -EINVAL; ++ } ++ spitch = blit_width >> 6; ++ if (spitch == 0 && image->height > 1) ++ return -EINVAL; ++ ++ texpitch = tex->pitch; ++ if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { ++ microtile = 1; ++ if (tex_width < 64) { ++ texpitch &= ~(RADEON_DST_TILE_MICRO >> 22); ++ /* we got tiled coordinates, untile them */ ++ image->x *= 2; ++ } ++ } else ++ microtile = 0; ++ ++ /* this might fail for zero-sized uploads - are those illegal? */ ++ if (!radeon_check_offset(dev_priv, tex->offset + image->height * ++ blit_width - 1)) { ++ DRM_ERROR("Invalid final destination offset\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width); ++ ++ do { ++ DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n", ++ tex->offset >> 10, tex->pitch, tex->format, ++ image->x, image->y, image->width, image->height); ++ ++ /* Make a copy of some parameters in case we have to ++ * update them for a multi-pass texture blit. ++ */ ++ height = image->height; ++ data = (const u8 __user *)image->data; ++ ++ size = height * blit_width; ++ ++ if (size > RADEON_MAX_TEXTURE_SIZE) { ++ height = RADEON_MAX_TEXTURE_SIZE / blit_width; ++ size = height * blit_width; ++ } else if (size < 4 && size > 0) { ++ size = 4; ++ } else if (size == 0) { ++ return 0; ++ } ++ ++ buf = radeon_freelist_get(dev); ++ if (0 && !buf) { ++ radeon_do_cp_idle(dev_priv); ++ buf = radeon_freelist_get(dev); ++ } ++ if (!buf) { ++ DRM_DEBUG("EAGAIN\n"); ++ if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image))) ++ return -EFAULT; ++ return -EAGAIN; ++ } ++ ++ /* Dispatch the indirect buffer. ++ */ ++ buffer = ++ (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); ++ dwords = size / 4; ++ ++#define RADEON_COPY_MT(_buf, _data, _width) \ ++ do { \ ++ if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\ ++ DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \ ++ return -EFAULT; \ ++ } \ ++ } while(0) ++ ++ if (microtile) { ++ /* texture micro tiling in use, minimum texture width is thus 16 bytes. ++ however, we cannot use blitter directly for texture width < 64 bytes, ++ since minimum tex pitch is 64 bytes and we need this to match ++ the texture width, otherwise the blitter will tile it wrong. ++ Thus, tiling manually in this case. Additionally, need to special ++ case tex height = 1, since our actual image will have height 2 ++ and we need to ensure we don't read beyond the texture size ++ from user space. 
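(The staggered copies at dword offsets
++		   0, 8, 4, 12 in the tex_width == 32 case below presumably
++		   replicate the hardware micro-tile layout by hand.)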
*/ ++ if (tex->height == 1) { ++ if (tex_width >= 64 || tex_width <= 16) { ++ RADEON_COPY_MT(buffer, data, ++ (int)(tex_width * sizeof(u32))); ++ } else if (tex_width == 32) { ++ RADEON_COPY_MT(buffer, data, 16); ++ RADEON_COPY_MT(buffer + 8, ++ data + 16, 16); ++ } ++ } else if (tex_width >= 64 || tex_width == 16) { ++ RADEON_COPY_MT(buffer, data, ++ (int)(dwords * sizeof(u32))); ++ } else if (tex_width < 16) { ++ for (i = 0; i < tex->height; i++) { ++ RADEON_COPY_MT(buffer, data, tex_width); ++ buffer += 4; ++ data += tex_width; ++ } ++ } else if (tex_width == 32) { ++ /* TODO: make sure this works when not fitting in one buffer ++ (i.e. 32bytes x 2048...) */ ++ for (i = 0; i < tex->height; i += 2) { ++ RADEON_COPY_MT(buffer, data, 16); ++ data += 16; ++ RADEON_COPY_MT(buffer + 8, data, 16); ++ data += 16; ++ RADEON_COPY_MT(buffer + 4, data, 16); ++ data += 16; ++ RADEON_COPY_MT(buffer + 12, data, 16); ++ data += 16; ++ buffer += 16; ++ } ++ } ++ } else { ++ if (tex_width >= 32) { ++ /* Texture image width is larger than the minimum, so we ++ * can upload it directly. ++ */ ++ RADEON_COPY_MT(buffer, data, ++ (int)(dwords * sizeof(u32))); ++ } else { ++ /* Texture image width is less than the minimum, so we ++ * need to pad out each image scanline to the minimum ++ * width. ++ */ ++ for (i = 0; i < tex->height; i++) { ++ RADEON_COPY_MT(buffer, data, tex_width); ++ buffer += 8; ++ data += tex_width; ++ } ++ } ++ } ++ ++#undef RADEON_COPY_MT ++ byte_offset = (image->y & ~2047) * blit_width; ++ buf->file_priv = file_priv; ++ buf->used = size; ++ offset = dev_priv->gart_buffers_offset + buf->offset; ++ BEGIN_RING(9); ++ OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5)); ++ OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL | ++ RADEON_GMC_DST_PITCH_OFFSET_CNTL | ++ RADEON_GMC_BRUSH_NONE | ++ (format << 8) | ++ RADEON_GMC_SRC_DATATYPE_COLOR | ++ RADEON_ROP3_S | ++ RADEON_DP_SRC_SOURCE_MEMORY | ++ RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS); ++ OUT_RING((spitch << 22) | (offset >> 10)); ++ OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10))); ++ OUT_RING(0); ++ OUT_RING((image->x << 16) | (image->y % 2048)); ++ OUT_RING((image->width << 16) | height); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ADVANCE_RING(); ++ COMMIT_RING(); ++ ++ radeon_cp_discard_buffer(dev, buf); ++ ++ /* Update the input parameters for next time */ ++ image->y += height; ++ image->height -= height; ++ image->data = (const u8 __user *)image->data + size; ++ } while (image->height > 0); ++ ++ /* Flush the pixel cache after the blit completes. This ensures ++ * the texture data is written out to memory before rendering ++ * continues. 
++ */
++	BEGIN_RING(4);
++	RADEON_FLUSH_CACHE();
++	RADEON_WAIT_UNTIL_2D_IDLE();
++	ADVANCE_RING();
++	COMMIT_RING();
++
++	return 0;
++}
++
++static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
++{
++	drm_radeon_private_t *dev_priv = dev->dev_private;
++	int i;
++	RING_LOCALS;
++	DRM_DEBUG("\n");
++
++	BEGIN_RING(35);
++
++	OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
++	OUT_RING(0x00000000);
++
++	OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
++	for (i = 0; i < 32; i++) {
++		OUT_RING(stipple[i]);
++	}
++
++	ADVANCE_RING();
++}
++
++static void radeon_apply_surface_regs(int surf_index,
++				      drm_radeon_private_t *dev_priv)
++{
++	if (!dev_priv->mmio)
++		return;
++
++	radeon_do_cp_idle(dev_priv);
++
++	RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
++		     dev_priv->surfaces[surf_index].flags);
++	RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
++		     dev_priv->surfaces[surf_index].lower);
++	RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
++		     dev_priv->surfaces[surf_index].upper);
++}
++
++/* Allocates a virtual surface
++ * doesn't always allocate a real surface, will stretch an existing
++ * surface when possible.
++ *
++ * Note that refcount can be at most 2, since during a free refcount=3
++ * might mean we have to allocate a new surface which might not always
++ * be available.
++ * For example: we allocate three contiguous surfaces ABC. If B is
++ * freed, we suddenly need two surfaces to store A and C, which might
++ * not always be available.
++ */
++static int alloc_surface(drm_radeon_surface_alloc_t *new,
++			 drm_radeon_private_t *dev_priv,
++			 struct drm_file *file_priv)
++{
++	struct radeon_virt_surface *s;
++	int i;
++	int virt_surface_index;
++	uint32_t new_upper, new_lower;
++
++	new_lower = new->address;
++	new_upper = new_lower + new->size - 1;
++
++	/* sanity check */
++	if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
++	    ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
++	     RADEON_SURF_ADDRESS_FIXED_MASK)
++	    || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
++		return -1;
++
++	/* make sure there is no overlap with existing surfaces */
++	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
++		if ((dev_priv->surfaces[i].refcount != 0) &&
++		    (((new_lower >= dev_priv->surfaces[i].lower) &&
++		      (new_lower < dev_priv->surfaces[i].upper)) ||
++		     ((new_lower < dev_priv->surfaces[i].lower) &&
++		      (new_upper > dev_priv->surfaces[i].lower)))) {
++			return -1;
++		}
++	}
++
++	/* find a virtual surface */
++	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
++		if (dev_priv->virt_surfaces[i].file_priv == 0)
++			break;
++	if (i == 2 * RADEON_MAX_SURFACES) {
++		return -1;
++	}
++	virt_surface_index = i;
++
++	/* try to reuse an existing surface */
++	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
++		/* extend before */
++		if ((dev_priv->surfaces[i].refcount == 1) &&
++		    (new->flags == dev_priv->surfaces[i].flags) &&
++		    (new_upper + 1 == dev_priv->surfaces[i].lower)) {
++			s = &(dev_priv->virt_surfaces[virt_surface_index]);
++			s->surface_index = i;
++			s->lower = new_lower;
++			s->upper = new_upper;
++			s->flags = new->flags;
++			s->file_priv = file_priv;
++			dev_priv->surfaces[i].refcount++;
++			dev_priv->surfaces[i].lower = s->lower;
++			radeon_apply_surface_regs(s->surface_index, dev_priv);
++			return virt_surface_index;
++		}
++
++		/* extend after */
++		if ((dev_priv->surfaces[i].refcount == 1) &&
++		    (new->flags == dev_priv->surfaces[i].flags) &&
++		    (new_lower == dev_priv->surfaces[i].upper + 1)) {
++			s =
&(dev_priv->virt_surfaces[virt_surface_index]); ++ s->surface_index = i; ++ s->lower = new_lower; ++ s->upper = new_upper; ++ s->flags = new->flags; ++ s->file_priv = file_priv; ++ dev_priv->surfaces[i].refcount++; ++ dev_priv->surfaces[i].upper = s->upper; ++ radeon_apply_surface_regs(s->surface_index, dev_priv); ++ return virt_surface_index; ++ } ++ } ++ ++ /* okay, we need a new one */ ++ for (i = 0; i < RADEON_MAX_SURFACES; i++) { ++ if (dev_priv->surfaces[i].refcount == 0) { ++ s = &(dev_priv->virt_surfaces[virt_surface_index]); ++ s->surface_index = i; ++ s->lower = new_lower; ++ s->upper = new_upper; ++ s->flags = new->flags; ++ s->file_priv = file_priv; ++ dev_priv->surfaces[i].refcount = 1; ++ dev_priv->surfaces[i].lower = s->lower; ++ dev_priv->surfaces[i].upper = s->upper; ++ dev_priv->surfaces[i].flags = s->flags; ++ radeon_apply_surface_regs(s->surface_index, dev_priv); ++ return virt_surface_index; ++ } ++ } ++ ++ /* we didn't find anything */ ++ return -1; ++} ++ ++static int free_surface(struct drm_file *file_priv, ++ drm_radeon_private_t * dev_priv, ++ int lower) ++{ ++ struct radeon_virt_surface *s; ++ int i; ++ /* find the virtual surface */ ++ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) { ++ s = &(dev_priv->virt_surfaces[i]); ++ if (s->file_priv) { ++ if ((lower == s->lower) && (file_priv == s->file_priv)) ++ { ++ if (dev_priv->surfaces[s->surface_index]. ++ lower == s->lower) ++ dev_priv->surfaces[s->surface_index]. ++ lower = s->upper; ++ ++ if (dev_priv->surfaces[s->surface_index]. ++ upper == s->upper) ++ dev_priv->surfaces[s->surface_index]. ++ upper = s->lower; ++ ++ dev_priv->surfaces[s->surface_index].refcount--; ++ if (dev_priv->surfaces[s->surface_index]. ++ refcount == 0) ++ dev_priv->surfaces[s->surface_index]. ++ flags = 0; ++ s->file_priv = NULL; ++ radeon_apply_surface_regs(s->surface_index, ++ dev_priv); ++ return 0; ++ } ++ } ++ } ++ return 1; ++} ++ ++static void radeon_surfaces_release(struct drm_file *file_priv, ++ drm_radeon_private_t * dev_priv) ++{ ++ int i; ++ for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) { ++ if (dev_priv->virt_surfaces[i].file_priv == file_priv) ++ free_surface(file_priv, dev_priv, ++ dev_priv->virt_surfaces[i].lower); ++ } ++} ++ ++/* ================================================================ ++ * IOCTL functions ++ */ ++static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_surface_alloc_t *alloc = data; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ if (alloc_surface(alloc, dev_priv, file_priv) == -1) ++ return -EINVAL; ++ else ++ return 0; ++} ++ ++static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_surface_free_t *memfree = data; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ if (free_surface(file_priv, dev_priv, memfree->address)) ++ return -EINVAL; ++ else ++ return 0; ++} ++ ++static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ drm_radeon_clear_t *clear = data; ++ drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ 
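/* RING_SPACE_TEST_WITH_RETURN below presumably waits for CP ring
++	 * space and errors out to the client rather than blocking forever.
++	 */
++	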
RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; ++ ++ if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, ++ sarea_priv->nbox * sizeof(depth_boxes[0]))) ++ return -EFAULT; ++ ++ radeon_cp_dispatch_clear(dev, clear, depth_boxes); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++/* Not sure why this isn't set all the time: ++ */ ++static int radeon_do_init_pageflip(struct drm_device * dev) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ DRM_DEBUG("\n"); ++ ++ BEGIN_RING(6); ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0)); ++ OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) | ++ RADEON_CRTC_OFFSET_FLIP_CNTL); ++ OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0)); ++ OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) | ++ RADEON_CRTC_OFFSET_FLIP_CNTL); ++ ADVANCE_RING(); ++ ++ dev_priv->page_flipping = 1; ++ ++ if (dev_priv->sarea_priv->pfCurrentPage != 1) ++ dev_priv->sarea_priv->pfCurrentPage = 0; ++ ++ return 0; ++} ++ ++/* Swapping and flipping are different operations, need different ioctls. ++ * They can & should be intermixed to support multiple 3d windows. ++ */ ++static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ if (!dev_priv->page_flipping) ++ radeon_do_init_pageflip(dev); ++ ++ radeon_cp_dispatch_flip(dev); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) ++ sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; ++ ++ radeon_cp_dispatch_swap(dev); ++ dev_priv->sarea_priv->ctx_owner = 0; ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_radeon_vertex_t *vertex = data; ++ drm_radeon_tcl_prim_t prim; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ sarea_priv = dev_priv->sarea_priv; ++ ++ DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", ++ DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); ++ ++ if (vertex->idx < 0 || vertex->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ vertex->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { ++ DRM_ERROR("buffer prim %d\n", vertex->prim); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf = dma->buflist[vertex->idx]; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", vertex->idx); ++ return -EINVAL; ++ } ++ ++ /* Build up a prim_t record: ++ */ ++ 
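/* Only emit state and geometry when there is something to draw; a
++	 * discard-only call drops straight to radeon_cp_discard_buffer().
++	 */
++	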
if (vertex->count) { ++ buf->used = vertex->count; /* not used? */ ++ ++ if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { ++ if (radeon_emit_state(dev_priv, file_priv, ++ &sarea_priv->context_state, ++ sarea_priv->tex_state, ++ sarea_priv->dirty)) { ++ DRM_ERROR("radeon_emit_state failed\n"); ++ return -EINVAL; ++ } ++ ++ sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | ++ RADEON_UPLOAD_TEX1IMAGES | ++ RADEON_UPLOAD_TEX2IMAGES | ++ RADEON_REQUIRE_QUIESCENCE); ++ } ++ ++ prim.start = 0; ++ prim.finish = vertex->count; /* unused */ ++ prim.prim = vertex->prim; ++ prim.numverts = vertex->count; ++ prim.vc_format = dev_priv->sarea_priv->vc_format; ++ ++ radeon_cp_dispatch_vertex(dev, buf, &prim); ++ } ++ ++ if (vertex->discard) { ++ radeon_cp_discard_buffer(dev, buf); ++ } ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_radeon_indices_t *elts = data; ++ drm_radeon_tcl_prim_t prim; ++ int count; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ sarea_priv = dev_priv->sarea_priv; ++ ++ DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", ++ DRM_CURRENTPID, elts->idx, elts->start, elts->end, ++ elts->discard); ++ ++ if (elts->idx < 0 || elts->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ elts->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) { ++ DRM_ERROR("buffer prim %d\n", elts->prim); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf = dma->buflist[elts->idx]; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", elts->idx); ++ return -EINVAL; ++ } ++ ++ count = (elts->end - elts->start) / sizeof(u16); ++ elts->start -= RADEON_INDEX_PRIM_OFFSET; ++ ++ if (elts->start & 0x7) { ++ DRM_ERROR("misaligned buffer 0x%x\n", elts->start); ++ return -EINVAL; ++ } ++ if (elts->start < buf->used) { ++ DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used); ++ return -EINVAL; ++ } ++ ++ buf->used = elts->end; ++ ++ if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) { ++ if (radeon_emit_state(dev_priv, file_priv, ++ &sarea_priv->context_state, ++ sarea_priv->tex_state, ++ sarea_priv->dirty)) { ++ DRM_ERROR("radeon_emit_state failed\n"); ++ return -EINVAL; ++ } ++ ++ sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES | ++ RADEON_UPLOAD_TEX1IMAGES | ++ RADEON_UPLOAD_TEX2IMAGES | ++ RADEON_REQUIRE_QUIESCENCE); ++ } ++ ++ /* Build up a prim_t record: ++ */ ++ prim.start = elts->start; ++ prim.finish = elts->end; ++ prim.prim = elts->prim; ++ prim.offset = 0; /* offset from start of dma buffers */ ++ prim.numverts = RADEON_MAX_VB_VERTS; /* duh */ ++ prim.vc_format = dev_priv->sarea_priv->vc_format; ++ ++ radeon_cp_dispatch_indices(dev, buf, &prim); ++ if (elts->discard) { ++ radeon_cp_discard_buffer(dev, buf); ++ } ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_texture_t 
*tex = data; ++ drm_radeon_tex_image_t image; ++ int ret; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (tex->image == NULL) { ++ DRM_ERROR("null texture image!\n"); ++ return -EINVAL; ++ } ++ ++ if (DRM_COPY_FROM_USER(&image, ++ (drm_radeon_tex_image_t __user *) tex->image, ++ sizeof(image))) ++ return -EFAULT; ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image); ++ ++ return ret; ++} ++ ++static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_stipple_t *stipple = data; ++ u32 mask[32]; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32))) ++ return -EFAULT; ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ ++ radeon_cp_dispatch_stipple(dev, mask); ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_radeon_indirect_t *indirect = data; ++ RING_LOCALS; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("idx=%d s=%d e=%d d=%d\n", ++ indirect->idx, indirect->start, indirect->end, ++ indirect->discard); ++ ++ if (indirect->idx < 0 || indirect->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ indirect->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ ++ buf = dma->buflist[indirect->idx]; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", indirect->idx); ++ return -EINVAL; ++ } ++ ++ if (indirect->start < buf->used) { ++ DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", ++ indirect->start, buf->used); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf->used = indirect->end; ++ ++ /* Wait for the 3D stream to idle before the indirect buffer ++ * containing 2D acceleration commands is processed. ++ */ ++ BEGIN_RING(2); ++ ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ++ ADVANCE_RING(); ++ ++ /* Dispatch the indirect buffer full of commands from the ++ * X server. This is insecure and is thus only available to ++ * privileged clients. 
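++	 * (The ioctl table at the end of this file accordingly marks
++	 * DRM_RADEON_INDIRECT as DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY.)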
++ */ ++ radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end); ++ if (indirect->discard) { ++ radeon_cp_discard_buffer(dev, buf); ++ } ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_sarea_t *sarea_priv; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_radeon_vertex2_t *vertex = data; ++ int i; ++ unsigned char laststate; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ sarea_priv = dev_priv->sarea_priv; ++ ++ DRM_DEBUG("pid=%d index=%d discard=%d\n", ++ DRM_CURRENTPID, vertex->idx, vertex->discard); ++ ++ if (vertex->idx < 0 || vertex->idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ vertex->idx, dma->buf_count - 1); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ buf = dma->buflist[vertex->idx]; ++ ++ if (buf->file_priv != file_priv) { ++ DRM_ERROR("process %d using buffer owned by %p\n", ++ DRM_CURRENTPID, buf->file_priv); ++ return -EINVAL; ++ } ++ ++ if (buf->pending) { ++ DRM_ERROR("sending pending buffer %d\n", vertex->idx); ++ return -EINVAL; ++ } ++ ++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) ++ return -EINVAL; ++ ++ for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) { ++ drm_radeon_prim_t prim; ++ drm_radeon_tcl_prim_t tclprim; ++ ++ if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim))) ++ return -EFAULT; ++ ++ if (prim.stateidx != laststate) { ++ drm_radeon_state_t state; ++ ++ if (DRM_COPY_FROM_USER(&state, ++ &vertex->state[prim.stateidx], ++ sizeof(state))) ++ return -EFAULT; ++ ++ if (radeon_emit_state2(dev_priv, file_priv, &state)) { ++ DRM_ERROR("radeon_emit_state2 failed\n"); ++ return -EINVAL; ++ } ++ ++ laststate = prim.stateidx; ++ } ++ ++ tclprim.start = prim.start; ++ tclprim.finish = prim.finish; ++ tclprim.prim = prim.prim; ++ tclprim.vc_format = prim.vc_format; ++ ++ if (prim.prim & RADEON_PRIM_WALK_IND) { ++ tclprim.offset = prim.numverts * 64; ++ tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */ ++ ++ radeon_cp_dispatch_indices(dev, buf, &tclprim); ++ } else { ++ tclprim.numverts = prim.numverts; ++ tclprim.offset = 0; /* not used */ ++ ++ radeon_cp_dispatch_vertex(dev, buf, &tclprim); ++ } ++ ++ if (sarea_priv->nbox == 1) ++ sarea_priv->nbox = 0; ++ } ++ ++ if (vertex->discard) { ++ radeon_cp_discard_buffer(dev, buf); ++ } ++ ++ COMMIT_RING(); ++ return 0; ++} ++ ++static int radeon_emit_packets(drm_radeon_private_t * dev_priv, ++ struct drm_file *file_priv, ++ drm_radeon_cmd_header_t header, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ int id = (int)header.packet.packet_id; ++ int sz, reg; ++ int *data = (int *)cmdbuf->buf; ++ RING_LOCALS; ++ ++ if (id >= RADEON_MAX_STATE_PACKETS) ++ return -EINVAL; ++ ++ sz = packet[id].len; ++ reg = packet[id].start; ++ ++ if (sz * sizeof(int) > cmdbuf->bufsz) { ++ DRM_ERROR("Packet size provided larger than data provided\n"); ++ return -EINVAL; ++ } ++ ++ if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) { ++ DRM_ERROR("Packet verification failed\n"); ++ return -EINVAL; ++ } ++ ++ BEGIN_RING(sz + 1); ++ OUT_RING(CP_PACKET0(reg, (sz - 1))); ++ OUT_RING_TABLE(data, sz); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * sizeof(int); ++ cmdbuf->bufsz -= sz * sizeof(int); ++ return 0; ++} ++ ++static __inline__ 
int radeon_emit_scalars(drm_radeon_private_t *dev_priv, ++ drm_radeon_cmd_header_t header, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ int sz = header.scalars.count; ++ int start = header.scalars.offset; ++ int stride = header.scalars.stride; ++ RING_LOCALS; ++ ++ BEGIN_RING(3 + sz); ++ OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); ++ OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); ++ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); ++ OUT_RING_TABLE(cmdbuf->buf, sz); ++ ADVANCE_RING(); ++ cmdbuf->buf += sz * sizeof(int); ++ cmdbuf->bufsz -= sz * sizeof(int); ++ return 0; ++} ++ ++/* God this is ugly ++ */ ++static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv, ++ drm_radeon_cmd_header_t header, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ int sz = header.scalars.count; ++ int start = ((unsigned int)header.scalars.offset) + 0x100; ++ int stride = header.scalars.stride; ++ RING_LOCALS; ++ ++ BEGIN_RING(3 + sz); ++ OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); ++ OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); ++ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); ++ OUT_RING_TABLE(cmdbuf->buf, sz); ++ ADVANCE_RING(); ++ cmdbuf->buf += sz * sizeof(int); ++ cmdbuf->bufsz -= sz * sizeof(int); ++ return 0; ++} ++ ++static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv, ++ drm_radeon_cmd_header_t header, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ int sz = header.vectors.count; ++ int start = header.vectors.offset; ++ int stride = header.vectors.stride; ++ RING_LOCALS; ++ ++ BEGIN_RING(5 + sz); ++ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); ++ OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); ++ OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); ++ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); ++ OUT_RING_TABLE(cmdbuf->buf, sz); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * sizeof(int); ++ cmdbuf->bufsz -= sz * sizeof(int); ++ return 0; ++} ++ ++static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv, ++ drm_radeon_cmd_header_t header, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ int sz = header.veclinear.count * 4; ++ int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8); ++ RING_LOCALS; ++ ++ if (!sz) ++ return 0; ++ if (sz * 4 > cmdbuf->bufsz) ++ return -EINVAL; ++ ++ BEGIN_RING(5 + sz); ++ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); ++ OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); ++ OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); ++ OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); ++ OUT_RING_TABLE(cmdbuf->buf, sz); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += sz * sizeof(int); ++ cmdbuf->bufsz -= sz * sizeof(int); ++ return 0; ++} ++ ++static int radeon_emit_packet3(struct drm_device * dev, ++ struct drm_file *file_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ unsigned int cmdsz; ++ int ret; ++ RING_LOCALS; ++ ++ DRM_DEBUG("\n"); ++ ++ if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv, ++ cmdbuf, &cmdsz))) { ++ DRM_ERROR("Packet verification failed\n"); ++ return ret; ++ } ++ ++ BEGIN_RING(cmdsz); ++ OUT_RING_TABLE(cmdbuf->buf, cmdsz); ++ ADVANCE_RING(); ++ ++ cmdbuf->buf += cmdsz * 4; ++ cmdbuf->bufsz -= cmdsz * 4; ++ return 0; ++} ++ ++static int radeon_emit_packet3_cliprect(struct drm_device *dev, ++ struct drm_file *file_priv, ++ drm_radeon_kcmd_buffer_t *cmdbuf, ++ int 
orig_nbox) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_clip_rect box; ++ unsigned int cmdsz; ++ int ret; ++ struct drm_clip_rect __user *boxes = cmdbuf->boxes; ++ int i = 0; ++ RING_LOCALS; ++ ++ DRM_DEBUG("\n"); ++ ++ if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv, ++ cmdbuf, &cmdsz))) { ++ DRM_ERROR("Packet verification failed\n"); ++ return ret; ++ } ++ ++ if (!orig_nbox) ++ goto out; ++ ++ do { ++ if (i < cmdbuf->nbox) { ++ if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box))) ++ return -EFAULT; ++ /* FIXME The second and subsequent times round ++ * this loop, send a WAIT_UNTIL_3D_IDLE before ++ * calling emit_clip_rect(). This fixes a ++ * lockup on fast machines when sending ++ * several cliprects with a cmdbuf, as when ++ * waving a 2D window over a 3D ++ * window. Something in the commands from user ++ * space seems to hang the card when they're ++ * sent several times in a row. That would be ++ * the correct place to fix it but this works ++ * around it until I can figure that out - Tim ++ * Smith */ ++ if (i) { ++ BEGIN_RING(2); ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ADVANCE_RING(); ++ } ++ radeon_emit_clip_rect(dev_priv, &box); ++ } ++ ++ BEGIN_RING(cmdsz); ++ OUT_RING_TABLE(cmdbuf->buf, cmdsz); ++ ADVANCE_RING(); ++ ++ } while (++i < cmdbuf->nbox); ++ if (cmdbuf->nbox == 1) ++ cmdbuf->nbox = 0; ++ ++ out: ++ cmdbuf->buf += cmdsz * 4; ++ cmdbuf->bufsz -= cmdsz * 4; ++ return 0; ++} ++ ++static int radeon_emit_wait(struct drm_device * dev, int flags) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ RING_LOCALS; ++ ++ DRM_DEBUG("%x\n", flags); ++ switch (flags) { ++ case RADEON_WAIT_2D: ++ BEGIN_RING(2); ++ RADEON_WAIT_UNTIL_2D_IDLE(); ++ ADVANCE_RING(); ++ break; ++ case RADEON_WAIT_3D: ++ BEGIN_RING(2); ++ RADEON_WAIT_UNTIL_3D_IDLE(); ++ ADVANCE_RING(); ++ break; ++ case RADEON_WAIT_2D | RADEON_WAIT_3D: ++ BEGIN_RING(2); ++ RADEON_WAIT_UNTIL_IDLE(); ++ ADVANCE_RING(); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf = NULL; ++ int idx; ++ drm_radeon_kcmd_buffer_t *cmdbuf = data; ++ drm_radeon_cmd_header_t header; ++ int orig_nbox, orig_bufsz; ++ char *kbuf = NULL; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ RING_SPACE_TEST_WITH_RETURN(dev_priv); ++ VB_AGE_TEST_WITH_RETURN(dev_priv); ++ ++ if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) { ++ return -EINVAL; ++ } ++ ++ /* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid ++ * races between checking values and using those values in other code, ++ * and simply to avoid a lot of function calls to copy in data. 
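++	 * (The 64 KiB bufsz check above keeps this kernel-side copy
++	 * bounded.)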
++ */ ++ orig_bufsz = cmdbuf->bufsz; ++ if (orig_bufsz != 0) { ++ kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER); ++ if (kbuf == NULL) ++ return -ENOMEM; ++ if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf, ++ cmdbuf->bufsz)) { ++ drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); ++ return -EFAULT; ++ } ++ cmdbuf->buf = kbuf; ++ } ++ ++ orig_nbox = cmdbuf->nbox; ++ ++ if (dev_priv->chip_family >= CHIP_R300) { ++ int temp; ++ temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); ++ ++ if (orig_bufsz != 0) ++ drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); ++ ++ return temp; ++ } ++ ++ /* microcode_version != r300 */ ++ while (cmdbuf->bufsz >= sizeof(header)) { ++ ++ header.i = *(int *)cmdbuf->buf; ++ cmdbuf->buf += sizeof(header); ++ cmdbuf->bufsz -= sizeof(header); ++ ++ switch (header.header.cmd_type) { ++ case RADEON_CMD_PACKET: ++ DRM_DEBUG("RADEON_CMD_PACKET\n"); ++ if (radeon_emit_packets ++ (dev_priv, file_priv, header, cmdbuf)) { ++ DRM_ERROR("radeon_emit_packets failed\n"); ++ goto err; ++ } ++ break; ++ ++ case RADEON_CMD_SCALARS: ++ DRM_DEBUG("RADEON_CMD_SCALARS\n"); ++ if (radeon_emit_scalars(dev_priv, header, cmdbuf)) { ++ DRM_ERROR("radeon_emit_scalars failed\n"); ++ goto err; ++ } ++ break; ++ ++ case RADEON_CMD_VECTORS: ++ DRM_DEBUG("RADEON_CMD_VECTORS\n"); ++ if (radeon_emit_vectors(dev_priv, header, cmdbuf)) { ++ DRM_ERROR("radeon_emit_vectors failed\n"); ++ goto err; ++ } ++ break; ++ ++ case RADEON_CMD_DMA_DISCARD: ++ DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); ++ idx = header.dma.buf_idx; ++ if (idx < 0 || idx >= dma->buf_count) { ++ DRM_ERROR("buffer index %d (of %d max)\n", ++ idx, dma->buf_count - 1); ++ goto err; ++ } ++ ++ buf = dma->buflist[idx]; ++ if (buf->file_priv != file_priv || buf->pending) { ++ DRM_ERROR("bad buffer %p %p %d\n", ++ buf->file_priv, file_priv, ++ buf->pending); ++ goto err; ++ } ++ ++ radeon_cp_discard_buffer(dev, buf); ++ break; ++ ++ case RADEON_CMD_PACKET3: ++ DRM_DEBUG("RADEON_CMD_PACKET3\n"); ++ if (radeon_emit_packet3(dev, file_priv, cmdbuf)) { ++ DRM_ERROR("radeon_emit_packet3 failed\n"); ++ goto err; ++ } ++ break; ++ ++ case RADEON_CMD_PACKET3_CLIP: ++ DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n"); ++ if (radeon_emit_packet3_cliprect ++ (dev, file_priv, cmdbuf, orig_nbox)) { ++ DRM_ERROR("radeon_emit_packet3_clip failed\n"); ++ goto err; ++ } ++ break; ++ ++ case RADEON_CMD_SCALARS2: ++ DRM_DEBUG("RADEON_CMD_SCALARS2\n"); ++ if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) { ++ DRM_ERROR("radeon_emit_scalars2 failed\n"); ++ goto err; ++ } ++ break; ++ ++ case RADEON_CMD_WAIT: ++ DRM_DEBUG("RADEON_CMD_WAIT\n"); ++ if (radeon_emit_wait(dev, header.wait.flags)) { ++ DRM_ERROR("radeon_emit_wait failed\n"); ++ goto err; ++ } ++ break; ++ case RADEON_CMD_VECLINEAR: ++ DRM_DEBUG("RADEON_CMD_VECLINEAR\n"); ++ if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) { ++ DRM_ERROR("radeon_emit_veclinear failed\n"); ++ goto err; ++ } ++ break; ++ ++ default: ++ DRM_ERROR("bad cmd_type %d at %p\n", ++ header.header.cmd_type, ++ cmdbuf->buf - sizeof(header)); ++ goto err; ++ } ++ } ++ ++ if (orig_bufsz != 0) ++ drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); ++ ++ DRM_DEBUG("DONE\n"); ++ COMMIT_RING(); ++ return 0; ++ ++ err: ++ if (orig_bufsz != 0) ++ drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); ++ return -EINVAL; ++} ++ ++static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_getparam_t *param = data; ++ int value; ++ ++ if (!dev_priv) { ++ 
DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); ++ ++ switch (param->param) { ++ case RADEON_PARAM_GART_BUFFER_OFFSET: ++ value = dev_priv->gart_buffers_offset; ++ break; ++ case RADEON_PARAM_LAST_FRAME: ++ dev_priv->stats.last_frame_reads++; ++ value = GET_SCRATCH(0); ++ break; ++ case RADEON_PARAM_LAST_DISPATCH: ++ value = GET_SCRATCH(1); ++ break; ++ case RADEON_PARAM_LAST_CLEAR: ++ dev_priv->stats.last_clear_reads++; ++ value = GET_SCRATCH(2); ++ break; ++ case RADEON_PARAM_IRQ_NR: ++ value = dev->irq; ++ break; ++ case RADEON_PARAM_GART_BASE: ++ value = dev_priv->gart_vm_start; ++ break; ++ case RADEON_PARAM_REGISTER_HANDLE: ++ value = dev_priv->mmio->offset; ++ break; ++ case RADEON_PARAM_STATUS_HANDLE: ++ value = dev_priv->ring_rptr_offset; ++ break; ++#ifndef __LP64__ ++ /* ++ * This ioctl() doesn't work on 64-bit platforms because hw_lock is a ++ * pointer which can't fit into an int-sized variable. According to ++ * Michel Dänzer, the ioctl() is only used on embedded platforms, so ++ * not supporting it shouldn't be a problem. If the same functionality ++ * is needed on 64-bit platforms, a new ioctl() would have to be added, ++ * so backwards-compatibility for the embedded platforms can be ++ * maintained. --davidm 4-Feb-2004. ++ */ ++ case RADEON_PARAM_SAREA_HANDLE: ++ /* The lock is the first dword in the sarea. */ ++ value = (long)dev->lock.hw_lock; ++ break; ++#endif ++ case RADEON_PARAM_GART_TEX_HANDLE: ++ value = dev_priv->gart_textures_offset; ++ break; ++ case RADEON_PARAM_SCRATCH_OFFSET: ++ if (!dev_priv->writeback_works) ++ return -EINVAL; ++ value = RADEON_SCRATCH_REG_OFFSET; ++ break; ++ ++ case RADEON_PARAM_CARD_TYPE: ++ if (dev_priv->flags & RADEON_IS_PCIE) ++ value = RADEON_CARD_PCIE; ++ else if (dev_priv->flags & RADEON_IS_AGP) ++ value = RADEON_CARD_AGP; ++ else ++ value = RADEON_CARD_PCI; ++ break; ++ case RADEON_PARAM_VBLANK_CRTC: ++ value = radeon_vblank_crtc_get(dev); ++ break; ++ case RADEON_PARAM_FB_LOCATION: ++ value = radeon_read_fb_location(dev_priv); ++ break; ++ case RADEON_PARAM_NUM_GB_PIPES: ++ value = dev_priv->num_gb_pipes; ++ break; ++ default: ++ DRM_DEBUG( "Invalid parameter %d\n", param->param ); ++ return -EINVAL; ++ } ++ ++ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { ++ DRM_ERROR("copy_to_user\n"); ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ drm_radeon_setparam_t *sp = data; ++ struct drm_radeon_driver_file_fields *radeon_priv; ++ ++ if (!dev_priv) { ++ DRM_ERROR("called with no initialization\n"); ++ return -EINVAL; ++ } ++ ++ switch (sp->param) { ++ case RADEON_SETPARAM_FB_LOCATION: ++ radeon_priv = file_priv->driver_priv; ++ radeon_priv->radeon_fb_delta = dev_priv->fb_location - ++ sp->value; ++ break; ++ case RADEON_SETPARAM_SWITCH_TILING: ++ if (sp->value == 0) { ++ DRM_DEBUG("color tiling disabled\n"); ++ dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; ++ dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->tiling_enabled = 0; ++ } else if (sp->value == 1) { ++ DRM_DEBUG("color tiling enabled\n"); ++ dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; ++ dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; ++ if (dev_priv->sarea_priv) ++ dev_priv->sarea_priv->tiling_enabled = 1; ++ } ++ break; ++ case 
RADEON_SETPARAM_PCIGART_LOCATION: ++ dev_priv->pcigart_offset = sp->value; ++ dev_priv->pcigart_offset_set = 1; ++ break; ++ case RADEON_SETPARAM_NEW_MEMMAP: ++ dev_priv->new_memmap = sp->value; ++ break; ++ case RADEON_SETPARAM_PCIGART_TABLE_SIZE: ++ dev_priv->gart_info.table_size = sp->value; ++ if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE) ++ dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; ++ break; ++ case RADEON_SETPARAM_VBLANK_CRTC: ++ return radeon_vblank_crtc_set(dev, sp->value); ++ break; ++ default: ++ DRM_DEBUG("Invalid parameter %d\n", sp->param); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* When a client dies: ++ * - Check for and clean up flipped page state ++ * - Free any alloced GART memory. ++ * - Free any alloced radeon surfaces. ++ * ++ * DRM infrastructure takes care of reclaiming dma buffers. ++ */ ++void radeon_driver_preclose(struct drm_device *dev, ++ struct drm_file *file_priv) ++{ ++ if (dev->dev_private) { ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ dev_priv->page_flipping = 0; ++ radeon_mem_release(file_priv, dev_priv->gart_heap); ++ radeon_mem_release(file_priv, dev_priv->fb_heap); ++ radeon_surfaces_release(file_priv, dev_priv); ++ } ++} ++ ++void radeon_driver_lastclose(struct drm_device *dev) ++{ ++ if (dev->dev_private) { ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ ++ if (dev_priv->sarea_priv && ++ dev_priv->sarea_priv->pfCurrentPage != 0) ++ radeon_cp_dispatch_flip(dev); ++ } ++ ++ radeon_do_release(dev); ++} ++ ++int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ drm_radeon_private_t *dev_priv = dev->dev_private; ++ struct drm_radeon_driver_file_fields *radeon_priv; ++ ++ DRM_DEBUG("\n"); ++ radeon_priv = ++ (struct drm_radeon_driver_file_fields *) ++ drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES); ++ ++ if (!radeon_priv) ++ return -ENOMEM; ++ ++ file_priv->driver_priv = radeon_priv; ++ ++ if (dev_priv) ++ radeon_priv->radeon_fb_delta = dev_priv->fb_location; ++ else ++ radeon_priv->radeon_fb_delta = 0; ++ return 0; ++} ++ ++void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) ++{ ++ struct drm_radeon_driver_file_fields *radeon_priv = ++ file_priv->driver_priv; ++ ++ drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES); ++} ++ ++struct drm_ioctl_desc radeon_ioctls[] = { ++ DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), ++ 
DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), ++ DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH) ++}; ++ ++int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); +diff -Nurd git/drivers/gpu/drm-tungsten/savage_bci.c git-nokia/drivers/gpu/drm-tungsten/savage_bci.c +--- git/drivers/gpu/drm-tungsten/savage_bci.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/savage_bci.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1092 @@ ++/* savage_bci.c -- BCI support for Savage ++ * ++ * Copyright 2004 Felix Kuehling ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF ++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++#include "drmP.h" ++#include "savage_drm.h" ++#include "savage_drv.h" ++ ++/* Need a long timeout for shadow status updates can take a while ++ * and so can waiting for events when the queue is full. 
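++ * (Hence the two constants below: 1 s for FIFO waits and 5 s for
++ * event waits.)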
*/
++#define SAVAGE_DEFAULT_USEC_TIMEOUT	1000000	/* 1s */
++#define SAVAGE_EVENT_USEC_TIMEOUT	5000000	/* 5s */
++#define SAVAGE_FREELIST_DEBUG		0
++
++static int savage_do_cleanup_bci(struct drm_device *dev);
++
++static int
++savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
++{
++	uint32_t mask = dev_priv->status_used_mask;
++	uint32_t threshold = dev_priv->bci_threshold_hi;
++	uint32_t status;
++	int i;
++
++#if SAVAGE_BCI_DEBUG
++	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
++		DRM_ERROR("Trying to emit %d words "
++			  "(more than guaranteed space in COB)\n", n);
++#endif
++
++	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
++		DRM_MEMORYBARRIER();
++		status = dev_priv->status_ptr[0];
++		if ((status & mask) < threshold)
++			return 0;
++		DRM_UDELAY(1);
++	}
++
++#if SAVAGE_BCI_DEBUG
++	DRM_ERROR("failed!\n");
++	DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
++#endif
++	return -EBUSY;
++}
++
++static int
++savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
++{
++	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
++	uint32_t status;
++	int i;
++
++	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
++		status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
++		if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
++			return 0;
++		DRM_UDELAY(1);
++	}
++
++#if SAVAGE_BCI_DEBUG
++	DRM_ERROR("failed!\n");
++	DRM_INFO(" status=0x%08x\n", status);
++#endif
++	return -EBUSY;
++}
++
++static int
++savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
++{
++	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
++	uint32_t status;
++	int i;
++
++	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
++		status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
++		if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
++			return 0;
++		DRM_UDELAY(1);
++	}
++
++#if SAVAGE_BCI_DEBUG
++	DRM_ERROR("failed!\n");
++	DRM_INFO(" status=0x%08x\n", status);
++#endif
++	return -EBUSY;
++}
++
++/*
++ * Waiting for events.
++ *
++ * The BIOS resets the event tag to 0 on mode changes. Therefore we
++ * never emit 0 to the event tag. If we find a 0 event tag we know the
++ * BIOS stomped on it and return success assuming that the BIOS waited
++ * for engine idle.
++ *
++ * Note: if the Xserver uses the event tag it has to follow the same
++ * rule. Otherwise there may be glitches every 2^16 events.
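++ * (The comparisons below treat the tags as 16-bit serial numbers:
++ * (((status & 0xffff) - e) & 0xffff) <= 0x7fff holds once the current
++ * tag has reached or passed e, modulo 2^16.)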
++ */ ++static int ++savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e) ++{ ++ uint32_t status; ++ int i; ++ ++ for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { ++ DRM_MEMORYBARRIER(); ++ status = dev_priv->status_ptr[1]; ++ if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || ++ (status & 0xffff) == 0) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++#if SAVAGE_BCI_DEBUG ++ DRM_ERROR("failed!\n"); ++ DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); ++#endif ++ ++ return -EBUSY; ++} ++ ++static int ++savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e) ++{ ++ uint32_t status; ++ int i; ++ ++ for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { ++ status = SAVAGE_READ(SAVAGE_STATUS_WORD1); ++ if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || ++ (status & 0xffff) == 0) ++ return 0; ++ DRM_UDELAY(1); ++ } ++ ++#if SAVAGE_BCI_DEBUG ++ DRM_ERROR("failed!\n"); ++ DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); ++#endif ++ ++ return -EBUSY; ++} ++ ++uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, ++ unsigned int flags) ++{ ++ uint16_t count; ++ BCI_LOCALS; ++ ++ if (dev_priv->status_ptr) { ++ /* coordinate with Xserver */ ++ count = dev_priv->status_ptr[1023]; ++ if (count < dev_priv->event_counter) ++ dev_priv->event_wrap++; ++ } else { ++ count = dev_priv->event_counter; ++ } ++ count = (count + 1) & 0xffff; ++ if (count == 0) { ++ count++; /* See the comment above savage_wait_event_*. */ ++ dev_priv->event_wrap++; ++ } ++ dev_priv->event_counter = count; ++ if (dev_priv->status_ptr) ++ dev_priv->status_ptr[1023] = (uint32_t)count; ++ ++ if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) { ++ unsigned int wait_cmd = BCI_CMD_WAIT; ++ if ((flags & SAVAGE_WAIT_2D)) ++ wait_cmd |= BCI_CMD_WAIT_2D; ++ if ((flags & SAVAGE_WAIT_3D)) ++ wait_cmd |= BCI_CMD_WAIT_3D; ++ BEGIN_BCI(2); ++ BCI_WRITE(wait_cmd); ++ } else { ++ BEGIN_BCI(1); ++ } ++ BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count); ++ ++ return count; ++} ++ ++/* ++ * Freelist management ++ */ ++static int savage_freelist_init(struct drm_device *dev) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_buf *buf; ++ drm_savage_buf_priv_t *entry; ++ int i; ++ DRM_DEBUG("count=%d\n", dma->buf_count); ++ ++ dev_priv->head.next = &dev_priv->tail; ++ dev_priv->head.prev = NULL; ++ dev_priv->head.buf = NULL; ++ ++ dev_priv->tail.next = NULL; ++ dev_priv->tail.prev = &dev_priv->head; ++ dev_priv->tail.buf = NULL; ++ ++ for (i = 0; i < dma->buf_count; i++) { ++ buf = dma->buflist[i]; ++ entry = buf->dev_private; ++ ++ SET_AGE(&entry->age, 0, 0); ++ entry->buf = buf; ++ ++ entry->next = dev_priv->head.next; ++ entry->prev = &dev_priv->head; ++ dev_priv->head.next->prev = entry; ++ dev_priv->head.next = entry; ++ } ++ ++ return 0; ++} ++ ++static struct drm_buf *savage_freelist_get(struct drm_device *dev) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ drm_savage_buf_priv_t *tail = dev_priv->tail.prev; ++ uint16_t event; ++ unsigned int wrap; ++ DRM_DEBUG("\n"); ++ ++ UPDATE_EVENT_COUNTER(); ++ if (dev_priv->status_ptr) ++ event = dev_priv->status_ptr[1] & 0xffff; ++ else ++ event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; ++ wrap = dev_priv->event_wrap; ++ if (event > dev_priv->event_counter) ++ wrap--; /* hardware hasn't passed the last wrap yet */ ++ ++ DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap); ++ DRM_DEBUG(" head=0x%04x %d\n", event, wrap); ++ ++ if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || 
event == 0)) { ++ drm_savage_buf_priv_t *next = tail->next; ++ drm_savage_buf_priv_t *prev = tail->prev; ++ prev->next = next; ++ next->prev = prev; ++ tail->next = tail->prev = NULL; ++ return tail->buf; ++ } ++ ++ DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf); ++ return NULL; ++} ++ ++void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next; ++ ++ DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap); ++ ++ if (entry->next != NULL || entry->prev != NULL) { ++ DRM_ERROR("entry already on freelist.\n"); ++ return; ++ } ++ ++ prev = &dev_priv->head; ++ next = prev->next; ++ prev->next = entry; ++ next->prev = entry; ++ entry->prev = prev; ++ entry->next = next; ++} ++ ++/* ++ * Command DMA ++ */ ++static int savage_dma_init(drm_savage_private_t *dev_priv) ++{ ++ unsigned int i; ++ ++ dev_priv->nr_dma_pages = dev_priv->cmd_dma->size / ++ (SAVAGE_DMA_PAGE_SIZE*4); ++ dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * ++ dev_priv->nr_dma_pages, DRM_MEM_DRIVER); ++ if (dev_priv->dma_pages == NULL) ++ return -ENOMEM; ++ ++ for (i = 0; i < dev_priv->nr_dma_pages; ++i) { ++ SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); ++ dev_priv->dma_pages[i].used = 0; ++ dev_priv->dma_pages[i].flushed = 0; ++ } ++ SET_AGE(&dev_priv->last_dma_age, 0, 0); ++ ++ dev_priv->first_dma_page = 0; ++ dev_priv->current_dma_page = 0; ++ ++ return 0; ++} ++ ++void savage_dma_reset(drm_savage_private_t *dev_priv) ++{ ++ uint16_t event; ++ unsigned int wrap, i; ++ event = savage_bci_emit_event(dev_priv, 0); ++ wrap = dev_priv->event_wrap; ++ for (i = 0; i < dev_priv->nr_dma_pages; ++i) { ++ SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); ++ dev_priv->dma_pages[i].used = 0; ++ dev_priv->dma_pages[i].flushed = 0; ++ } ++ SET_AGE(&dev_priv->last_dma_age, event, wrap); ++ dev_priv->first_dma_page = dev_priv->current_dma_page = 0; ++} ++ ++void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page) ++{ ++ uint16_t event; ++ unsigned int wrap; ++ ++ /* Faked DMA buffer pages don't age. 
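++ * (A faked buffer lives in plain kernel memory and is copied into the
++ * BCI FIFO synchronously by savage_fake_dma_flush, so there is no
++ * asynchronous consumer to wait for.)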
*/
++ if (dev_priv->cmd_dma == &dev_priv->fake_dma)
++ return;
++
++ UPDATE_EVENT_COUNTER();
++ if (dev_priv->status_ptr)
++ event = dev_priv->status_ptr[1] & 0xffff;
++ else
++ event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
++ wrap = dev_priv->event_wrap;
++ if (event > dev_priv->event_counter)
++ wrap--; /* hardware hasn't passed the last wrap yet */
++
++ if (dev_priv->dma_pages[page].age.wrap > wrap ||
++ (dev_priv->dma_pages[page].age.wrap == wrap &&
++ dev_priv->dma_pages[page].age.event > event)) {
++ if (dev_priv->wait_evnt(dev_priv,
++ dev_priv->dma_pages[page].age.event)
++ < 0)
++ DRM_ERROR("wait_evnt failed!\n");
++ }
++}
++
++uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
++{
++ unsigned int cur = dev_priv->current_dma_page;
++ unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
++ dev_priv->dma_pages[cur].used;
++ unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
++ SAVAGE_DMA_PAGE_SIZE;
++ uint32_t *dma_ptr;
++ unsigned int i;
++
++ DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
++ cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
++
++ if (cur + nr_pages < dev_priv->nr_dma_pages) {
++ dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
++ cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
++ if (n < rest)
++ rest = n;
++ dev_priv->dma_pages[cur].used += rest;
++ n -= rest;
++ cur++;
++ } else {
++ dev_priv->dma_flush(dev_priv);
++ nr_pages =
++ (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
++ for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
++ dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
++ dev_priv->dma_pages[i].used = 0;
++ dev_priv->dma_pages[i].flushed = 0;
++ }
++ dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
++ dev_priv->first_dma_page = cur = 0;
++ }
++ for (i = cur; nr_pages > 0; ++i, --nr_pages) {
++#if SAVAGE_DMA_DEBUG
++ if (dev_priv->dma_pages[i].used) {
++ DRM_ERROR("unflushed page %u: used=%u\n",
++ i, dev_priv->dma_pages[i].used);
++ }
++#endif
++ if (n > SAVAGE_DMA_PAGE_SIZE)
++ dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
++ else
++ dev_priv->dma_pages[i].used = n;
++ n -= SAVAGE_DMA_PAGE_SIZE;
++ }
++ dev_priv->current_dma_page = --i;
++
++ DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
++ i, dev_priv->dma_pages[i].used, n);
++
++ savage_dma_wait(dev_priv, dev_priv->current_dma_page);
++
++ return dma_ptr;
++}
++
++static void savage_dma_flush(drm_savage_private_t *dev_priv)
++{
++ unsigned int first = dev_priv->first_dma_page;
++ unsigned int cur = dev_priv->current_dma_page;
++ uint16_t event;
++ unsigned int wrap, pad, align, len, i;
++ unsigned long phys_addr;
++ BCI_LOCALS;
++
++ if (first == cur &&
++ dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
++ return;
++
++ /* pad length to multiples of 2 entries
++ * align start of next DMA block to multiples of 8 entries */
++ pad = -dev_priv->dma_pages[cur].used & 1;
++ align = -(dev_priv->dma_pages[cur].used + pad) & 7;
++
++ DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
++ "pad=%u, align=%u\n",
++ first, cur, dev_priv->dma_pages[first].flushed,
++ dev_priv->dma_pages[cur].used, pad, align);
++
++ /* pad with noops */
++ if (pad) {
++ uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
++ cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
++ dev_priv->dma_pages[cur].used += pad;
++ while (pad != 0) {
++ *dma_ptr++ = BCI_CMD_WAIT;
++ pad--;
++ }
++ }
++
++ DRM_MEMORYBARRIER();
++
++ /* do flush ...
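++ * phys_addr is the bus address of the first unflushed dword (the *4
++ * turns the dword offset into a byte offset); len is the distance in
++ * dwords from there to the end of the current page's used area.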
*/
++ phys_addr = dev_priv->cmd_dma->offset +
++ (first * SAVAGE_DMA_PAGE_SIZE +
++ dev_priv->dma_pages[first].flushed) * 4;
++ len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
++ dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;
++
++ DRM_DEBUG("phys_addr=%lx, len=%u\n",
++ phys_addr | dev_priv->dma_type, len);
++
++ BEGIN_BCI(3);
++ BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
++ BCI_WRITE(phys_addr | dev_priv->dma_type);
++ BCI_DMA(len);
++
++ /* fix alignment of the start of the next block */
++ dev_priv->dma_pages[cur].used += align;
++
++ /* age DMA pages */
++ event = savage_bci_emit_event(dev_priv, 0);
++ wrap = dev_priv->event_wrap;
++ for (i = first; i < cur; ++i) {
++ SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
++ dev_priv->dma_pages[i].used = 0;
++ dev_priv->dma_pages[i].flushed = 0;
++ }
++ /* age the current page only when it's full */
++ if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
++ SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
++ dev_priv->dma_pages[cur].used = 0;
++ dev_priv->dma_pages[cur].flushed = 0;
++ /* advance to next page */
++ cur++;
++ if (cur == dev_priv->nr_dma_pages)
++ cur = 0;
++ dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
++ } else {
++ dev_priv->first_dma_page = cur;
++ dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
++ }
++ SET_AGE(&dev_priv->last_dma_age, event, wrap);
++
++ DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
++ dev_priv->dma_pages[cur].used,
++ dev_priv->dma_pages[cur].flushed);
++}
++
++static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
++{
++ unsigned int i, j;
++ BCI_LOCALS;
++
++ if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
++ dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
++ return;
++
++ DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
++ dev_priv->first_dma_page, dev_priv->current_dma_page,
++ dev_priv->dma_pages[dev_priv->current_dma_page].used);
++
++ for (i = dev_priv->first_dma_page;
++ i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
++ ++i) {
++ uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
++ i * SAVAGE_DMA_PAGE_SIZE;
++#if SAVAGE_DMA_DEBUG
++ /* Sanity check: all pages except the last one must be full. */
++ if (i < dev_priv->current_dma_page &&
++ dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
++ DRM_ERROR("partial DMA page %u: used=%u",
++ i, dev_priv->dma_pages[i].used);
++ }
++#endif
++ BEGIN_BCI(dev_priv->dma_pages[i].used);
++ for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
++ BCI_WRITE(dma_ptr[j]);
++ }
++ dev_priv->dma_pages[i].used = 0;
++ }
++
++ /* reset to first page */
++ dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
++}
++
++int savage_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++ drm_savage_private_t *dev_priv;
++
++ dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
++ if (dev_priv == NULL)
++ return -ENOMEM;
++
++ memset(dev_priv, 0, sizeof(drm_savage_private_t));
++ dev->dev_private = (void *)dev_priv;
++
++ dev_priv->chipset = (enum savage_family)chipset;
++
++ return 0;
++}
++
++/*
++ * Initialize mappings. On Savage4 and SavageIX the alignment
++ * and size of the aperture is not suitable for automatic MTRR setup
++ * in drm_addmap. Therefore we add them manually before the maps are
++ * initialized, and tear them down on last close.
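++ *
++ * Resource layout by family, as handled by the three branches below:
++ * Savage3D/MX/IX put the frame buffer and MMIO in one 128MB BAR 0;
++ * Savage4-class chips keep MMIO in BAR 0 and fb plus aperture in
++ * BAR 1; SuperSavage and Savage2000 expose the aperture separately.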
++ */
++int savage_driver_firstopen(struct drm_device *dev)
++{
++ drm_savage_private_t *dev_priv = dev->dev_private;
++ unsigned long mmio_base, fb_base, fb_size, aperture_base;
++ /* fb_rsrc and aper_rsrc aren't really used currently, but still exist
++ * in case we decide we need information on the BAR for BSD in the
++ * future.
++ */
++ unsigned int fb_rsrc, aper_rsrc;
++ int ret = 0;
++
++ dev_priv->mtrr[0].handle = -1;
++ dev_priv->mtrr[1].handle = -1;
++ dev_priv->mtrr[2].handle = -1;
++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++ fb_rsrc = 0;
++ fb_base = drm_get_resource_start(dev, 0);
++ fb_size = SAVAGE_FB_SIZE_S3;
++ mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
++ aper_rsrc = 0;
++ aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
++ /* this should always be true */
++ if (drm_get_resource_len(dev, 0) == 0x08000000) {
++ /* Don't make MMIO write-combining! We need 3
++ * MTRRs. */
++ dev_priv->mtrr[0].base = fb_base;
++ dev_priv->mtrr[0].size = 0x01000000;
++ dev_priv->mtrr[0].handle =
++ drm_mtrr_add(dev_priv->mtrr[0].base,
++ dev_priv->mtrr[0].size, DRM_MTRR_WC);
++ dev_priv->mtrr[1].base = fb_base + 0x02000000;
++ dev_priv->mtrr[1].size = 0x02000000;
++ dev_priv->mtrr[1].handle =
++ drm_mtrr_add(dev_priv->mtrr[1].base,
++ dev_priv->mtrr[1].size, DRM_MTRR_WC);
++ dev_priv->mtrr[2].base = fb_base + 0x04000000;
++ dev_priv->mtrr[2].size = 0x04000000;
++ dev_priv->mtrr[2].handle =
++ drm_mtrr_add(dev_priv->mtrr[2].base,
++ dev_priv->mtrr[2].size, DRM_MTRR_WC);
++ } else {
++ DRM_ERROR("strange pci_resource_len %08lx\n",
++ drm_get_resource_len(dev, 0));
++ }
++ } else if (dev_priv->chipset != S3_SUPERSAVAGE &&
++ dev_priv->chipset != S3_SAVAGE2000) {
++ mmio_base = drm_get_resource_start(dev, 0);
++ fb_rsrc = 1;
++ fb_base = drm_get_resource_start(dev, 1);
++ fb_size = SAVAGE_FB_SIZE_S4;
++ aper_rsrc = 1;
++ aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
++ /* this should always be true */
++ if (drm_get_resource_len(dev, 1) == 0x08000000) {
++ /* Can use one MTRR to cover both fb and
++ * aperture. */
++ dev_priv->mtrr[0].base = fb_base;
++ dev_priv->mtrr[0].size = 0x08000000;
++ dev_priv->mtrr[0].handle =
++ drm_mtrr_add(dev_priv->mtrr[0].base,
++ dev_priv->mtrr[0].size, DRM_MTRR_WC);
++ } else {
++ DRM_ERROR("strange pci_resource_len %08lx\n",
++ drm_get_resource_len(dev, 1));
++ }
++ } else {
++ mmio_base = drm_get_resource_start(dev, 0);
++ fb_rsrc = 1;
++ fb_base = drm_get_resource_start(dev, 1);
++ fb_size = drm_get_resource_len(dev, 1);
++ aper_rsrc = 2;
++ aperture_base = drm_get_resource_start(dev, 2);
++ /* Automatic MTRR setup will do the right thing. */
++ }
++
++ ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
++ _DRM_READ_ONLY, &dev_priv->mmio);
++ if (ret)
++ return ret;
++
++ ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
++ _DRM_WRITE_COMBINING, &dev_priv->fb);
++ if (ret)
++ return ret;
++
++ ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
++ _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
++ &dev_priv->aperture);
++ if (ret)
++ return ret;
++
++ return ret;
++}
++
++/*
++ * Delete MTRRs and free device-private data.
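++ * (A handle of -1 marks an MTRR that was never set up in firstopen,
++ * so only successfully added ranges are deleted here.)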
++ */ ++void savage_driver_lastclose(struct drm_device *dev) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ int i; ++ ++ for (i = 0; i < 3; ++i) ++ if (dev_priv->mtrr[i].handle >= 0) ++ drm_mtrr_del(dev_priv->mtrr[i].handle, ++ dev_priv->mtrr[i].base, ++ dev_priv->mtrr[i].size, DRM_MTRR_WC); ++} ++ ++int savage_driver_unload(struct drm_device *dev) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ ++ drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER); ++ ++ return 0; ++} ++ ++static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ ++ if (init->fb_bpp != 16 && init->fb_bpp != 32) { ++ DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); ++ return -EINVAL; ++ } ++ if (init->depth_bpp != 16 && init->depth_bpp != 32) { ++ DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); ++ return -EINVAL; ++ } ++ if (init->dma_type != SAVAGE_DMA_AGP && ++ init->dma_type != SAVAGE_DMA_PCI) { ++ DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); ++ return -EINVAL; ++ } ++ ++ dev_priv->cob_size = init->cob_size; ++ dev_priv->bci_threshold_lo = init->bci_threshold_lo; ++ dev_priv->bci_threshold_hi = init->bci_threshold_hi; ++ dev_priv->dma_type = init->dma_type; ++ ++ dev_priv->fb_bpp = init->fb_bpp; ++ dev_priv->front_offset = init->front_offset; ++ dev_priv->front_pitch = init->front_pitch; ++ dev_priv->back_offset = init->back_offset; ++ dev_priv->back_pitch = init->back_pitch; ++ dev_priv->depth_bpp = init->depth_bpp; ++ dev_priv->depth_offset = init->depth_offset; ++ dev_priv->depth_pitch = init->depth_pitch; ++ ++ dev_priv->texture_offset = init->texture_offset; ++ dev_priv->texture_size = init->texture_size; ++ ++ dev_priv->sarea = drm_getsarea(dev); ++ if (!dev_priv->sarea) { ++ DRM_ERROR("could not find sarea!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ if (init->status_offset != 0) { ++ dev_priv->status = drm_core_findmap(dev, init->status_offset); ++ if (!dev_priv->status) { ++ DRM_ERROR("could not find shadow status region!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ } else { ++ dev_priv->status = NULL; ++ } ++ if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) { ++ dev->agp_buffer_token = init->buffers_offset; ++ dev->agp_buffer_map = drm_core_findmap(dev, ++ init->buffers_offset); ++ if (!dev->agp_buffer_map) { ++ DRM_ERROR("could not find DMA buffer region!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ drm_core_ioremap(dev->agp_buffer_map, dev); ++ if (!dev->agp_buffer_map) { ++ DRM_ERROR("failed to ioremap DMA buffer region!\n"); ++ savage_do_cleanup_bci(dev); ++ return -ENOMEM; ++ } ++ } ++ if (init->agp_textures_offset) { ++ dev_priv->agp_textures = ++ drm_core_findmap(dev, init->agp_textures_offset); ++ if (!dev_priv->agp_textures) { ++ DRM_ERROR("could not find agp texture region!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ } else { ++ dev_priv->agp_textures = NULL; ++ } ++ ++ if (init->cmd_dma_offset) { ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ DRM_ERROR("command DMA not supported on " ++ "Savage3D/MX/IX.\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ if (dev->dma && dev->dma->buflist) { ++ DRM_ERROR("command and vertex DMA not supported " ++ "at the same time.\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); ++ if (!dev_priv->cmd_dma) { ++ DRM_ERROR("could 
not find command DMA region!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ if (dev_priv->dma_type == SAVAGE_DMA_AGP) { ++ if (dev_priv->cmd_dma->type != _DRM_AGP) { ++ DRM_ERROR("AGP command DMA region is not a " ++ "_DRM_AGP map!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ drm_core_ioremap(dev_priv->cmd_dma, dev); ++ if (!dev_priv->cmd_dma->handle) { ++ DRM_ERROR("failed to ioremap command " ++ "DMA region!\n"); ++ savage_do_cleanup_bci(dev); ++ return -ENOMEM; ++ } ++ } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { ++ DRM_ERROR("PCI command DMA region is not a " ++ "_DRM_CONSISTENT map!\n"); ++ savage_do_cleanup_bci(dev); ++ return -EINVAL; ++ } ++ } else { ++ dev_priv->cmd_dma = NULL; ++ } ++ ++ dev_priv->dma_flush = savage_dma_flush; ++ if (!dev_priv->cmd_dma) { ++ DRM_DEBUG("falling back to faked command DMA.\n"); ++ dev_priv->fake_dma.offset = 0; ++ dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE; ++ dev_priv->fake_dma.type = _DRM_SHM; ++ dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE, ++ DRM_MEM_DRIVER); ++ if (!dev_priv->fake_dma.handle) { ++ DRM_ERROR("could not allocate faked DMA buffer!\n"); ++ savage_do_cleanup_bci(dev); ++ return -ENOMEM; ++ } ++ dev_priv->cmd_dma = &dev_priv->fake_dma; ++ dev_priv->dma_flush = savage_fake_dma_flush; ++ } ++ ++ dev_priv->sarea_priv = ++ (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle + ++ init->sarea_priv_offset); ++ ++ /* setup bitmap descriptors */ ++ { ++ unsigned int color_tile_format; ++ unsigned int depth_tile_format; ++ unsigned int front_stride, back_stride, depth_stride; ++ if (dev_priv->chipset <= S3_SAVAGE4) { ++ color_tile_format = dev_priv->fb_bpp == 16 ? ++ SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; ++ depth_tile_format = dev_priv->depth_bpp == 16 ? 
++ SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
++ } else {
++ color_tile_format = SAVAGE_BD_TILE_DEST;
++ depth_tile_format = SAVAGE_BD_TILE_DEST;
++ }
++ front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
++ back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
++ depth_stride =
++ dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
++
++ dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
++ (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
++ (color_tile_format << SAVAGE_BD_TILE_SHIFT);
++
++ dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
++ (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
++ (color_tile_format << SAVAGE_BD_TILE_SHIFT);
++
++ dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
++ (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
++ (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
++ }
++
++ /* setup status and bci ptr */
++ dev_priv->event_counter = 0;
++ dev_priv->event_wrap = 0;
++ dev_priv->bci_ptr = (volatile uint32_t *)
++ ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++ dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
++ } else {
++ dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
++ }
++ if (dev_priv->status != NULL) {
++ dev_priv->status_ptr =
++ (volatile uint32_t *)dev_priv->status->handle;
++ dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
++ dev_priv->wait_evnt = savage_bci_wait_event_shadow;
++ dev_priv->status_ptr[1023] = dev_priv->event_counter;
++ } else {
++ dev_priv->status_ptr = NULL;
++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++ dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
++ } else {
++ dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
++ }
++ dev_priv->wait_evnt = savage_bci_wait_event_reg;
++ }
++
++ /* cliprect functions */
++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
++ dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
++ else
++ dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
++
++ if (savage_freelist_init(dev) < 0) {
++ DRM_ERROR("could not initialize freelist\n");
++ savage_do_cleanup_bci(dev);
++ return -ENOMEM;
++ }
++
++ if (savage_dma_init(dev_priv) < 0) {
++ DRM_ERROR("could not initialize command DMA\n");
++ savage_do_cleanup_bci(dev);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static int savage_do_cleanup_bci(struct drm_device *dev)
++{
++ drm_savage_private_t *dev_priv = dev->dev_private;
++
++ if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
++ if (dev_priv->fake_dma.handle)
++ drm_free(dev_priv->fake_dma.handle,
++ SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
++ } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
++ dev_priv->cmd_dma->type == _DRM_AGP &&
++ dev_priv->dma_type == SAVAGE_DMA_AGP)
++ drm_core_ioremapfree(dev_priv->cmd_dma, dev);
++
++ if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
++ dev->agp_buffer_map && dev->agp_buffer_map->handle) {
++ drm_core_ioremapfree(dev->agp_buffer_map, dev);
++ /* make sure the next instance (which may be running
++ * in PCI mode) doesn't try to use an old
++ * agp_buffer_map.
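++ * (savage_do_init_bci() re-resolves the map via drm_core_findmap()
++ * on the next initialization.)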
*/ ++ dev->agp_buffer_map = NULL; ++ } ++ ++ if (dev_priv->dma_pages) ++ drm_free(dev_priv->dma_pages, ++ sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages, ++ DRM_MEM_DRIVER); ++ ++ return 0; ++} ++ ++static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_savage_init_t *init = data; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ switch (init->func) { ++ case SAVAGE_INIT_BCI: ++ return savage_do_init_bci(dev, init); ++ case SAVAGE_CLEANUP_BCI: ++ return savage_do_cleanup_bci(dev); ++ } ++ ++ return -EINVAL; ++} ++ ++static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ drm_savage_event_emit_t *event = data; ++ ++ DRM_DEBUG("\n"); ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ event->count = savage_bci_emit_event(dev_priv, event->flags); ++ event->count |= dev_priv->event_wrap << 16; ++ ++ return 0; ++} ++ ++static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ drm_savage_private_t *dev_priv = dev->dev_private; ++ drm_savage_event_wait_t *event = data; ++ unsigned int event_e, hw_e; ++ unsigned int event_w, hw_w; ++ ++ DRM_DEBUG("\n"); ++ ++ UPDATE_EVENT_COUNTER(); ++ if (dev_priv->status_ptr) ++ hw_e = dev_priv->status_ptr[1] & 0xffff; ++ else ++ hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; ++ hw_w = dev_priv->event_wrap; ++ if (hw_e > dev_priv->event_counter) ++ hw_w--; /* hardware hasn't passed the last wrap yet */ ++ ++ event_e = event->count & 0xffff; ++ event_w = event->count >> 16; ++ ++ /* Don't need to wait if ++ * - event counter wrapped since the event was emitted or ++ * - the hardware has advanced up to or over the event to wait for. ++ */ ++ if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e)) ++ return 0; ++ else ++ return dev_priv->wait_evnt(dev_priv, event_e); ++} ++ ++/* ++ * DMA buffer management ++ */ ++ ++static int savage_bci_get_buffers(struct drm_device *dev, ++ struct drm_file *file_priv, ++ struct drm_dma *d) ++{ ++ struct drm_buf *buf; ++ int i; ++ ++ for (i = d->granted_count; i < d->request_count; i++) { ++ buf = savage_freelist_get(dev); ++ if (!buf) ++ return -EAGAIN; ++ ++ buf->file_priv = file_priv; ++ ++ if (DRM_COPY_TO_USER(&d->request_indices[i], ++ &buf->idx, sizeof(buf->idx))) ++ return -EFAULT; ++ if (DRM_COPY_TO_USER(&d->request_sizes[i], ++ &buf->total, sizeof(buf->total))) ++ return -EFAULT; ++ ++ d->granted_count++; ++ } ++ return 0; ++} ++ ++int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) ++{ ++ struct drm_device_dma *dma = dev->dma; ++ struct drm_dma *d = data; ++ int ret = 0; ++ ++ LOCK_TEST_WITH_RETURN(dev, file_priv); ++ ++ /* Please don't send us buffers. ++ */ ++ if (d->send_count != 0) { ++ DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", ++ DRM_CURRENTPID, d->send_count); ++ return -EINVAL; ++ } ++ ++ /* We'll send you buffers. 
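++ * savage_bci_get_buffers() returns -EAGAIN once the freelist runs
++ * dry, so a client is expected to retry after the engine has aged
++ * some buffers.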
++ */
++ if (d->request_count < 0 || d->request_count > dma->buf_count) {
++ DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
++ DRM_CURRENTPID, d->request_count, dma->buf_count);
++ return -EINVAL;
++ }
++
++ d->granted_count = 0;
++
++ if (d->request_count) {
++ ret = savage_bci_get_buffers(dev, file_priv, d);
++ }
++
++ return ret;
++}
++
++void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
++{
++ struct drm_device_dma *dma = dev->dma;
++ drm_savage_private_t *dev_priv = dev->dev_private;
++ int i;
++
++ if (!dma)
++ return;
++ if (!dev_priv)
++ return;
++ if (!dma->buflist)
++ return;
++
++ for (i = 0; i < dma->buf_count; i++) {
++ struct drm_buf *buf = dma->buflist[i];
++ drm_savage_buf_priv_t *buf_priv = buf->dev_private;
++
++ if (buf->file_priv == file_priv && buf_priv &&
++ buf_priv->next == NULL && buf_priv->prev == NULL) {
++ uint16_t event;
++ DRM_DEBUG("reclaimed from client\n");
++ event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
++ SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
++ savage_freelist_put(dev, buf);
++ }
++ }
++
++ drm_core_reclaim_buffers(dev, file_priv);
++}
++
++struct drm_ioctl_desc savage_ioctls[] = {
++ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
++ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
++};
++
++int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/savage_drm.h git-nokia/drivers/gpu/drm-tungsten/savage_drm.h
+--- git/drivers/gpu/drm-tungsten/savage_drm.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/savage_drm.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,209 @@
++/* savage_drm.h -- Public header for the savage driver
++ *
++ * Copyright 2004 Felix Kuehling
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __SAVAGE_DRM_H__
++#define __SAVAGE_DRM_H__
++
++#ifndef __SAVAGE_SAREA_DEFINES__
++#define __SAVAGE_SAREA_DEFINES__
++
++/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
++ * regions, subject to a minimum region size of (1<<16) == 64k.
++ *
++ * Clients may subdivide regions internally, but when sharing between
++ * clients, the region size is the minimum granularity.
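++ *
++ * (With the defines below this works out to SAVAGE_NR_TEX_REGIONS = 16
++ * regions per heap and a 1 << SAVAGE_LOG_MIN_TEX_REGION_SIZE = 64k
++ * minimum region size.)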
++ */
++
++#define SAVAGE_CARD_HEAP 0
++#define SAVAGE_AGP_HEAP 1
++#define SAVAGE_NR_TEX_HEAPS 2
++#define SAVAGE_NR_TEX_REGIONS 16
++#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
++
++#endif /* __SAVAGE_SAREA_DEFINES__ */
++
++typedef struct _drm_savage_sarea {
++ /* LRU lists for texture memory in agp space and on the card.
++ */
++ struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
++ unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
++
++ /* Mechanism to validate card state.
++ */
++ int ctxOwner;
++} drm_savage_sarea_t, *drm_savage_sarea_ptr;
++
++/* Savage-specific ioctls
++ */
++#define DRM_SAVAGE_BCI_INIT 0x00
++#define DRM_SAVAGE_BCI_CMDBUF 0x01
++#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
++#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
++
++#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
++#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
++#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
++#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
++
++#define SAVAGE_DMA_PCI 1
++#define SAVAGE_DMA_AGP 3
++typedef struct drm_savage_init {
++ enum {
++ SAVAGE_INIT_BCI = 1,
++ SAVAGE_CLEANUP_BCI = 2
++ } func;
++ unsigned int sarea_priv_offset;
++
++ /* some parameters */
++ unsigned int cob_size;
++ unsigned int bci_threshold_lo, bci_threshold_hi;
++ unsigned int dma_type;
++
++ /* frame buffer layout */
++ unsigned int fb_bpp;
++ unsigned int front_offset, front_pitch;
++ unsigned int back_offset, back_pitch;
++ unsigned int depth_bpp;
++ unsigned int depth_offset, depth_pitch;
++
++ /* local textures */
++ unsigned int texture_offset;
++ unsigned int texture_size;
++
++ /* physical locations of non-permanent maps */
++ unsigned long status_offset;
++ unsigned long buffers_offset;
++ unsigned long agp_textures_offset;
++ unsigned long cmd_dma_offset;
++} drm_savage_init_t;
++
++typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
++typedef struct drm_savage_cmdbuf {
++ /* command buffer in client's address space */
++ drm_savage_cmd_header_t __user *cmd_addr;
++ unsigned int size; /* size of the command buffer in 64bit units */
++
++ unsigned int dma_idx; /* DMA buffer index to use */
++ int discard; /* discard DMA buffer when done */
++ /* vertex buffer in client's address space */
++ unsigned int __user *vb_addr;
++ unsigned int vb_size; /* size of client vertex buffer in bytes */
++ unsigned int vb_stride; /* stride of vertices in 32bit words */
++ /* boxes in client's address space */
++ struct drm_clip_rect __user *box_addr;
++ unsigned int nbox; /* number of clipping boxes */
++} drm_savage_cmdbuf_t;
++
++#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
++#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
++#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
++typedef struct drm_savage_event {
++ unsigned int count;
++ unsigned int flags;
++} drm_savage_event_emit_t, drm_savage_event_wait_t;
++
++/* Commands for the cmdbuf ioctl
++ */
++#define SAVAGE_CMD_STATE 0 /* a range of state registers */
++#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
++#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
++#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
++#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices from client vertex
buffer */ ++#define SAVAGE_CMD_CLEAR 5 /* clear buffers */ ++#define SAVAGE_CMD_SWAP 6 /* swap buffers */ ++ ++/* Primitive types ++*/ ++#define SAVAGE_PRIM_TRILIST 0 /* triangle list */ ++#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */ ++#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */ ++#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat ++ * shading on s3d */ ++ ++/* Skip flags (vertex format) ++ */ ++#define SAVAGE_SKIP_Z 0x01 ++#define SAVAGE_SKIP_W 0x02 ++#define SAVAGE_SKIP_C0 0x04 ++#define SAVAGE_SKIP_C1 0x08 ++#define SAVAGE_SKIP_S0 0x10 ++#define SAVAGE_SKIP_T0 0x20 ++#define SAVAGE_SKIP_ST0 0x30 ++#define SAVAGE_SKIP_S1 0x40 ++#define SAVAGE_SKIP_T1 0x80 ++#define SAVAGE_SKIP_ST1 0xc0 ++#define SAVAGE_SKIP_ALL_S3D 0x3f ++#define SAVAGE_SKIP_ALL_S4 0xff ++ ++/* Buffer names for clear command ++ */ ++#define SAVAGE_FRONT 0x1 ++#define SAVAGE_BACK 0x2 ++#define SAVAGE_DEPTH 0x4 ++ ++/* 64-bit command header ++ */ ++union drm_savage_cmd_header { ++ struct { ++ unsigned char cmd; /* command */ ++ unsigned char pad0; ++ unsigned short pad1; ++ unsigned short pad2; ++ unsigned short pad3; ++ } cmd; /* generic */ ++ struct { ++ unsigned char cmd; ++ unsigned char global; /* need idle engine? */ ++ unsigned short count; /* number of consecutive registers */ ++ unsigned short start; /* first register */ ++ unsigned short pad3; ++ } state; /* SAVAGE_CMD_STATE */ ++ struct { ++ unsigned char cmd; ++ unsigned char prim; /* primitive type */ ++ unsigned short skip; /* vertex format (skip flags) */ ++ unsigned short count; /* number of vertices */ ++ unsigned short start; /* first vertex in DMA/vertex buffer */ ++ } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */ ++ struct { ++ unsigned char cmd; ++ unsigned char prim; ++ unsigned short skip; ++ unsigned short count; /* number of indices that follow */ ++ unsigned short pad3; ++ } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */ ++ struct { ++ unsigned char cmd; ++ unsigned char pad0; ++ unsigned short pad1; ++ unsigned int flags; ++ } clear0; /* SAVAGE_CMD_CLEAR */ ++ struct { ++ unsigned int mask; ++ unsigned int value; ++ } clear1; /* SAVAGE_CMD_CLEAR data */ ++}; ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/savage_drv.c git-nokia/drivers/gpu/drm-tungsten/savage_drv.c +--- git/drivers/gpu/drm-tungsten/savage_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/savage_drv.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,96 @@ ++/* savage_drv.c -- Savage driver for Linux ++ * ++ * Copyright 2004 Felix Kuehling ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NON-INFRINGEMENT. 
IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF ++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drmP.h" ++#include "savage_drm.h" ++#include "savage_drv.h" ++ ++#include "drm_pciids.h" ++ ++static struct pci_device_id pciidlist[] = { ++ savage_PCI_IDS ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); ++static struct drm_driver driver = { ++ .driver_features = ++ DRIVER_USE_AGP | DRIVER_USE_MTRR | ++ DRIVER_HAVE_DMA | DRIVER_PCI_DMA, ++ .dev_priv_size = sizeof(drm_savage_buf_priv_t), ++ .load = savage_driver_load, ++ .firstopen = savage_driver_firstopen, ++ .lastclose = savage_driver_lastclose, ++ .unload = savage_driver_unload, ++ .reclaim_buffers = savage_reclaim_buffers, ++ .get_map_ofs = drm_core_get_map_ofs, ++ .get_reg_ofs = drm_core_get_reg_ofs, ++ .ioctls = savage_ioctls, ++ .dma_ioctl = savage_bci_buffers, ++ .fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .ioctl = drm_ioctl, ++ .mmap = drm_mmap, ++ .poll = drm_poll, ++ .fasync = drm_fasync, ++ }, ++ .pci_driver = { ++ .name = DRIVER_NAME, ++ .id_table = pciidlist, ++ .probe = probe, ++ .remove = __devexit_p(drm_cleanup_pci), ++ }, ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = DRIVER_MAJOR, ++ .minor = DRIVER_MINOR, ++ .patchlevel = DRIVER_PATCHLEVEL, ++}; ++ ++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ return drm_get_dev(pdev, ent, &driver); ++} ++ ++static int __init savage_init(void) ++{ ++ driver.num_ioctls = savage_max_ioctl; ++ return drm_init(&driver, pciidlist); ++} ++ ++static void __exit savage_exit(void) ++{ ++ drm_exit(&driver); ++} ++ ++module_init(savage_init); ++module_exit(savage_exit); ++ ++MODULE_AUTHOR( DRIVER_AUTHOR ); ++MODULE_DESCRIPTION( DRIVER_DESC ); ++MODULE_LICENSE("GPL and additional rights"); +diff -Nurd git/drivers/gpu/drm-tungsten/savage_drv.h git-nokia/drivers/gpu/drm-tungsten/savage_drv.h +--- git/drivers/gpu/drm-tungsten/savage_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/savage_drv.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,575 @@ ++/* savage_drv.h -- Private header for the savage driver */ ++/* ++ * Copyright 2004 Felix Kuehling ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NON-INFRINGEMENT. 
IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF ++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef __SAVAGE_DRV_H__ ++#define __SAVAGE_DRV_H__ ++ ++#define DRIVER_AUTHOR "Felix Kuehling" ++ ++#define DRIVER_NAME "savage" ++#define DRIVER_DESC "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]" ++#define DRIVER_DATE "20050313" ++ ++#define DRIVER_MAJOR 2 ++#define DRIVER_MINOR 4 ++#define DRIVER_PATCHLEVEL 1 ++/* Interface history: ++ * ++ * 1.x The DRM driver from the VIA/S3 code drop, basically a dummy ++ * 2.0 The first real DRM ++ * 2.1 Scissors registers managed by the DRM, 3D operations clipped by ++ * cliprects of the cmdbuf ioctl ++ * 2.2 Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX ++ * 2.3 Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits ++ * wide and thus very long lived (unlikely to ever wrap). The size ++ * in the struct was 32 bits before, but only 16 bits were used ++ * 2.4 Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is ++ * actually used ++ */ ++ ++typedef struct drm_savage_age { ++ uint16_t event; ++ unsigned int wrap; ++} drm_savage_age_t; ++ ++typedef struct drm_savage_buf_priv { ++ struct drm_savage_buf_priv *next; ++ struct drm_savage_buf_priv *prev; ++ drm_savage_age_t age; ++ struct drm_buf *buf; ++} drm_savage_buf_priv_t; ++ ++typedef struct drm_savage_dma_page { ++ drm_savage_age_t age; ++ unsigned int used, flushed; ++} drm_savage_dma_page_t; ++#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */ ++/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command ++ * size of 16kbytes or 4k entries. Minimum requirement would be ++ * 10kbytes for 255 40-byte vertices in one drawing command. */ ++#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4) ++ ++/* interesting bits of hardware state that are saved in dev_priv */ ++typedef union { ++ struct drm_savage_common_state { ++ uint32_t vbaddr; ++ } common; ++ struct { ++ unsigned char pad[sizeof(struct drm_savage_common_state)]; ++ uint32_t texctrl, texaddr; ++ uint32_t scstart, new_scstart; ++ uint32_t scend, new_scend; ++ } s3d; ++ struct { ++ unsigned char pad[sizeof(struct drm_savage_common_state)]; ++ uint32_t texdescr, texaddr0, texaddr1; ++ uint32_t drawctrl0, new_drawctrl0; ++ uint32_t drawctrl1, new_drawctrl1; ++ } s4; ++} drm_savage_state_t; ++ ++/* these chip tags should match the ones in the 2D driver in savage_regs.h. */ ++enum savage_family { ++ S3_UNKNOWN = 0, ++ S3_SAVAGE3D, ++ S3_SAVAGE_MX, ++ S3_SAVAGE4, ++ S3_PROSAVAGE, ++ S3_TWISTER, ++ S3_PROSAVAGEDDR, ++ S3_SUPERSAVAGE, ++ S3_SAVAGE2000, ++ S3_LAST ++}; ++ ++extern struct drm_ioctl_desc savage_ioctls[]; ++extern int savage_max_ioctl; ++ ++#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) ++ ++#define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) \ ++ || (chip==S3_PROSAVAGE) \ ++ || (chip==S3_TWISTER) \ ++ || (chip==S3_PROSAVAGEDDR)) ++ ++#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE)) ++ ++#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000)) ++ ++#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) \ ++ ||(chip==S3_PROSAVAGEDDR)) ++ ++/* flags */ ++#define SAVAGE_IS_AGP 1 ++ ++typedef struct drm_savage_private { ++ drm_savage_sarea_t *sarea_priv; ++ ++ drm_savage_buf_priv_t head, tail; ++ ++ /* who am I? 
*/ ++ enum savage_family chipset; ++ ++ unsigned int cob_size; ++ unsigned int bci_threshold_lo, bci_threshold_hi; ++ unsigned int dma_type; ++ ++ /* frame buffer layout */ ++ unsigned int fb_bpp; ++ unsigned int front_offset, front_pitch; ++ unsigned int back_offset, back_pitch; ++ unsigned int depth_bpp; ++ unsigned int depth_offset, depth_pitch; ++ ++ /* bitmap descriptors for swap and clear */ ++ unsigned int front_bd, back_bd, depth_bd; ++ ++ /* local textures */ ++ unsigned int texture_offset; ++ unsigned int texture_size; ++ ++ /* memory regions in physical memory */ ++ drm_local_map_t *sarea; ++ drm_local_map_t *mmio; ++ drm_local_map_t *fb; ++ drm_local_map_t *aperture; ++ drm_local_map_t *status; ++ drm_local_map_t *agp_textures; ++ drm_local_map_t *cmd_dma; ++ drm_local_map_t fake_dma; ++ ++ struct { ++ int handle; ++ unsigned long base, size; ++ } mtrr[3]; ++ ++ /* BCI and status-related stuff */ ++ volatile uint32_t *status_ptr, *bci_ptr; ++ uint32_t status_used_mask; ++ uint16_t event_counter; ++ unsigned int event_wrap; ++ ++ /* Savage4 command DMA */ ++ drm_savage_dma_page_t *dma_pages; ++ unsigned int nr_dma_pages, first_dma_page, current_dma_page; ++ drm_savage_age_t last_dma_age; ++ ++ /* saved hw state for global/local check on S3D */ ++ uint32_t hw_draw_ctrl, hw_zbuf_ctrl; ++ /* and for scissors (global, so don't emit if not changed) */ ++ uint32_t hw_scissors_start, hw_scissors_end; ++ ++ drm_savage_state_t state; ++ ++ /* after emitting a wait cmd Savage3D needs 63 nops before next DMA */ ++ unsigned int waiting; ++ ++ /* config/hardware-dependent function pointers */ ++ int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n); ++ int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e); ++ /* Err, there is a macro wait_event in include/linux/wait.h. ++ * Avoid unwanted macro expansion. 
*/ ++ void (*emit_clip_rect)(struct drm_savage_private *dev_priv, ++ const struct drm_clip_rect *pbox); ++ void (*dma_flush)(struct drm_savage_private *dev_priv); ++} drm_savage_private_t; ++ ++/* ioctls */ ++extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv); ++extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); ++ ++/* BCI functions */ ++extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, ++ unsigned int flags); ++extern void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf); ++extern void savage_dma_reset(drm_savage_private_t *dev_priv); ++extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page); ++extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, ++ unsigned int n); ++extern int savage_driver_load(struct drm_device *dev, unsigned long chipset); ++extern int savage_driver_firstopen(struct drm_device *dev); ++extern void savage_driver_lastclose(struct drm_device *dev); ++extern int savage_driver_unload(struct drm_device *dev); ++extern void savage_reclaim_buffers(struct drm_device *dev, ++ struct drm_file *file_priv); ++ ++/* state functions */ ++extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, ++ const struct drm_clip_rect *pbox); ++extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, ++ const struct drm_clip_rect *pbox); ++ ++#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ ++#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ ++#define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */ ++#define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */ ++#define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */ ++ ++#define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region ++ * inside the MMIO region */ ++#define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip ++ * BCI FIFO */ ++ ++/* ++ * MMIO registers ++ */ ++#define SAVAGE_STATUS_WORD0 0x48C00 ++#define SAVAGE_STATUS_WORD1 0x48C04 ++#define SAVAGE_ALT_STATUS_WORD0 0x48C60 ++ ++#define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff ++#define SAVAGE_FIFO_USED_MASK_S4 0x001fffff ++ ++/* Copied from savage_bci.h in the 2D driver with some renaming. 
*/ ++ ++/* Bitmap descriptors */ ++#define SAVAGE_BD_STRIDE_SHIFT 0 ++#define SAVAGE_BD_BPP_SHIFT 16 ++#define SAVAGE_BD_TILE_SHIFT 24 ++#define SAVAGE_BD_BW_DISABLE (1<<28) ++/* common: */ ++#define SAVAGE_BD_TILE_LINEAR 0 ++/* savage4, MX, IX, 3D */ ++#define SAVAGE_BD_TILE_16BPP 2 ++#define SAVAGE_BD_TILE_32BPP 3 ++/* twister, prosavage, DDR, supersavage, 2000 */ ++#define SAVAGE_BD_TILE_DEST 1 ++#define SAVAGE_BD_TILE_TEXTURE 2 ++/* GBD - BCI enable */ ++/* savage4, MX, IX, 3D */ ++#define SAVAGE_GBD_BCI_ENABLE 8 ++/* twister, prosavage, DDR, supersavage, 2000 */ ++#define SAVAGE_GBD_BCI_ENABLE_TWISTER 0 ++ ++#define SAVAGE_GBD_BIG_ENDIAN 4 ++#define SAVAGE_GBD_LITTLE_ENDIAN 0 ++#define SAVAGE_GBD_64 1 ++ ++/* Global Bitmap Descriptor */ ++#define SAVAGE_BCI_GLB_BD_LOW 0x8168 ++#define SAVAGE_BCI_GLB_BD_HIGH 0x816C ++ ++/* ++ * BCI registers ++ */ ++/* Savage4/Twister/ProSavage 3D registers */ ++#define SAVAGE_DRAWLOCALCTRL_S4 0x1e ++#define SAVAGE_TEXPALADDR_S4 0x1f ++#define SAVAGE_TEXCTRL0_S4 0x20 ++#define SAVAGE_TEXCTRL1_S4 0x21 ++#define SAVAGE_TEXADDR0_S4 0x22 ++#define SAVAGE_TEXADDR1_S4 0x23 ++#define SAVAGE_TEXBLEND0_S4 0x24 ++#define SAVAGE_TEXBLEND1_S4 0x25 ++#define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */ ++#define SAVAGE_TEXDESCR_S4 0x27 ++#define SAVAGE_FOGTABLE_S4 0x28 ++#define SAVAGE_FOGCTRL_S4 0x30 ++#define SAVAGE_STENCILCTRL_S4 0x31 ++#define SAVAGE_ZBUFCTRL_S4 0x32 ++#define SAVAGE_ZBUFOFF_S4 0x33 ++#define SAVAGE_DESTCTRL_S4 0x34 ++#define SAVAGE_DRAWCTRL0_S4 0x35 ++#define SAVAGE_DRAWCTRL1_S4 0x36 ++#define SAVAGE_ZWATERMARK_S4 0x37 ++#define SAVAGE_DESTTEXRWWATERMARK_S4 0x38 ++#define SAVAGE_TEXBLENDCOLOR_S4 0x39 ++/* Savage3D/MX/IX 3D registers */ ++#define SAVAGE_TEXPALADDR_S3D 0x18 ++#define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */ ++#define SAVAGE_TEXADDR_S3D 0x1A ++#define SAVAGE_TEXDESCR_S3D 0x1B ++#define SAVAGE_TEXCTRL_S3D 0x1C ++#define SAVAGE_FOGTABLE_S3D 0x20 ++#define SAVAGE_FOGCTRL_S3D 0x30 ++#define SAVAGE_DRAWCTRL_S3D 0x31 ++#define SAVAGE_ZBUFCTRL_S3D 0x32 ++#define SAVAGE_ZBUFOFF_S3D 0x33 ++#define SAVAGE_DESTCTRL_S3D 0x34 ++#define SAVAGE_SCSTART_S3D 0x35 ++#define SAVAGE_SCEND_S3D 0x36 ++#define SAVAGE_ZWATERMARK_S3D 0x37 ++#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38 ++/* common stuff */ ++#define SAVAGE_VERTBUFADDR 0x3e ++#define SAVAGE_BITPLANEWTMASK 0xd7 ++#define SAVAGE_DMABUFADDR 0x51 ++ ++/* texture enable bits (needed for tex addr checking) */ ++#define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */ ++#define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */ ++#define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */ ++ ++/* Global fields in Savage4/Twister/ProSavage 3D registers: ++ * ++ * All texture registers and DrawLocalCtrl are local. All other ++ * registers are global. */ ++ ++/* Global fields in Savage3D/MX/IX 3D registers: ++ * ++ * All texture registers are local. DrawCtrl and ZBufCtrl are ++ * partially local. All other registers are global. 
++ * ++ * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal ++ * ZBufCtrl global fields: zCmpFunc, zBufEn ++ */ ++#define SAVAGE_DRAWCTRL_S3D_GLOBAL 0x03f3c00c ++#define SAVAGE_ZBUFCTRL_S3D_GLOBAL 0x00000027 ++ ++/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d) ++ */ ++#define SAVAGE_SCISSOR_MASK_S4 0x00fff7ff ++#define SAVAGE_SCISSOR_MASK_S3D 0x07ff07ff ++ ++/* ++ * BCI commands ++ */ ++#define BCI_CMD_NOP 0x40000000 ++#define BCI_CMD_RECT 0x48000000 ++#define BCI_CMD_RECT_XP 0x01000000 ++#define BCI_CMD_RECT_YP 0x02000000 ++#define BCI_CMD_SCANLINE 0x50000000 ++#define BCI_CMD_LINE 0x5C000000 ++#define BCI_CMD_LINE_LAST_PIXEL 0x58000000 ++#define BCI_CMD_BYTE_TEXT 0x63000000 ++#define BCI_CMD_NT_BYTE_TEXT 0x67000000 ++#define BCI_CMD_BIT_TEXT 0x6C000000 ++#define BCI_CMD_GET_ROP(cmd) (((cmd) >> 16) & 0xFF) ++#define BCI_CMD_SET_ROP(cmd, rop) ((cmd) |= ((rop & 0xFF) << 16)) ++#define BCI_CMD_SEND_COLOR 0x00008000 ++ ++#define BCI_CMD_CLIP_NONE 0x00000000 ++#define BCI_CMD_CLIP_CURRENT 0x00002000 ++#define BCI_CMD_CLIP_LR 0x00004000 ++#define BCI_CMD_CLIP_NEW 0x00006000 ++ ++#define BCI_CMD_DEST_GBD 0x00000000 ++#define BCI_CMD_DEST_PBD 0x00000800 ++#define BCI_CMD_DEST_PBD_NEW 0x00000C00 ++#define BCI_CMD_DEST_SBD 0x00001000 ++#define BCI_CMD_DEST_SBD_NEW 0x00001400 ++ ++#define BCI_CMD_SRC_TRANSPARENT 0x00000200 ++#define BCI_CMD_SRC_SOLID 0x00000000 ++#define BCI_CMD_SRC_GBD 0x00000020 ++#define BCI_CMD_SRC_COLOR 0x00000040 ++#define BCI_CMD_SRC_MONO 0x00000060 ++#define BCI_CMD_SRC_PBD_COLOR 0x00000080 ++#define BCI_CMD_SRC_PBD_MONO 0x000000A0 ++#define BCI_CMD_SRC_PBD_COLOR_NEW 0x000000C0 ++#define BCI_CMD_SRC_PBD_MONO_NEW 0x000000E0 ++#define BCI_CMD_SRC_SBD_COLOR 0x00000100 ++#define BCI_CMD_SRC_SBD_MONO 0x00000120 ++#define BCI_CMD_SRC_SBD_COLOR_NEW 0x00000140 ++#define BCI_CMD_SRC_SBD_MONO_NEW 0x00000160 ++ ++#define BCI_CMD_PAT_TRANSPARENT 0x00000010 ++#define BCI_CMD_PAT_NONE 0x00000000 ++#define BCI_CMD_PAT_COLOR 0x00000002 ++#define BCI_CMD_PAT_MONO 0x00000003 ++#define BCI_CMD_PAT_PBD_COLOR 0x00000004 ++#define BCI_CMD_PAT_PBD_MONO 0x00000005 ++#define BCI_CMD_PAT_PBD_COLOR_NEW 0x00000006 ++#define BCI_CMD_PAT_PBD_MONO_NEW 0x00000007 ++#define BCI_CMD_PAT_SBD_COLOR 0x00000008 ++#define BCI_CMD_PAT_SBD_MONO 0x00000009 ++#define BCI_CMD_PAT_SBD_COLOR_NEW 0x0000000A ++#define BCI_CMD_PAT_SBD_MONO_NEW 0x0000000B ++ ++#define BCI_BD_BW_DISABLE 0x10000000 ++#define BCI_BD_TILE_MASK 0x03000000 ++#define BCI_BD_TILE_NONE 0x00000000 ++#define BCI_BD_TILE_16 0x02000000 ++#define BCI_BD_TILE_32 0x03000000 ++#define BCI_BD_GET_BPP(bd) (((bd) >> 16) & 0xFF) ++#define BCI_BD_SET_BPP(bd, bpp) ((bd) |= (((bpp) & 0xFF) << 16)) ++#define BCI_BD_GET_STRIDE(bd) ((bd) & 0xFFFF) ++#define BCI_BD_SET_STRIDE(bd, st) ((bd) |= ((st) & 0xFFFF)) ++ ++#define BCI_CMD_SET_REGISTER 0x96000000 ++ ++#define BCI_CMD_WAIT 0xC0000000 ++#define BCI_CMD_WAIT_3D 0x00010000 ++#define BCI_CMD_WAIT_2D 0x00020000 ++ ++#define BCI_CMD_UPDATE_EVENT_TAG 0x98000000 ++ ++#define BCI_CMD_DRAW_PRIM 0x80000000 ++#define BCI_CMD_DRAW_INDEXED_PRIM 0x88000000 ++#define BCI_CMD_DRAW_CONT 0x01000000 ++#define BCI_CMD_DRAW_TRILIST 0x00000000 ++#define BCI_CMD_DRAW_TRISTRIP 0x02000000 ++#define BCI_CMD_DRAW_TRIFAN 0x04000000 ++#define BCI_CMD_DRAW_SKIPFLAGS 0x000000ff ++#define BCI_CMD_DRAW_NO_Z 0x00000001 ++#define BCI_CMD_DRAW_NO_W 0x00000002 ++#define BCI_CMD_DRAW_NO_CD 0x00000004 ++#define BCI_CMD_DRAW_NO_CS 0x00000008 ++#define BCI_CMD_DRAW_NO_U0 0x00000010 ++#define 
BCI_CMD_DRAW_NO_V0 0x00000020
++#define BCI_CMD_DRAW_NO_UV0 0x00000030
++#define BCI_CMD_DRAW_NO_U1 0x00000040
++#define BCI_CMD_DRAW_NO_V1 0x00000080
++#define BCI_CMD_DRAW_NO_UV1 0x000000c0
++
++#define BCI_CMD_DMA 0xa8000000
++
++#define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF)
++#define BCI_X_Y(x, y) ((((y) << 16) | (x)) & 0x0FFF0FFF)
++#define BCI_X_W(x, w) ((((w) << 16) | (x)) & 0x0FFF0FFF)
++#define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF)
++#define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF)
++#define BCI_CLIP_BR(b, r) ((((b) << 16) | (r)) & 0x0FFF0FFF)
++
++#define BCI_LINE_X_Y(x, y) (((y) << 16) | ((x) & 0xFFFF))
++#define BCI_LINE_STEPS(diag, axi) (((axi) << 16) | ((diag) & 0xFFFF))
++#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
++ (((maj) & 0x1FFF) | \
++ ((ym) ? 1<<13 : 0) | \
++ ((xp) ? 1<<14 : 0) | \
++ ((yp) ? 1<<15 : 0) | \
++ ((err) << 16))
++
++/*
++ * common commands
++ */
++#define BCI_SET_REGISTERS( first, n ) \
++ BCI_WRITE(BCI_CMD_SET_REGISTER | \
++ ((uint32_t)(n) & 0xff) << 16 | \
++ ((uint32_t)(first) & 0xffff))
++#define DMA_SET_REGISTERS( first, n ) \
++ DMA_WRITE(BCI_CMD_SET_REGISTER | \
++ ((uint32_t)(n) & 0xff) << 16 | \
++ ((uint32_t)(first) & 0xffff))
++
++#define BCI_DRAW_PRIMITIVE(n, type, skip) \
++ BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
++ ((n) << 16))
++#define DMA_DRAW_PRIMITIVE(n, type, skip) \
++ DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
++ ((n) << 16))
++
++#define BCI_DRAW_INDICES_S3D(n, type, i0) \
++ BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
++ ((n) << 16) | (i0))
++
++#define BCI_DRAW_INDICES_S4(n, type, skip) \
++ BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
++ (skip) | ((n) << 16))
++
++#define BCI_DMA(n) \
++ BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
++
++/*
++ * access to MMIO
++ */
++#define SAVAGE_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
++#define SAVAGE_WRITE(reg) DRM_WRITE32( dev_priv->mmio, (reg) )
++
++/*
++ * access to the burst command interface (BCI)
++ */
++#define SAVAGE_BCI_DEBUG 1
++
++#define BCI_LOCALS volatile uint32_t *bci_ptr;
++
++#define BEGIN_BCI( n ) do { \
++ dev_priv->wait_fifo(dev_priv, (n)); \
++ bci_ptr = dev_priv->bci_ptr; \
++} while(0)
++
++#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
++
++/*
++ * command DMA support
++ */
++#define SAVAGE_DMA_DEBUG 1
++
++#define DMA_LOCALS uint32_t *dma_ptr;
++
++#define BEGIN_DMA( n ) do { \
++ unsigned int cur = dev_priv->current_dma_page; \
++ unsigned int rest = SAVAGE_DMA_PAGE_SIZE - \
++ dev_priv->dma_pages[cur].used; \
++ if ((n) > rest) { \
++ dma_ptr = savage_dma_alloc(dev_priv, (n)); \
++ } else { /* fast path for small allocations */ \
++ dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + \
++ cur * SAVAGE_DMA_PAGE_SIZE + \
++ dev_priv->dma_pages[cur].used; \
++ if (dev_priv->dma_pages[cur].used == 0) \
++ savage_dma_wait(dev_priv, cur); \
++ dev_priv->dma_pages[cur].used += (n); \
++ } \
++} while(0)
++
++#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
++
++#define DMA_COPY(src, n) do { \
++ memcpy(dma_ptr, (src), (n)*4); \
++ dma_ptr += n; \
++} while(0)
++
++#if SAVAGE_DMA_DEBUG
++#define DMA_COMMIT() do { \
++ unsigned int cur = dev_priv->current_dma_page; \
++ uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle + \
++ cur * SAVAGE_DMA_PAGE_SIZE + \
++ dev_priv->dma_pages[cur].used; \
++ if (dma_ptr != expected) { \
++ DRM_ERROR("DMA allocation and use don't match: " \
++ "%p != %p\n", expected, dma_ptr); \
++ savage_dma_reset(dev_priv); \
++ } \
++} while(0)
++#else
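++/* with SAVAGE_DMA_DEBUG disabled the cross-check compiles out */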
++#define DMA_COMMIT() do {/* nothing */} while(0) ++#endif ++ ++#define DMA_FLUSH() dev_priv->dma_flush(dev_priv) ++ ++/* Buffer aging via event tag ++ */ ++ ++#define UPDATE_EVENT_COUNTER( ) do { \ ++ if (dev_priv->status_ptr) { \ ++ uint16_t count; \ ++ /* coordinate with Xserver */ \ ++ count = dev_priv->status_ptr[1023]; \ ++ if (count < dev_priv->event_counter) \ ++ dev_priv->event_wrap++; \ ++ dev_priv->event_counter = count; \ ++ } \ ++} while(0) ++ ++#define SET_AGE( age, e, w ) do { \ ++ (age)->event = e; \ ++ (age)->wrap = w; \ ++} while(0) ++ ++#define TEST_AGE( age, e, w ) \ ++ ( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) ) ++ ++#endif /* __SAVAGE_DRV_H__ */ +diff -Nurd git/drivers/gpu/drm-tungsten/savage_state.c git-nokia/drivers/gpu/drm-tungsten/savage_state.c +--- git/drivers/gpu/drm-tungsten/savage_state.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/savage_state.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,1165 @@ ++/* savage_state.c -- State and drawing support for Savage ++ * ++ * Copyright 2004 Felix Kuehling ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sub license, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF ++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++#include "drmP.h" ++#include "savage_drm.h" ++#include "savage_drv.h" ++ ++void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, ++ const struct drm_clip_rect *pbox) ++{ ++ uint32_t scstart = dev_priv->state.s3d.new_scstart; ++ uint32_t scend = dev_priv->state.s3d.new_scend; ++ scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) | ++ ((uint32_t)pbox->x1 & 0x000007ff) | ++ (((uint32_t)pbox->y1 << 16) & 0x07ff0000); ++ scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) | ++ (((uint32_t)pbox->x2 - 1) & 0x000007ff) | ++ ((((uint32_t)pbox->y2 - 1) << 16) & 0x07ff0000); ++ if (scstart != dev_priv->state.s3d.scstart || ++ scend != dev_priv->state.s3d.scend) { ++ DMA_LOCALS; ++ BEGIN_DMA(4); ++ DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); ++ DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2); ++ DMA_WRITE(scstart); ++ DMA_WRITE(scend); ++ dev_priv->state.s3d.scstart = scstart; ++ dev_priv->state.s3d.scend = scend; ++ dev_priv->waiting = 1; ++ DMA_COMMIT(); ++ } ++} ++ ++void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, ++ const struct drm_clip_rect *pbox) ++{ ++ uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; ++ uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; ++ drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) | ++ ((uint32_t)pbox->x1 & 0x000007ff) | ++ (((uint32_t)pbox->y1 << 12) & 0x00fff000); ++ drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) | ++ (((uint32_t)pbox->x2 - 1) & 0x000007ff) | ++ ((((uint32_t)pbox->y2 - 1) << 12) & 0x00fff000); ++ if (drawctrl0 != dev_priv->state.s4.drawctrl0 || ++ drawctrl1 != dev_priv->state.s4.drawctrl1) { ++ DMA_LOCALS; ++ BEGIN_DMA(4); ++ DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); ++ DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2); ++ DMA_WRITE(drawctrl0); ++ DMA_WRITE(drawctrl1); ++ dev_priv->state.s4.drawctrl0 = drawctrl0; ++ dev_priv->state.s4.drawctrl1 = drawctrl1; ++ dev_priv->waiting = 1; ++ DMA_COMMIT(); ++ } ++} ++ ++static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit, ++ uint32_t addr) ++{ ++ if ((addr & 6) != 2) { /* reserved bits */ ++ DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); ++ return -EINVAL; ++ } ++ if (!(addr & 1)) { /* local */ ++ addr &= ~7; ++ if (addr < dev_priv->texture_offset || ++ addr >= dev_priv->texture_offset + dev_priv->texture_size) { ++ DRM_ERROR ++ ("bad texAddr%d %08x (local addr out of range)\n", ++ unit, addr); ++ return -EINVAL; ++ } ++ } else { /* AGP */ ++ if (!dev_priv->agp_textures) { ++ DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", ++ unit, addr); ++ return -EINVAL; ++ } ++ addr &= ~7; ++ if (addr < dev_priv->agp_textures->offset || ++ addr >= (dev_priv->agp_textures->offset + ++ dev_priv->agp_textures->size)) { ++ DRM_ERROR ++ ("bad texAddr%d %08x (AGP addr out of range)\n", ++ unit, addr); ++ return -EINVAL; ++ } ++ } ++ return 0; ++} ++ ++#define SAVE_STATE(reg,where) \ ++ if(start <= reg && start + count > reg) \ ++ dev_priv->state.where = regs[reg - start] ++#define SAVE_STATE_MASK(reg,where,mask) do { \ ++ if(start <= reg && start + count > reg) { \ ++ uint32_t tmp; \ ++ tmp = regs[reg - start]; \ ++ dev_priv->state.where = (tmp & (mask)) | \ ++ (dev_priv->state.where & ~(mask)); \ ++ } \ ++} while (0) ++static int savage_verify_state_s3d(drm_savage_private_t *dev_priv, ++ unsigned int start, unsigned int count, ++ const uint32_t *regs) ++{ ++ if (start < SAVAGE_TEXPALADDR_S3D || ++ start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { ++ DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", ++ start, start + count - 1); ++ return -EINVAL; ++ } ++ ++ 
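/* Keep only the client's non-scissor bits here; the scissor
++	 * coordinates themselves are overwritten per clip rect by
++	 * savage_emit_clip_rect_s3d(). */
++	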
SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart, ++ ~SAVAGE_SCISSOR_MASK_S3D); ++ SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend, ++ ~SAVAGE_SCISSOR_MASK_S3D); ++ ++ /* if any texture regs were changed ... */ ++ if (start <= SAVAGE_TEXCTRL_S3D && ++ start + count > SAVAGE_TEXPALADDR_S3D) { ++ /* ... check texture state */ ++ SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl); ++ SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); ++ if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) ++ return savage_verify_texaddr(dev_priv, 0, ++ dev_priv->state.s3d.texaddr); ++ } ++ ++ return 0; ++} ++ ++static int savage_verify_state_s4(drm_savage_private_t *dev_priv, ++ unsigned int start, unsigned int count, ++ const uint32_t *regs) ++{ ++ int ret = 0; ++ ++ if (start < SAVAGE_DRAWLOCALCTRL_S4 || ++ start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) { ++ DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", ++ start, start + count - 1); ++ return -EINVAL; ++ } ++ ++ SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0, ++ ~SAVAGE_SCISSOR_MASK_S4); ++ SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1, ++ ~SAVAGE_SCISSOR_MASK_S4); ++ ++ /* if any texture regs were changed ... */ ++ if (start <= SAVAGE_TEXDESCR_S4 && ++ start + count > SAVAGE_TEXPALADDR_S4) { ++ /* ... check texture state */ ++ SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); ++ SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); ++ SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); ++ if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) ++ ret |= savage_verify_texaddr(dev_priv, 0, ++ dev_priv->state.s4.texaddr0); ++ if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) ++ ret |= savage_verify_texaddr(dev_priv, 1, ++ dev_priv->state.s4.texaddr1); ++ } ++ ++ return ret; ++} ++#undef SAVE_STATE ++#undef SAVE_STATE_MASK ++ ++static int savage_dispatch_state(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *cmd_header, ++ const uint32_t *regs) ++{ ++ unsigned int count = cmd_header->state.count; ++ unsigned int start = cmd_header->state.start; ++ unsigned int count2 = 0; ++ unsigned int bci_size; ++ int ret; ++ DMA_LOCALS; ++ ++ if (!count) ++ return 0; ++ ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ ret = savage_verify_state_s3d(dev_priv, start, count, regs); ++ if (ret != 0) ++ return ret; ++ /* scissor regs are emitted in savage_dispatch_draw */ ++ if (start < SAVAGE_SCSTART_S3D) { ++ if (start + count > SAVAGE_SCEND_S3D + 1) ++ count2 = count - (SAVAGE_SCEND_S3D + 1 - start); ++ if (start + count > SAVAGE_SCSTART_S3D) ++ count = SAVAGE_SCSTART_S3D - start; ++ } else if (start <= SAVAGE_SCEND_S3D) { ++ if (start + count > SAVAGE_SCEND_S3D + 1) { ++ count -= SAVAGE_SCEND_S3D + 1 - start; ++ start = SAVAGE_SCEND_S3D + 1; ++ } else ++ return 0; ++ } ++ } else { ++ ret = savage_verify_state_s4(dev_priv, start, count, regs); ++ if (ret != 0) ++ return ret; ++ /* scissor regs are emitted in savage_dispatch_draw */ ++ if (start < SAVAGE_DRAWCTRL0_S4) { ++ if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) ++ count2 = count - ++ (SAVAGE_DRAWCTRL1_S4 + 1 - start); ++ if (start + count > SAVAGE_DRAWCTRL0_S4) ++ count = SAVAGE_DRAWCTRL0_S4 - start; ++ } else if (start <= SAVAGE_DRAWCTRL1_S4) { ++ if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) { ++ count -= SAVAGE_DRAWCTRL1_S4 + 1 - start; ++ start = SAVAGE_DRAWCTRL1_S4 + 1; ++ } else ++ return 0; ++ } ++ } ++ ++ bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255; ++ ++ if (cmd_header->state.global) { ++ BEGIN_DMA(bci_size + 1); ++ DMA_WRITE(BCI_CMD_WAIT | 
BCI_CMD_WAIT_3D); ++ dev_priv->waiting = 1; ++ } else { ++ BEGIN_DMA(bci_size); ++ } ++ ++ do { ++ while (count > 0) { ++ unsigned int n = count < 255 ? count : 255; ++ DMA_SET_REGISTERS(start, n); ++ DMA_COPY(regs, n); ++ count -= n; ++ start += n; ++ regs += n; ++ } ++ start += 2; ++ regs += 2; ++ count = count2; ++ count2 = 0; ++ } while (count); ++ ++ DMA_COMMIT(); ++ ++ return 0; ++} ++ ++static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *cmd_header, ++ const struct drm_buf *dmabuf) ++{ ++ unsigned char reorder = 0; ++ unsigned int prim = cmd_header->prim.prim; ++ unsigned int skip = cmd_header->prim.skip; ++ unsigned int n = cmd_header->prim.count; ++ unsigned int start = cmd_header->prim.start; ++ unsigned int i; ++ BCI_LOCALS; ++ ++ if (!dmabuf) { ++ DRM_ERROR("called without dma buffers!\n"); ++ return -EINVAL; ++ } ++ ++ if (!n) ++ return 0; ++ ++ switch (prim) { ++ case SAVAGE_PRIM_TRILIST_201: ++ reorder = 1; ++ prim = SAVAGE_PRIM_TRILIST; ++ case SAVAGE_PRIM_TRILIST: ++ if (n % 3 != 0) { ++ DRM_ERROR("wrong number of vertices %u in TRILIST\n", ++ n); ++ return -EINVAL; ++ } ++ break; ++ case SAVAGE_PRIM_TRISTRIP: ++ case SAVAGE_PRIM_TRIFAN: ++ if (n < 3) { ++ DRM_ERROR ++ ("wrong number of vertices %u in TRIFAN/STRIP\n", ++ n); ++ return -EINVAL; ++ } ++ break; ++ default: ++ DRM_ERROR("invalid primitive type %u\n", prim); ++ return -EINVAL; ++ } ++ ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ if (skip != 0) { ++ DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); ++ return -EINVAL; ++ } ++ } else { ++ unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - ++ (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - ++ (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); ++ if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { ++ DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); ++ return -EINVAL; ++ } ++ if (reorder) { ++ DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); ++ return -EINVAL; ++ } ++ } ++ ++ if (start + n > dmabuf->total / 32) { ++ DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", ++ start, start + n - 1, dmabuf->total / 32); ++ return -EINVAL; ++ } ++ ++ /* Vertex DMA doesn't work with command DMA at the same time, ++ * so we use BCI_... to submit commands here. Flush buffered ++ * faked DMA first. */ ++ DMA_FLUSH(); ++ ++ if (dmabuf->bus_address != dev_priv->state.common.vbaddr) { ++ BEGIN_BCI(2); ++ BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1); ++ BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type); ++ dev_priv->state.common.vbaddr = dmabuf->bus_address; ++ } ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) { ++ /* Workaround for what looks like a hardware bug. If a ++ * WAIT_3D_IDLE was emitted some time before the ++ * indexed drawing command then the engine will lock ++ * up. There are two known workarounds: ++ * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */ ++ BEGIN_BCI(63); ++ for (i = 0; i < 63; ++i) ++ BCI_WRITE(BCI_CMD_WAIT); ++ dev_priv->waiting = 0; ++ } ++ ++ prim <<= 25; ++ while (n != 0) { ++ /* Can emit up to 255 indices (85 triangles) at once. */ ++ unsigned int count = n > 255 ? 255 : n; ++ if (reorder) { ++ /* Need to reorder indices for correct flat ++ * shading while preserving the clock sense ++ * for correct culling. Only on Savage3D. 
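Each triple
++			 * (i, i+1, i+2) is emitted as (i+2, i, i+1), e.g.
++			 * 2,0,1, 5,3,4, ... for start == 0; a cyclic
++			 * rotation, so the winding the cull test relies
++			 * on is unchanged.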
*/ ++ int reorder[3] = { -1, -1, -1 }; ++ reorder[start % 3] = 2; ++ ++ BEGIN_BCI((count + 1 + 1) / 2); ++ BCI_DRAW_INDICES_S3D(count, prim, start + 2); ++ ++ for (i = start + 1; i + 1 < start + count; i += 2) ++ BCI_WRITE((i + reorder[i % 3]) | ++ ((i + 1 + ++ reorder[(i + 1) % 3]) << 16)); ++ if (i < start + count) ++ BCI_WRITE(i + reorder[i % 3]); ++ } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ BEGIN_BCI((count + 1 + 1) / 2); ++ BCI_DRAW_INDICES_S3D(count, prim, start); ++ ++ for (i = start + 1; i + 1 < start + count; i += 2) ++ BCI_WRITE(i | ((i + 1) << 16)); ++ if (i < start + count) ++ BCI_WRITE(i); ++ } else { ++ BEGIN_BCI((count + 2 + 1) / 2); ++ BCI_DRAW_INDICES_S4(count, prim, skip); ++ ++ for (i = start; i + 1 < start + count; i += 2) ++ BCI_WRITE(i | ((i + 1) << 16)); ++ if (i < start + count) ++ BCI_WRITE(i); ++ } ++ ++ start += count; ++ n -= count; ++ ++ prim |= BCI_CMD_DRAW_CONT; ++ } ++ ++ return 0; ++} ++ ++static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *cmd_header, ++ const uint32_t *vtxbuf, unsigned int vb_size, ++ unsigned int vb_stride) ++{ ++ unsigned char reorder = 0; ++ unsigned int prim = cmd_header->prim.prim; ++ unsigned int skip = cmd_header->prim.skip; ++ unsigned int n = cmd_header->prim.count; ++ unsigned int start = cmd_header->prim.start; ++ unsigned int vtx_size; ++ unsigned int i; ++ DMA_LOCALS; ++ ++ if (!n) ++ return 0; ++ ++ switch (prim) { ++ case SAVAGE_PRIM_TRILIST_201: ++ reorder = 1; ++ prim = SAVAGE_PRIM_TRILIST; ++ case SAVAGE_PRIM_TRILIST: ++ if (n % 3 != 0) { ++ DRM_ERROR("wrong number of vertices %u in TRILIST\n", ++ n); ++ return -EINVAL; ++ } ++ break; ++ case SAVAGE_PRIM_TRISTRIP: ++ case SAVAGE_PRIM_TRIFAN: ++ if (n < 3) { ++ DRM_ERROR ++ ("wrong number of vertices %u in TRIFAN/STRIP\n", ++ n); ++ return -EINVAL; ++ } ++ break; ++ default: ++ DRM_ERROR("invalid primitive type %u\n", prim); ++ return -EINVAL; ++ } ++ ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ if (skip > SAVAGE_SKIP_ALL_S3D) { ++ DRM_ERROR("invalid skip flags 0x%04x\n", skip); ++ return -EINVAL; ++ } ++ vtx_size = 8; /* full vertex */ ++ } else { ++ if (skip > SAVAGE_SKIP_ALL_S4) { ++ DRM_ERROR("invalid skip flags 0x%04x\n", skip); ++ return -EINVAL; ++ } ++ vtx_size = 10; /* full vertex */ ++ } ++ ++ vtx_size -= (skip & 1) + (skip >> 1 & 1) + ++ (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + ++ (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); ++ ++ if (vtx_size > vb_stride) { ++ DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", ++ vtx_size, vb_stride); ++ return -EINVAL; ++ } ++ ++ if (start + n > vb_size / (vb_stride * 4)) { ++ DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", ++ start, start + n - 1, vb_size / (vb_stride * 4)); ++ return -EINVAL; ++ } ++ ++ prim <<= 25; ++ while (n != 0) { ++ /* Can emit up to 255 vertices (85 triangles) at once. */ ++ unsigned int count = n > 255 ? 255 : n; ++ if (reorder) { ++ /* Need to reorder vertices for correct flat ++ * shading while preserving the clock sense ++ * for correct culling. Only on Savage3D. 
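Unlike the index
++			 * path this rotates whole vertices: per triple,
++			 * vertex i+2 is copied first, then i and i+1.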
*/ ++ int reorder[3] = { -1, -1, -1 }; ++ reorder[start % 3] = 2; ++ ++ BEGIN_DMA(count * vtx_size + 1); ++ DMA_DRAW_PRIMITIVE(count, prim, skip); ++ ++ for (i = start; i < start + count; ++i) { ++ unsigned int j = i + reorder[i % 3]; ++ DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); ++ } ++ ++ DMA_COMMIT(); ++ } else { ++ BEGIN_DMA(count * vtx_size + 1); ++ DMA_DRAW_PRIMITIVE(count, prim, skip); ++ ++ if (vb_stride == vtx_size) { ++ DMA_COPY(&vtxbuf[vb_stride * start], ++ vtx_size * count); ++ } else { ++ for (i = start; i < start + count; ++i) { ++ DMA_COPY(&vtxbuf[vb_stride * i], ++ vtx_size); ++ } ++ } ++ ++ DMA_COMMIT(); ++ } ++ ++ start += count; ++ n -= count; ++ ++ prim |= BCI_CMD_DRAW_CONT; ++ } ++ ++ return 0; ++} ++ ++static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *cmd_header, ++ const uint16_t *idx, ++ const struct drm_buf *dmabuf) ++{ ++ unsigned char reorder = 0; ++ unsigned int prim = cmd_header->idx.prim; ++ unsigned int skip = cmd_header->idx.skip; ++ unsigned int n = cmd_header->idx.count; ++ unsigned int i; ++ BCI_LOCALS; ++ ++ if (!dmabuf) { ++ DRM_ERROR("called without dma buffers!\n"); ++ return -EINVAL; ++ } ++ ++ if (!n) ++ return 0; ++ ++ switch (prim) { ++ case SAVAGE_PRIM_TRILIST_201: ++ reorder = 1; ++ prim = SAVAGE_PRIM_TRILIST; ++ case SAVAGE_PRIM_TRILIST: ++ if (n % 3 != 0) { ++ DRM_ERROR("wrong number of indices %u in TRILIST\n", n); ++ return -EINVAL; ++ } ++ break; ++ case SAVAGE_PRIM_TRISTRIP: ++ case SAVAGE_PRIM_TRIFAN: ++ if (n < 3) { ++ DRM_ERROR ++ ("wrong number of indices %u in TRIFAN/STRIP\n", n); ++ return -EINVAL; ++ } ++ break; ++ default: ++ DRM_ERROR("invalid primitive type %u\n", prim); ++ return -EINVAL; ++ } ++ ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ if (skip != 0) { ++ DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); ++ return -EINVAL; ++ } ++ } else { ++ unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - ++ (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - ++ (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); ++ if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { ++ DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); ++ return -EINVAL; ++ } ++ if (reorder) { ++ DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); ++ return -EINVAL; ++ } ++ } ++ ++ /* Vertex DMA doesn't work with command DMA at the same time, ++ * so we use BCI_... to submit commands here. Flush buffered ++ * faked DMA first. */ ++ DMA_FLUSH(); ++ ++ if (dmabuf->bus_address != dev_priv->state.common.vbaddr) { ++ BEGIN_BCI(2); ++ BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1); ++ BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type); ++ dev_priv->state.common.vbaddr = dmabuf->bus_address; ++ } ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) { ++ /* Workaround for what looks like a hardware bug. If a ++ * WAIT_3D_IDLE was emitted some time before the ++ * indexed drawing command then the engine will lock ++ * up. There are two known workarounds: ++ * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */ ++ BEGIN_BCI(63); ++ for (i = 0; i < 63; ++i) ++ BCI_WRITE(BCI_CMD_WAIT); ++ dev_priv->waiting = 0; ++ } ++ ++ prim <<= 25; ++ while (n != 0) { ++ /* Can emit up to 255 indices (85 triangles) at once. */ ++ unsigned int count = n > 255 ? 
255 : n; ++ ++ /* check indices */ ++ for (i = 0; i < count; ++i) { ++ if (idx[i] > dmabuf->total / 32) { ++ DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", ++ i, idx[i], dmabuf->total / 32); ++ return -EINVAL; ++ } ++ } ++ ++ if (reorder) { ++ /* Need to reorder indices for correct flat ++ * shading while preserving the clock sense ++ * for correct culling. Only on Savage3D. */ ++ int reorder[3] = { 2, -1, -1 }; ++ ++ BEGIN_BCI((count + 1 + 1) / 2); ++ BCI_DRAW_INDICES_S3D(count, prim, idx[2]); ++ ++ for (i = 1; i + 1 < count; i += 2) ++ BCI_WRITE(idx[i + reorder[i % 3]] | ++ (idx[i + 1 + ++ reorder[(i + 1) % 3]] << 16)); ++ if (i < count) ++ BCI_WRITE(idx[i + reorder[i % 3]]); ++ } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ BEGIN_BCI((count + 1 + 1) / 2); ++ BCI_DRAW_INDICES_S3D(count, prim, idx[0]); ++ ++ for (i = 1; i + 1 < count; i += 2) ++ BCI_WRITE(idx[i] | (idx[i + 1] << 16)); ++ if (i < count) ++ BCI_WRITE(idx[i]); ++ } else { ++ BEGIN_BCI((count + 2 + 1) / 2); ++ BCI_DRAW_INDICES_S4(count, prim, skip); ++ ++ for (i = 0; i + 1 < count; i += 2) ++ BCI_WRITE(idx[i] | (idx[i + 1] << 16)); ++ if (i < count) ++ BCI_WRITE(idx[i]); ++ } ++ ++ idx += count; ++ n -= count; ++ ++ prim |= BCI_CMD_DRAW_CONT; ++ } ++ ++ return 0; ++} ++ ++static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *cmd_header, ++ const uint16_t *idx, ++ const uint32_t *vtxbuf, ++ unsigned int vb_size, unsigned int vb_stride) ++{ ++ unsigned char reorder = 0; ++ unsigned int prim = cmd_header->idx.prim; ++ unsigned int skip = cmd_header->idx.skip; ++ unsigned int n = cmd_header->idx.count; ++ unsigned int vtx_size; ++ unsigned int i; ++ DMA_LOCALS; ++ ++ if (!n) ++ return 0; ++ ++ switch (prim) { ++ case SAVAGE_PRIM_TRILIST_201: ++ reorder = 1; ++ prim = SAVAGE_PRIM_TRILIST; ++ case SAVAGE_PRIM_TRILIST: ++ if (n % 3 != 0) { ++ DRM_ERROR("wrong number of indices %u in TRILIST\n", n); ++ return -EINVAL; ++ } ++ break; ++ case SAVAGE_PRIM_TRISTRIP: ++ case SAVAGE_PRIM_TRIFAN: ++ if (n < 3) { ++ DRM_ERROR ++ ("wrong number of indices %u in TRIFAN/STRIP\n", n); ++ return -EINVAL; ++ } ++ break; ++ default: ++ DRM_ERROR("invalid primitive type %u\n", prim); ++ return -EINVAL; ++ } ++ ++ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { ++ if (skip > SAVAGE_SKIP_ALL_S3D) { ++ DRM_ERROR("invalid skip flags 0x%04x\n", skip); ++ return -EINVAL; ++ } ++ vtx_size = 8; /* full vertex */ ++ } else { ++ if (skip > SAVAGE_SKIP_ALL_S4) { ++ DRM_ERROR("invalid skip flags 0x%04x\n", skip); ++ return -EINVAL; ++ } ++ vtx_size = 10; /* full vertex */ ++ } ++ ++ vtx_size -= (skip & 1) + (skip >> 1 & 1) + ++ (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + ++ (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); ++ ++ if (vtx_size > vb_stride) { ++ DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", ++ vtx_size, vb_stride); ++ return -EINVAL; ++ } ++ ++ prim <<= 25; ++ while (n != 0) { ++ /* Can emit up to 255 vertices (85 triangles) at once. */ ++ unsigned int count = n > 255 ? 255 : n; ++ ++ /* Check indices */ ++ for (i = 0; i < count; ++i) { ++ if (idx[i] > vb_size / (vb_stride * 4)) { ++ DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", ++ i, idx[i], vb_size / (vb_stride * 4)); ++ return -EINVAL; ++ } ++ } ++ ++ if (reorder) { ++ /* Need to reorder vertices for correct flat ++ * shading while preserving the clock sense ++ * for correct culling. Only on Savage3D. 
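Here the
++			 * permutation hits the index list itself:
++			 * (idx[0], idx[1], idx[2]) is fetched as
++			 * (idx[2], idx[0], idx[1]).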
*/ ++ int reorder[3] = { 2, -1, -1 }; ++ ++ BEGIN_DMA(count * vtx_size + 1); ++ DMA_DRAW_PRIMITIVE(count, prim, skip); ++ ++ for (i = 0; i < count; ++i) { ++ unsigned int j = idx[i + reorder[i % 3]]; ++ DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); ++ } ++ ++ DMA_COMMIT(); ++ } else { ++ BEGIN_DMA(count * vtx_size + 1); ++ DMA_DRAW_PRIMITIVE(count, prim, skip); ++ ++ for (i = 0; i < count; ++i) { ++ unsigned int j = idx[i]; ++ DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); ++ } ++ ++ DMA_COMMIT(); ++ } ++ ++ idx += count; ++ n -= count; ++ ++ prim |= BCI_CMD_DRAW_CONT; ++ } ++ ++ return 0; ++} ++ ++static int savage_dispatch_clear(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *cmd_header, ++ const drm_savage_cmd_header_t *data, ++ unsigned int nbox, ++ const struct drm_clip_rect *boxes) ++{ ++ unsigned int flags = cmd_header->clear0.flags; ++ unsigned int clear_cmd; ++ unsigned int i, nbufs; ++ DMA_LOCALS; ++ ++ if (nbox == 0) ++ return 0; ++ ++ clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | ++ BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; ++ BCI_CMD_SET_ROP(clear_cmd,0xCC); ++ ++ nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) + ++ ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0); ++ if (nbufs == 0) ++ return 0; ++ ++ if (data->clear1.mask != 0xffffffff) { ++ /* set mask */ ++ BEGIN_DMA(2); ++ DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); ++ DMA_WRITE(data->clear1.mask); ++ DMA_COMMIT(); ++ } ++ for (i = 0; i < nbox; ++i) { ++ unsigned int x, y, w, h; ++ unsigned int buf; ++ ++ x = boxes[i].x1, y = boxes[i].y1; ++ w = boxes[i].x2 - boxes[i].x1; ++ h = boxes[i].y2 - boxes[i].y1; ++ BEGIN_DMA(nbufs * 6); ++ for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { ++ if (!(flags & buf)) ++ continue; ++ DMA_WRITE(clear_cmd); ++ switch (buf) { ++ case SAVAGE_FRONT: ++ DMA_WRITE(dev_priv->front_offset); ++ DMA_WRITE(dev_priv->front_bd); ++ break; ++ case SAVAGE_BACK: ++ DMA_WRITE(dev_priv->back_offset); ++ DMA_WRITE(dev_priv->back_bd); ++ break; ++ case SAVAGE_DEPTH: ++ DMA_WRITE(dev_priv->depth_offset); ++ DMA_WRITE(dev_priv->depth_bd); ++ break; ++ } ++ DMA_WRITE(data->clear1.value); ++ DMA_WRITE(BCI_X_Y(x, y)); ++ DMA_WRITE(BCI_W_H(w, h)); ++ } ++ DMA_COMMIT(); ++ } ++ if (data->clear1.mask != 0xffffffff) { ++ /* reset mask */ ++ BEGIN_DMA(2); ++ DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); ++ DMA_WRITE(0xffffffff); ++ DMA_COMMIT(); ++ } ++ ++ return 0; ++} ++ ++static int savage_dispatch_swap(drm_savage_private_t *dev_priv, ++ unsigned int nbox, const struct drm_clip_rect *boxes) ++{ ++ unsigned int swap_cmd; ++ unsigned int i; ++ DMA_LOCALS; ++ ++ if (nbox == 0) ++ return 0; ++ ++ swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | ++ BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD; ++ BCI_CMD_SET_ROP(swap_cmd,0xCC); ++ ++ for (i = 0; i < nbox; ++i) { ++ BEGIN_DMA(6); ++ DMA_WRITE(swap_cmd); ++ DMA_WRITE(dev_priv->back_offset); ++ DMA_WRITE(dev_priv->back_bd); ++ DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); ++ DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); ++ DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1, ++ boxes[i].y2 - boxes[i].y1)); ++ DMA_COMMIT(); ++ } ++ ++ return 0; ++} ++ ++static int savage_dispatch_draw(drm_savage_private_t *dev_priv, ++ const drm_savage_cmd_header_t *start, ++ const drm_savage_cmd_header_t *end, ++ const struct drm_buf *dmabuf, ++ const unsigned int *vtxbuf, ++ unsigned int vb_size, unsigned int vb_stride, ++ unsigned int nbox, ++ const struct drm_clip_rect *boxes) ++{ ++ unsigned int i, j; ++ int ret; ++ ++ for (i = 
0; i < nbox; ++i) {
++		const drm_savage_cmd_header_t *cmdbuf;
++		dev_priv->emit_clip_rect(dev_priv, &boxes[i]);
++
++		cmdbuf = start;
++		while (cmdbuf < end) {
++			drm_savage_cmd_header_t cmd_header;
++			cmd_header = *cmdbuf;
++			cmdbuf++;
++			switch (cmd_header.cmd.cmd) {
++			case SAVAGE_CMD_DMA_PRIM:
++				ret = savage_dispatch_dma_prim(
++					dev_priv, &cmd_header, dmabuf);
++				break;
++			case SAVAGE_CMD_VB_PRIM:
++				ret = savage_dispatch_vb_prim(
++					dev_priv, &cmd_header,
++					vtxbuf, vb_size, vb_stride);
++				break;
++			case SAVAGE_CMD_DMA_IDX:
++				j = (cmd_header.idx.count + 3) / 4;
++				/* j was checked in savage_bci_cmdbuf */
++				ret = savage_dispatch_dma_idx(dev_priv,
++					&cmd_header, (const uint16_t *)cmdbuf,
++					dmabuf);
++				cmdbuf += j;
++				break;
++			case SAVAGE_CMD_VB_IDX:
++				j = (cmd_header.idx.count + 3) / 4;
++				/* j was checked in savage_bci_cmdbuf */
++				ret = savage_dispatch_vb_idx(dev_priv,
++					&cmd_header, (const uint16_t *)cmdbuf,
++					(const uint32_t *)vtxbuf, vb_size,
++					vb_stride);
++				cmdbuf += j;
++				break;
++			default:
++				/* What's the best return code? EFAULT? */
++				DRM_ERROR("IMPLEMENTATION ERROR: "
++					"non-drawing-command %d\n",
++					cmd_header.cmd.cmd);
++				return -EINVAL;
++			}
++
++			if (ret != 0)
++				return ret;
++		}
++	}
++
++	return 0;
++}
++
++int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++	drm_savage_private_t *dev_priv = dev->dev_private;
++	struct drm_device_dma *dma = dev->dma;
++	struct drm_buf *dmabuf;
++	drm_savage_cmdbuf_t *cmdbuf = data;
++	drm_savage_cmd_header_t *kcmd_addr = NULL;
++	drm_savage_cmd_header_t *first_draw_cmd;
++	unsigned int *kvb_addr = NULL;
++	struct drm_clip_rect *kbox_addr = NULL;
++	unsigned int i, j;
++	int ret = 0;
++
++	DRM_DEBUG("\n");
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++	if (dma && dma->buflist) {
++		if (cmdbuf->dma_idx >= dma->buf_count) {
++			DRM_ERROR
++			    ("vertex buffer index %u out of range (0-%u)\n",
++			     cmdbuf->dma_idx, dma->buf_count - 1);
++			return -EINVAL;
++		}
++		dmabuf = dma->buflist[cmdbuf->dma_idx];
++	} else {
++		dmabuf = NULL;
++	}
++
++	/* Copy the user buffers into kernel temporary areas. This hasn't been
++	 * a performance loss compared to VERIFYAREA_READ/
++	 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
++	 * for locking on FreeBSD.
++	 */
++	if (cmdbuf->size) {
++		kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
++		if (kcmd_addr == NULL)
++			return -ENOMEM;
++
++		if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
++				       cmdbuf->size * 8))
++		{
++			drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
++			return -EFAULT;
++		}
++		cmdbuf->cmd_addr = kcmd_addr;
++	}
++	if (cmdbuf->vb_size) {
++		kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER);
++		if (kvb_addr == NULL) {
++			ret = -ENOMEM;
++			goto done;
++		}
++
++		if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
++				       cmdbuf->vb_size)) {
++			ret = -EFAULT;
++			goto done;
++		}
++		cmdbuf->vb_addr = kvb_addr;
++	}
++	if (cmdbuf->nbox) {
++		kbox_addr = drm_alloc(cmdbuf->nbox *
++				      sizeof(struct drm_clip_rect),
++				      DRM_MEM_DRIVER);
++		if (kbox_addr == NULL) {
++			ret = -ENOMEM;
++			goto done;
++		}
++
++		if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
++				       cmdbuf->nbox *
++				       sizeof(struct drm_clip_rect))) {
++			ret = -EFAULT;
++			goto done;
++		}
++		cmdbuf->box_addr = kbox_addr;
++	}
++
++	/* Make sure writes to DMA buffers are finished before sending
++	 * DMA commands to the graphics hardware. */
++	DRM_MEMORYBARRIER();
++
++	/* Coming from user space. Don't know if the Xserver has
++	 * emitted wait commands.
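If one was, the
++	 * next indexed draw on Savage3D needs the 63-NOP workaround
++	 * in savage_dispatch_dma_prim()/_idx().
++	 *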
Assuming the worst. */ ++ dev_priv->waiting = 1; ++ ++ i = 0; ++ first_draw_cmd = NULL; ++ while (i < cmdbuf->size) { ++ drm_savage_cmd_header_t cmd_header; ++ cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr; ++ cmdbuf->cmd_addr++; ++ i++; ++ ++ /* Group drawing commands with same state to minimize ++ * iterations over clip rects. */ ++ j = 0; ++ switch (cmd_header.cmd.cmd) { ++ case SAVAGE_CMD_DMA_IDX: ++ case SAVAGE_CMD_VB_IDX: ++ j = (cmd_header.idx.count + 3) / 4; ++ if (i + j > cmdbuf->size) { ++ DRM_ERROR("indexed drawing command extends " ++ "beyond end of command buffer\n"); ++ DMA_FLUSH(); ++ return -EINVAL; ++ } ++ /* fall through */ ++ case SAVAGE_CMD_DMA_PRIM: ++ case SAVAGE_CMD_VB_PRIM: ++ if (!first_draw_cmd) ++ first_draw_cmd = cmdbuf->cmd_addr - 1; ++ cmdbuf->cmd_addr += j; ++ i += j; ++ break; ++ default: ++ if (first_draw_cmd) { ++ ret = savage_dispatch_draw( ++ dev_priv, first_draw_cmd, ++ cmdbuf->cmd_addr - 1, ++ dmabuf, cmdbuf->vb_addr, ++ cmdbuf->vb_size, ++ cmdbuf->vb_stride, ++ cmdbuf->nbox, cmdbuf->box_addr); ++ if (ret != 0) ++ return ret; ++ first_draw_cmd = NULL; ++ } ++ } ++ if (first_draw_cmd) ++ continue; ++ ++ switch (cmd_header.cmd.cmd) { ++ case SAVAGE_CMD_STATE: ++ j = (cmd_header.state.count + 1) / 2; ++ if (i + j > cmdbuf->size) { ++ DRM_ERROR("command SAVAGE_CMD_STATE extends " ++ "beyond end of command buffer\n"); ++ DMA_FLUSH(); ++ ret = -EINVAL; ++ goto done; ++ } ++ ret = savage_dispatch_state(dev_priv, &cmd_header, ++ (const uint32_t *)cmdbuf->cmd_addr); ++ cmdbuf->cmd_addr += j; ++ i += j; ++ break; ++ case SAVAGE_CMD_CLEAR: ++ if (i + 1 > cmdbuf->size) { ++ DRM_ERROR("command SAVAGE_CMD_CLEAR extends " ++ "beyond end of command buffer\n"); ++ DMA_FLUSH(); ++ ret = -EINVAL; ++ goto done; ++ } ++ ret = savage_dispatch_clear(dev_priv, &cmd_header, ++ cmdbuf->cmd_addr, ++ cmdbuf->nbox, ++ cmdbuf->box_addr); ++ cmdbuf->cmd_addr++; ++ i++; ++ break; ++ case SAVAGE_CMD_SWAP: ++ ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox, ++ cmdbuf->box_addr); ++ break; ++ default: ++ DRM_ERROR("invalid command 0x%x\n", ++ cmd_header.cmd.cmd); ++ DMA_FLUSH(); ++ ret = -EINVAL; ++ goto done; ++ } ++ ++ if (ret != 0) { ++ DMA_FLUSH(); ++ goto done; ++ } ++ } ++ ++ if (first_draw_cmd) { ++ ret = savage_dispatch_draw( ++ dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf, ++ cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride, ++ cmdbuf->nbox, cmdbuf->box_addr); ++ if (ret != 0) { ++ DMA_FLUSH(); ++ goto done; ++ } ++ } ++ ++ DMA_FLUSH(); ++ ++ if (dmabuf && cmdbuf->discard) { ++ drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private; ++ uint16_t event; ++ event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); ++ SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); ++ savage_freelist_put(dev, dmabuf); ++ } ++ ++done: ++ /* If we didn't need to allocate them, these'll be NULL */ ++ drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER); ++ drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER); ++ drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect), ++ DRM_MEM_DRIVER); ++ ++ return ret; ++} +diff -Nurd git/drivers/gpu/drm-tungsten/sis_drm.h git-nokia/drivers/gpu/drm-tungsten/sis_drm.h +--- git/drivers/gpu/drm-tungsten/sis_drm.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/sis_drm.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,67 @@ ++/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */ ++/* ++ * Copyright 2005 Eric Anholt ++ * All Rights Reserved. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ * SOFTWARE. ++ * ++ */ ++ ++#ifndef __SIS_DRM_H__ ++#define __SIS_DRM_H__ ++ ++/* SiS specific ioctls */ ++#define NOT_USED_0_3 ++#define DRM_SIS_FB_ALLOC 0x04 ++#define DRM_SIS_FB_FREE 0x05 ++#define NOT_USED_6_12 ++#define DRM_SIS_AGP_INIT 0x13 ++#define DRM_SIS_AGP_ALLOC 0x14 ++#define DRM_SIS_AGP_FREE 0x15 ++#define DRM_SIS_FB_INIT 0x16 ++ ++#define DRM_IOCTL_SIS_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t) ++#define DRM_IOCTL_SIS_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t) ++#define DRM_IOCTL_SIS_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t) ++#define DRM_IOCTL_SIS_AGP_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t) ++#define DRM_IOCTL_SIS_AGP_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t) ++#define DRM_IOCTL_SIS_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t) ++/* ++#define DRM_IOCTL_SIS_FLIP DRM_IOW( 0x48, drm_sis_flip_t) ++#define DRM_IOCTL_SIS_FLIP_INIT DRM_IO( 0x49) ++#define DRM_IOCTL_SIS_FLIP_FINAL DRM_IO( 0x50) ++*/ ++ ++typedef struct { ++ int context; ++ unsigned int offset; ++ unsigned int size; ++ unsigned long free; ++} drm_sis_mem_t; ++ ++typedef struct { ++ unsigned int offset, size; ++} drm_sis_agp_t; ++ ++typedef struct { ++ unsigned int offset, size; ++} drm_sis_fb_t; ++ ++#endif /* __SIS_DRM_H__ */ +diff -Nurd git/drivers/gpu/drm-tungsten/sis_drv.c git-nokia/drivers/gpu/drm-tungsten/sis_drv.c +--- git/drivers/gpu/drm-tungsten/sis_drv.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/sis_drv.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,127 @@ ++/* sis.c -- sis driver -*- linux-c -*- ++ * ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All Rights Reserved. 
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "sis_drm.h"
++#include "sis_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++	sis_PCI_IDS
++};
++
++
++static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++	drm_sis_private_t *dev_priv;
++	int ret;
++
++	dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
++	if (dev_priv == NULL)
++		return -ENOMEM;
++
++	dev->dev_private = (void *)dev_priv;
++	dev_priv->chipset = chipset;
++	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
++	if (ret) {
++		drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
++	}
++
++	return ret;
++}
++
++static int sis_driver_unload(struct drm_device *dev)
++{
++	drm_sis_private_t *dev_priv = dev->dev_private;
++
++	drm_sman_takedown(&dev_priv->sman);
++	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
++
++	return 0;
++}
++
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++	.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
++	.load = sis_driver_load,
++	.unload = sis_driver_unload,
++	.context_dtor = NULL,
++	.dma_quiescent = sis_idle,
++	.reclaim_buffers = NULL,
++	.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
++	.lastclose = sis_lastclose,
++	.get_map_ofs = drm_core_get_map_ofs,
++	.get_reg_ofs = drm_core_get_reg_ofs,
++	.ioctls = sis_ioctls,
++	.fops = {
++		.owner = THIS_MODULE,
++		.open = drm_open,
++		.release = drm_release,
++		.ioctl = drm_ioctl,
++		.mmap = drm_mmap,
++		.poll = drm_poll,
++		.fasync = drm_fasync,
++	},
++	.pci_driver = {
++		.name = DRIVER_NAME,
++		.id_table = pciidlist,
++		.probe = probe,
++		.remove = __devexit_p(drm_cleanup_pci),
++	},
++
++	.name = DRIVER_NAME,
++	.desc = DRIVER_DESC,
++	.date = DRIVER_DATE,
++	.major = DRIVER_MAJOR,
++	.minor = DRIVER_MINOR,
++	.patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++	return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init sis_init(void)
++{
++	driver.num_ioctls = sis_max_ioctl;
++	return drm_init(&driver, pciidlist);
++}
++
++static void __exit sis_exit(void)
++{
++	drm_exit(&driver);
++}
++
++module_init(sis_init);
++module_exit(sis_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd 
git/drivers/gpu/drm-tungsten/sis_drv.h git-nokia/drivers/gpu/drm-tungsten/sis_drv.h +--- git/drivers/gpu/drm-tungsten/sis_drv.h 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/sis_drv.h 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,90 @@ ++/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */ ++/* ++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. ++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. ++ * All rights reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the next ++ * paragraph) shall be included in all copies or substantial portions of the ++ * Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ * DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#ifndef _SIS_DRV_H_ ++#define _SIS_DRV_H_ ++ ++/* General customization: ++ */ ++ ++#define DRIVER_AUTHOR "SIS, Tungsten Graphics" ++#define DRIVER_NAME "sis" ++#define DRIVER_DESC "SIS 300/630/540 and XGI V3XE/V5/V8" ++#define DRIVER_DATE "20070626" ++#define DRIVER_MAJOR 1 ++#define DRIVER_MINOR 3 ++#define DRIVER_PATCHLEVEL 0 ++ ++enum sis_family { ++ SIS_OTHER = 0, ++ SIS_CHIP_315 = 1, ++}; ++ ++#if defined(__linux__) ++#define SIS_HAVE_CORE_MM ++#endif ++ ++#ifdef SIS_HAVE_CORE_MM ++#include "drm_sman.h" ++ ++#define SIS_BASE (dev_priv->mmio) ++#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg); ++#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val); ++ ++typedef struct drm_sis_private { ++ drm_local_map_t *mmio; ++ unsigned int idle_fault; ++ struct drm_sman sman; ++ unsigned int chipset; ++ int vram_initialized; ++ int agp_initialized; ++ unsigned long vram_offset; ++ unsigned long agp_offset; ++} drm_sis_private_t; ++ ++extern int sis_idle(struct drm_device *dev); ++extern void sis_reclaim_buffers_locked(struct drm_device *dev, ++ struct drm_file *file_priv); ++extern void sis_lastclose(struct drm_device *dev); ++ ++#else ++#include "sis_ds.h" ++ ++typedef struct drm_sis_private { ++ memHeap_t *AGPHeap; ++ memHeap_t *FBHeap; ++} drm_sis_private_t; ++ ++extern int sis_init_context(struct drm_device * dev, int context); ++extern int sis_final_context(struct drm_device * dev, int context); ++ ++#endif ++ ++extern struct drm_ioctl_desc sis_ioctls[]; ++extern int sis_max_ioctl; ++ ++#endif +diff -Nurd git/drivers/gpu/drm-tungsten/sis_mm.c git-nokia/drivers/gpu/drm-tungsten/sis_mm.c +--- git/drivers/gpu/drm-tungsten/sis_mm.c 1970-01-01 01:00:00.000000000 +0100 ++++ git-nokia/drivers/gpu/drm-tungsten/sis_mm.c 2008-12-08 14:52:52.000000000 +0100 +@@ -0,0 +1,332 @@ ++/************************************************************************** ++ * ++ * 
Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, ++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR ++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE ++ * USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * ++ **************************************************************************/ ++ ++/* ++ * Authors: ++ * Thomas Hellström ++ */ ++ ++#include "drmP.h" ++#include "sis_drm.h" ++#include "sis_drv.h" ++ ++#if defined(__linux__) ++#include