author     Michael Lauer <mickey@vanille-media.de>  2006-05-11 16:20:30 +0000
committer  OpenEmbedded Project <openembedded-devel@lists.openembedded.org>  2006-05-11 16:20:30 +0000
commit     a929c95a66b89e9a510fe41658144f9b6738dd6a (patch)
tree       f568507430cc61019bc7df19999c1c3e508714d1 /packages/linux/linux-ezx/dpm-pxa27x-2.6.16.patch
parent     287fc0b071b62f477e83f13ad07af7bce610bd2d (diff)
parent     844bf825c75c21864c1ba911642d58021e08e9ed (diff)
merge of 868246069be482bc64e4d7bd013d7a0df35de286
and e4d2a954ceceaa23fece37507c8e7f43cfbf1047
Diffstat (limited to 'packages/linux/linux-ezx/dpm-pxa27x-2.6.16.patch')
-rw-r--r--  packages/linux/linux-ezx/dpm-pxa27x-2.6.16.patch  2469
1 file changed, 2469 insertions, 0 deletions
diff --git a/packages/linux/linux-ezx/dpm-pxa27x-2.6.16.patch b/packages/linux/linux-ezx/dpm-pxa27x-2.6.16.patch
new file mode 100644
index 0000000000..e429a7b754
--- /dev/null
+++ b/packages/linux/linux-ezx/dpm-pxa27x-2.6.16.patch
@@ -0,0 +1,2469 @@
+Source: MontaVista Software, Inc.
+MR: 9340
+Type: Enhancement
+Disposition: submitted to
+Keywords: DPM, PXA27x
+Description:
+ Platform core support for DPM functions (NOT including drivers)
+
+
+Index: linux-2.6.16/arch/arm/mach-pxa/Makefile
+===================================================================
+--- linux-2.6.16.orig/arch/arm/mach-pxa/Makefile
++++ linux-2.6.16/arch/arm/mach-pxa/Makefile
+@@ -31,4 +31,5 @@ obj-$(CONFIG_PXA_SSP) += ssp.o
+
+ ifeq ($(CONFIG_PXA27x),y)
+ obj-$(CONFIG_PM) += standby.o
++obj-$(CONFIG_DPM) += dpm-pxa27x.o
+ endif
+Index: linux-2.6.16/include/asm-arm/arch-pxa/dpm.h
+===================================================================
+--- /dev/null
++++ linux-2.6.16/include/asm-arm/arch-pxa/dpm.h
+@@ -0,0 +1,157 @@
++/*
++ * include/asm-arm/arch-pxa/dpm.h
++ *
++ * Bulverde-specific definitions for DPM. If further PXA boards are
++ * supported in the future, this will be split into board-specific files.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) 2002 MontaVista Software <source@mvista.com>
++ *
++ * Based on arch/ppc/platforms/ibm405lp_dpm.h by Bishop Brock.
++ */
++
++#ifndef __ASM_ARM_PXA_DPM_H__
++#define __ASM_ARM_PXA_DPM_H__
++
++/*
++ * machine dependent operating state
++ *
++ * An operating state is a cpu execution state that has implications for power
++ * management. The DPM will select operating points based largely on the
++ * current operating state.
++ *
++ * DPM_STATES is the number of supported operating states. Valid operating
++ * states are from 0 to DPM_STATES-1 but when setting an operating state the
++ * kernel should only specify a state from the set of "base states" and should
++ * do so by name. During the context switch the new operating state is simply
++ * extracted from current->dpm_state.
++ *
++ * task states:
++ *
++ * APIs that reference task states use the range -(DPM_TASK_STATE_LIMIT + 1)
++ * through +DPM_TASK_STATE_LIMIT. This value is added to DPM_TASK_STATE to
++ * obtain the downward or upward adjusted task state value. The
++ * -(DPM_TASK_STATE_LIMIT + 1) value is interpreted specially, and equates to
++ * DPM_NO_STATE.
++ *
++ * Tasks inherit their task operating states across calls to
++ * fork(). DPM_TASK_STATE is the default operating state for all tasks, and is
++ * inherited from init. Tasks can change (or have changed) their task states
++ * using the DPM_SET_TASK_STATE variant of the sys_dpm() system call. */
++
++#define DPM_NO_STATE -1
++
++#define DPM_IDLE_TASK_STATE 0
++#define DPM_IDLE_STATE 1
++#define DPM_SLEEP_STATE 2
++#define DPM_BASE_STATES 3
++
++#define DPM_TASK_STATE_LIMIT 4
++#define DPM_TASK_STATE (DPM_BASE_STATES + DPM_TASK_STATE_LIMIT)
++#define DPM_STATES (DPM_TASK_STATE + DPM_TASK_STATE_LIMIT + 1)
++#define DPM_TASK_STATES (DPM_STATES - DPM_BASE_STATES)
++
++/*
++ * The length of DPM_STATE_NAMES is DPM_STATES.
++ */
++#define DPM_STATE_NAMES \
++{ "idle-task", "idle", "sleep",\
++ "task-4", "task-3", "task-2", "task-1",\
++ "task", \
++ "task+1", "task+2", "task+3", "task+4" \
++}
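++
++/* With DPM_BASE_STATES = 3 and DPM_TASK_STATE_LIMIT = 4, the names above
++ * line up as: idle-task = 0, idle = 1, sleep = 2, "task-4".."task-1" = 3..6,
++ * "task" = DPM_TASK_STATE = 7, "task+1".."task+4" = 8..11; hence
++ * DPM_STATES = 12 and DPM_TASK_STATES = 9.
++ */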
++
++/* Operating point parameters */
++#define DPM_MD_V 0 /* Voltage */
++#define DPM_MD_PLL_L 1 /* L */
++#define DPM_MD_PLL_N 2 /* N */
++#define DPM_MD_PLL_B 3 /* B */
++#define DPM_MD_HALF_TURBO 4 /* Cuts turbo mode in half */
++#define DPM_MD_CCCRA 5 /* The A bit in the CCCR is
++ for MEMC clocks */
++#define DPM_MD_CPLL_ON 6 /* Core PLL on/off */
++#define DPM_MD_PPLL_ON 7 /* Peripheral PLL on/off */
++#define DPM_MD_SLEEP_MODE 8 /* Sleep mode, from pm.h */
++#define DPM_MD_PLL_LCD 9 /* calculated value */
++
++
++enum
++{
++ CPUMODE_RUN,
++ CPUMODE_IDLE,
++ CPUMODE_STANDBY,
++ CPUMODE_SLEEP,
++ CPUMODE_RESERVED,
++ CPUMODE_SENSE,
++ CPUMODE_RESERVED2,
++ CPUMODE_DEEPSLEEP,
++};
++
++/* this is the number of specifiable operating point parameters,
++ * used by arch-independent DPM-core driver
++ */
++#define DPM_PP_NBR 10
++#define DPM_PARAM_NAMES {"v","l","n","b","ht","a","cpll", "ppll","sleep", "lcd"};
++
++#ifndef __ASSEMBLER__
++
++#include <linux/types.h>
++#include <linux/proc_fs.h>
++#include <asm/hardware.h>
++#include <asm/arch/pxa-regs.h>
++
++#define dpm_time() (OSCR)
++#define DPM_NSEC_PER_TICK 308 /* nanoseconds per tick */
++#define dpm_time_to_usec(ticks) ({ \
++ unsigned long long quot = \
++ ((ticks) * DPM_NSEC_PER_TICK * 2 + 1); \
++ do_div(quot, (unsigned long) 1000*2); \
++ quot; })
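++/* Worked example: the OSCR timer read by dpm_time() is incremented by a
++ * 3.25 MHz clock (see the errata delay loop below), so one tick is about
++ * 307.7 ns, rounded to DPM_NSEC_PER_TICK = 308; 1000 ticks therefore
++ * convert to roughly 308 us.
++ */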
++
++struct dpm_regs {
++ unsigned int cccr;
++ unsigned int clkcfg;
++ unsigned int voltage; /*This is not a register.*/
++};
++
++/* Instances of this structure define valid Bulverde operating points for DPM.
++ Voltages are represented in mV, and frequencies are represented in KHz. */
++
++struct dpm_md_opt {
++ /* Specified values */
++ int v; /* Target voltage in mV*/
++ int l; /* Run Mode to Oscillator ratio */
++ int n; /* Turbo-Mode to Run-Mode ratio */
++ int b; /* Fast Bus Mode */
++ int half_turbo;/* Half Turbo bit */
++ int cccra; /* the 'A' bit of the CCCR register,
++ alternate MEMC clock */
++ int cpll_enabled; /* core PLL is ON? (Bulverde >="C0" feature)*/
++ int ppll_enabled; /* peripheral PLL is ON? (Bulverde >="C0" feature) */
++
++ int sleep_mode;
++ /*Calculated values*/
++ unsigned int lcd; /*in KHz */
++ unsigned int lpj; /*New value for loops_per_jiffy */
++ unsigned int cpu; /*CPU frequency in KHz */
++ unsigned int turbo; /* Turbo bit in clkcfg */
++
++ struct dpm_regs regs; /* Register values */
++};
++
++#endif /* __ASSEMBLER__ */
++#endif /* __ASM_ARM_PXA_DPM_H__ */
++
+Index: linux-2.6.16/arch/arm/mach-pxa/dpm-pxa27x.c
+===================================================================
+--- /dev/null
++++ linux-2.6.16/arch/arm/mach-pxa/dpm-pxa27x.c
+@@ -0,0 +1,2110 @@
++/*
++ * arch/arm/mach-pxa/dpm-pxa27x.c DPM support for Intel PXA27x
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) 2002, 2005 MontaVista Software <source@mvista.com>.
++ *
++ * Includes code from David Burrage, Alexandre Rusev, and Todd Poynor,
++ * based on DPM code by Matthew Locke, Dmitry Chigirev and Bishop Brock.
++ *
++ * Includes cpufreq/ipm code by Chao Xie and Cain Yuan
++ * Copyright (C) 2003-2004 Intel Corporation.
++ */
++
++#include <linux/config.h>
++
++#include <linux/dpm.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++
++#include <linux/delay.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/hardware.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/arch/pxa-regs.h>
++#include <asm/arch/system.h>
++#include <asm/arch/dpm.h>
++#include <asm/mach/time.h>
++
++static int saved_loops_per_jiffy = 0;
++static int saved_cpu_freq = 0;
++
++#define CCCR_CPDIS_BIT_ON (1 << 31)
++#define CCCR_PPDIS_BIT_ON (1 << 30)
++#define CCCR_CPDIS_BIT_OFF (0 << 31)
++#define CCCR_PPDIS_BIT_OFF (0 << 30)
++#define CCCR_PLL_EARLY_EN_BIT_ON (1 << 26)
++#define CCSR_CPLL_LOCKED (1 << 29)
++#define CCSR_PPLL_LOCKED (1 << 28)
++
++/* CLKCFG
++ | 31------------------------------------------- | 3 | 2 | 1 | 0 |
++ | --------------------------------------------- | B | HT | F | T |
++*/
++#define CLKCFG_B_BIT (1 << 3)
++#define CLKCFG_HT_BIT (1 << 2)
++#define CLKCFG_F_BIT (1 << 1)
++#define CLKCFG_T_BIT 1
++
++#define PLL_L_MAX 31
++#define PLL_N_MAX 8
++
++/* The MIN for L is 2 in the Yellow Book tables, but L=1 really means
++ 13M mode, so L min includes 1 */
++#define PLL_L_MIN 1
++#define PLL_N_MIN 2
++
++#define CCLKCFG_TURBO 0x1
++#define CCLKCFG_FCS 0x2
++
++#define L_NUM 31 /* 30 different L numbers. */
++#define N_NUM 7 /* 7 N numbers. */
++
++#define BLVD_MIN_FREQ 13000
++/* latest PowerPoint documentation indicates 624000*/
++#define BLVD_MAX_FREQ 520000
++
++#define MAX_VOL 1400 /* in mV. */
++#define MIN_VOL 850 /* in mV. */
++
++#define MDREFR_DRI 0xFFF
++#define MSC0_RDF (0xF << 20)
++#define MSC0_RDN (0xF << 24)
++#define MSC0_RRR (0x7 << 12)
++#define MDREFR_RFU 0xC0200000
++#define MDCNFG_DTC0 (0x3 << 8)
++#define MDCNFG_DTC2 (0x3 << 24)
++
++/* memory timing (MSC0,DTC,DRI) constants (see Blob and Intel BBU sources) */
++#define XLLI_MSC0_13 0x11101110
++#define XLLI_MSC0_19 0x11101110
++#define XLLI_MSC0_26 0x11201120 /* 26 MHz setting */
++#define XLLI_MSC0_32 0x11201120
++#define XLLI_MSC0_39 0x11301130 /* 39 MHz setting */
++#define XLLI_MSC0_45 0x11301130
++#define XLLI_MSC0_52 0x11401140 /* @ 52 MHz setting */
++#define XLLI_MSC0_58 0x11401140
++#define XLLI_MSC0_65 0x11501150 /* @ 65 MHz setting */
++#define XLLI_MSC0_68 0x11501150
++#define XLLI_MSC0_71 0x11501150 /* @ 71.5 MHz setting */
++#define XLLI_MSC0_74 0x11601160
++#define XLLI_MSC0_78 0x12601260 /* @ 78 MHz setting */
++#define XLLI_MSC0_81 0x12601260
++#define XLLI_MSC0_84 0x12601260 /* @ 84.5 MHz setting */
++#define XLLI_MSC0_87 0x12701270
++#define XLLI_MSC0_91 0x12701270 /* 91 MHz setting */
++#define XLLI_MSC0_94 0x12701270 /* 94.2 MHz setting */
++#define XLLI_MSC0_97 0x12701270 /* 97.5 MHz setting */
++#define XLLI_MSC0_100 0x12801280 /* 100.7 MHz setting */
++#define XLLI_MSC0_104 0x12801280 /* 104 MHz setting */
++#define XLLI_MSC0_110 0x12901290
++#define XLLI_MSC0_117 0x13901390 /* 117 MHz setting */
++#define XLLI_MSC0_124 0x13A013A0
++#define XLLI_MSC0_130 0x13A013A0 /* 130 MHz setting */
++#define XLLI_MSC0_136 0x13B013B0
++#define XLLI_MSC0_143 0x13B013B0
++#define XLLI_MSC0_149 0x13C013C0
++#define XLLI_MSC0_156 0x14C014C0
++#define XLLI_MSC0_162 0x14C014C0
++#define XLLI_MSC0_169 0x14C014C0
++#define XLLI_MSC0_175 0x14C014C0
++#define XLLI_MSC0_182 0x14C014C0
++#define XLLI_MSC0_188 0x14C014C0
++#define XLLI_MSC0_195 0x15C015C0
++#define XLLI_MSC0_201 0x15D015D0
++#define XLLI_MSC0_208 0x15D015D0
++
++/* DTC settings depend on 16/32 bit SDRAM we have (32 is chosen) */
++#define XLLI_DTC_13 0x00000000
++#define XLLI_DTC_19 0x00000000
++#define XLLI_DTC_26 0x00000000
++#define XLLI_DTC_32 0x00000000
++#define XLLI_DTC_39 0x00000000
++#define XLLI_DTC_45 0x00000000
++#define XLLI_DTC_52 0x00000000
++#define XLLI_DTC_58 0x01000100
++#define XLLI_DTC_65 0x01000100
++#define XLLI_DTC_68 0x01000100
++#define XLLI_DTC_71 0x01000100
++#define XLLI_DTC_74 0x01000100
++#define XLLI_DTC_78 0x01000100
++#define XLLI_DTC_81 0x01000100
++#define XLLI_DTC_84 0x01000100
++#define XLLI_DTC_87 0x01000100
++#define XLLI_DTC_91 0x02000200
++#define XLLI_DTC_94 0x02000200
++#define XLLI_DTC_97 0x02000200
++#define XLLI_DTC_100 0x02000200
++#define XLLI_DTC_104 0x02000200
++/* 110-208 MHz setting - SDCLK Halved*/
++#define XLLI_DTC_110 0x01000100
++#define XLLI_DTC_117 0x01000100
++#define XLLI_DTC_124 0x01000100
++#define XLLI_DTC_130 0x01000100
++#define XLLI_DTC_136 0x01000100
++#define XLLI_DTC_143 0x01000100
++#define XLLI_DTC_149 0x01000100
++#define XLLI_DTC_156 0x01000100
++#define XLLI_DTC_162 0x01000100
++#define XLLI_DTC_169 0x01000100
++#define XLLI_DTC_175 0x01000100
++/* 182-208 MHz setting - SDCLK Halved - Close to edge, so bump up */
++#define XLLI_DTC_182 0x02000200
++#define XLLI_DTC_188 0x02000200
++#define XLLI_DTC_195 0x02000200
++#define XLLI_DTC_201 0x02000200
++#define XLLI_DTC_208 0x02000200
++
++/* Optimal values for DRI (refresh interval) settings for
++ * various MemClk settings (MDREFR)
++ */
++#define XLLI_DRI_13 0x002
++#define XLLI_DRI_19 0x003
++#define XLLI_DRI_26 0x005
++#define XLLI_DRI_32 0x006
++#define XLLI_DRI_39 0x008
++#define XLLI_DRI_45 0x00A
++#define XLLI_DRI_52 0x00B
++#define XLLI_DRI_58 0x00D
++#define XLLI_DRI_65 0x00E
++#define XLLI_DRI_68 0x00F
++#define XLLI_DRI_71 0x010
++#define XLLI_DRI_74 0x011
++#define XLLI_DRI_78 0x012
++#define XLLI_DRI_81 0x012
++#define XLLI_DRI_84 0x013
++#define XLLI_DRI_87 0x014
++#define XLLI_DRI_91 0x015
++#define XLLI_DRI_94 0x016
++#define XLLI_DRI_97 0x016
++#define XLLI_DRI_100 0x017
++#define XLLI_DRI_104 0x018
++#define XLLI_DRI_110 0x01A
++#define XLLI_DRI_117 0x01B
++#define XLLI_DRI_124 0x01D
++#define XLLI_DRI_130 0x01E
++#define XLLI_DRI_136 0x020
++#define XLLI_DRI_143 0x021
++#define XLLI_DRI_149 0x023
++#define XLLI_DRI_156 0x025
++#define XLLI_DRI_162 0x026
++#define XLLI_DRI_169 0x028
++#define XLLI_DRI_175 0x029
++#define XLLI_DRI_182 0x02B
++#define XLLI_DRI_188 0x02D
++#define XLLI_DRI_195 0x02E
++#define XLLI_DRI_201 0x030
++#define XLLI_DRI_208 0x031
++
++
++
++/* timings for memory controller set up (masked values) */
++struct mem_timings{
++ unsigned int msc0; /* for MSC0 */
++ unsigned int dtc; /* for MDCNFG */
++ unsigned int dri; /* for MDREFR */
++};
++
++static unsigned int cpufreq_matrix[N_NUM][L_NUM + 1];
++static volatile int *ramstart;
++
++#define CP15R0_REV_MASK 0x0000000f
++#define PXA270_C5 0x7
++
++static u32 chiprev;
++static int mvdt_size;
++
++struct MvDAC {
++ unsigned int mv;
++ unsigned int DACIn;
++} *mvDACtable;
++
++/*
++ * Transfer desired mv to required DAC value.
++ * Vcore = 1.3v - ( 712uv * DACIn )
++ */
++static struct MvDAC table_c0[] = {
++ {1425, 0},
++ {1400, 69},
++ {1300, 248},
++ {1200, 428},
++ {1100, 601},
++ {1000, 777},
++ {950, 872},
++ {868, 1010},
++ {861, 0xFFFFFFFF},
++};
++
++/*
++ * Transfer desired mv to required DAC value, update for new boards,
++ * according to "Intel PXA27x Processor Developer's Kit User's Guide,
++ * April 2004, Revision 4.001"
++ * Vcore = 1.5V - (587uV * DAC(input)).
++ */
++static struct MvDAC table_c5[] = {
++ {1500, 0},
++ {1484,25},
++ {1471,50},
++ {1456,75},
++ {1441,100},
++ {1427,125},
++ {1412,150},
++ {1397,175},
++ {1383,200},
++ {1368,225},
++ {1353,250},
++ {1339,275},
++ {1323,300},
++ {1309,325},
++ {1294,350},
++ {1280,375},
++ {1265,400},
++ {1251,425},
++ {1236,450},
++ {1221,475},
++ {1207,500},
++ {1192,525},
++ {1177,550},
++ {1162,575},
++ {1148,600},
++ {1133,625},
++ {1118,650},
++ {1104,675},
++ {1089,700},
++ {1074,725},
++ {1060,750},
++ {1045,775},
++ {1030,800},
++ {1016,825},
++ {1001,850},
++ {986,875},
++ {972,900},
++ {957,925},
++ {942,950},
++ {928,975},
++ {913,1000},
++ {899, 1023},
++};
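++
++/* Sanity check of the formula above: DAC = 850 gives
++ * 1.5 V - (587 uV * 850) ~= 1.001 V, matching the {1001, 850} entry.
++ */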
++
++static unsigned int mv2DAC(unsigned int mv)
++{
++ int i, num = mvdt_size;
++
++ if (mvDACtable[0].mv <= mv) { /* Max or bigger */
++ /* Return the first one */
++ return mvDACtable[0].DACIn;
++ }
++
++ if (mvDACtable[num - 1].mv >= mv) { /* Min or smaller */
++ /* Return the last one */
++ return mvDACtable[num - 1].DACIn;
++ }
++
++ /* The biggest and smallest value cases are covered, now the
++ loop may skip those */
++ for (i = 1; i <= (num - 1); i++) {
++ if ((mvDACtable[i].mv >= mv) && (mvDACtable[i + 1].mv < mv)) {
++ return mvDACtable[i].DACIn;
++ }
++ }
++
++ /* Should never get here */
++ return 0;
++}
++
++static void clr_all_sqc(void)
++{
++ int i = 0;
++ for (i = 0; i < 32; i++)
++ PCMD(i) &= ~PCMD_SQC;
++}
++
++static void clr_all_mbc(void)
++{
++ int i = 0;
++ for (i = 0; i < 32; i++)
++ PCMD(i) &= ~PCMD_MBC;
++}
++
++static void clr_all_dce(void)
++{
++ int i = 0;
++ for (i = 0; i < 32; i++)
++ PCMD(i) &= ~PCMD_DCE;
++}
++
++static void set_mbc_bit(int ReadPointer, int NumOfBytes)
++{
++ PCMD0 |= PCMD_MBC;
++ PCMD1 |= PCMD_MBC;
++}
++
++static void set_lc_bit(int ReadPointer, int NumOfBytes)
++{
++ PCMD0 |= PCMD_LC;
++ PCMD1 |= PCMD_LC;
++ PCMD2 |= PCMD_LC;
++}
++
++static void set_cmd_data(unsigned char *DataArray, int StartPoint, int size)
++{
++ PCMD0 &= 0xFFFFFF00;
++ PCMD0 |= DataArray[0];
++ PCMD1 &= 0xFFFFFF00;
++ PCMD1 |= DataArray[1];
++ PCMD2 &= 0xFFFFFF00;
++ PCMD2 |= DataArray[2];
++}
++
++/* coupled indicates that this VCS is to be coupled with a FCS */
++static void power_change_cmd(unsigned int DACValue, int coupled)
++{
++ unsigned char dataArray[3];
++
++ dataArray[0] = 0; /* Command 0 */
++ dataArray[1] = (DACValue & 0x000000FF); /* data LSB */
++ dataArray[2] = (DACValue & 0x0000FF00) >> 8; /* data MSB */
++
++ PVCR = 0;
++
++ PCFR &= ~PCFR_FVC;
++ PVCR &= 0xFFFFF07F; /* no delay is necessary */
++ PVCR &= 0xFFFFFF80; /* clear slave address */
++ PVCR |= 0x20; /* set slave address */
++
++ PVCR &= 0xFE0FFFFF; /* clear read pointer 0 */
++ PVCR |= 0;
++
++ /* DCE and SQC are not necessary for single command */
++ clr_all_sqc();
++ clr_all_dce();
++
++ clr_all_mbc();
++ set_mbc_bit(0, 2);
++
++ /* indicate that the last byte of this command is held in this register */
++ PCMD2 &= ~PCMD_MBC;
++
++ /* indicate this is the first command and last command also */
++ set_lc_bit(0, 3);
++
++ /* programming the command data bit */
++ set_cmd_data(dataArray, 0, 2);
++
++ if (coupled) {
++ /* Enable Power I2C and FVC */
++ PCFR |= (PCFR_PI2CEN | PCFR_FVC);
++ } else {
++ /* Enable Power I2C */
++ PCFR |= PCFR_PI2CEN;
++ }
++}
++
++static void change_voltage(void)
++{
++ unsigned long flags;
++ unsigned int unused;
++
++
++ local_irq_save(flags);
++
++ __asm__ __volatile__("\n\
++ @ WORKAROUND - Core hangs on voltage change at different\n\
++ @ alignments and at different core clock frequencies\n\
++ @ To ensure that no external fetches occur, we want to store the next\n\
++ @ several instructions that occur after the voltage change inside\n\
++ @ the cache. The load dependency stall near the retry label ensures \n\
++ @ that any outstanding instruction cacheline loads are complete before \n\
++ @ the mcr instruction is executed on the 2nd pass. This procedure \n\
++ @ ensures that the internal bus will not be busy. \n\
++ \n\
++ b 2f \n\
++ nop \n\
++ .align 5 \n\
++2: \n\
++ ldr r0, [%1] @ APB register read and compare \n\
++ cmp r0, #0 @ fence for pending slow apb reads \n\
++ \n\
++ mov r0, #8 @ VC bit for PWRMODE \n\
++ movs r1, #1 @ don't execute mcr on 1st pass \n\
++ \n\
++ @ %1 points to uncacheable memory to force memory read \n\
++ \n\
++retry: \n\
++ ldreq r3, [%2] @ only stall on the 2nd pass\n\
++ cmpeq r3, #0 @ cmp causes fence on mem transfers\n\
++ cmp r1, #0 @ is this the 2nd pass? \n\
++ mcreq p14, 0, r0, c7, c0, 0 @ write to PWRMODE on 2nd pass only \n\
++ \n\
++ @ Read VC bit until it is 0, indicates that the VoltageChange is done.\n\
++ @ On first pass, we never set the VC bit, so it will be clear already.\n\
++ \n\
++VoltageChange_loop: \n\
++ mrc p14, 0, r3, c7, c0, 0 \n\
++ tst r3, #0x8 \n\
++ bne VoltageChange_loop \n\
++ \n\
++ subs r1, r1, #1 @ update conditional execution counter\n\
++ beq retry":"=&r"(unused)
++ :"r"(&CCCR), "r"(ramstart)
++ :"r0", "r1", "r3");
++
++ local_irq_restore(flags);
++}
++
++void vm_setvoltage(unsigned int DACValue)
++{
++ power_change_cmd(DACValue, 0 /* not-coupled */ );
++ /* Execute voltage change sequence */
++ change_voltage(); /* set VC on the PWRMODE on CP14 */
++}
++
++static void set_voltage(unsigned int mv)
++{
++ vm_setvoltage(mv2DAC(mv));
++}
++
++static int vcs_init(void)
++{
++ /* We distinguish new and old boards by processor chip
++ * revision: we assume new boards carry a C5 revision part
++ * and use the new table (table_c5) for them; for all
++ * other boards we use the old table (table_c0).
++ * Note that this logic breaks down, and an inaccurate
++ * voltage will be set, if a C5 part is installed on an
++ * old board or vice versa.
++ */
++
++ asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
++
++ chiprev &= CP15R0_REV_MASK;
++
++ if (chiprev == PXA270_C5) {
++ mvDACtable = table_c5;
++ mvdt_size = sizeof(table_c5) / sizeof(struct MvDAC);
++ } else {
++ mvDACtable = table_c0;
++ mvdt_size = sizeof(table_c0) / sizeof(struct MvDAC);
++ }
++
++ CKEN |= 0x1 << 15;
++ CKEN |= 0x1 << 14;
++ PCFR = PCFR_PI2CEN;
++ return 0;
++}
++
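++/* cpufreq_matrix[n-2][l-2] holds the core frequency in kHz for a given
++ * 2N (n = 2..8) and L (l = 2..31): 13 MHz * L * (2N/2). For example,
++ * 2N = 4 and L = 16 give 13 * 4 * 16 / 2 = 416 MHz, stored as 416000.
++ */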
++static void initialize_freq_matrix(void)
++{
++ int n, l;
++
++ memset(&cpufreq_matrix, 0, sizeof(cpufreq_matrix));
++
++ for (n = 2; n < N_NUM + 2; n++) {
++ for (l = 2; l <= L_NUM; l++) {
++ cpufreq_matrix[n - 2][l - 2] = (13 * n * l / 2) * 1000;
++ if (cpufreq_matrix[n - 2][l - 2] > BLVD_MAX_FREQ)
++ cpufreq_matrix[n - 2][l - 2] = 0;
++ }
++ }
++}
++
++/*
++ This should be called with a valid freq point that was
++ obtained via validate_speed
++*/
++static void set_freq(unsigned int CLKCFGValue)
++{
++ unsigned long flags;
++ unsigned int unused;
++ unsigned int fcsbits = 0xe3dfeff;
++ volatile int v;
++
++ local_irq_save(flags);
++
++ /*
++ force a tlb fault to get the mapping into the tlb
++ (otherwise this will occur below when the sdram is turned off and
++ something-bad(tm) will happen)
++ */
++
++ v = *(volatile unsigned long *)ramstart;
++ *(volatile unsigned long *)ramstart = v;
++
++ __asm__ __volatile__(" \n\
++ ldr r4, [%1] @load MDREFR \n\
++ mcr p14, 0, %2, c6, c0, 0 @ set CCLKCFG[FCS] \n\
++ ldr r5, [%3] \n\
++ and r4, r4, r5 \n\
++ str r4, [%1] @restore \n\
++ ":"=&r"(unused)
++ :"r"(&MDREFR), "r"(CLKCFGValue), "r"(&fcsbits)
++ :"r4", "r5");
++
++ local_irq_restore(flags);
++}
++
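++/* Derive a frequency from the CCSR L and 2N fields:
++ * freq(kHz) = 13000 * L * (2N/2).
++ * For example, L = 16 and 2N = 4 give 13000 * 16 * 4 / 2 = 416000 kHz.
++ */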
++static int get_freq(void)
++{
++ unsigned int freq, n, l, ccsr;
++
++ ccsr = CCSR;
++
++ l = ccsr & CCCR_L_MASK; /* Get L */
++ n = (ccsr & CCCR_N_MASK) >> 7; /* Get 2N */
++
++ if (n < 2)
++ n = 2;
++
++ /* Shift to divide by 2 because N is really 2N */
++ freq = (13000 * l * n) >> 1; /* in kHz */
++
++ return freq;
++}
++
++static unsigned int read_clkcfg(void)
++{
++ unsigned int value = 0;
++
++ /* read CLKCFG (CP14, register 6) into value */
++ __asm__ __volatile__("mrc p14, 0, %0, c6, c0, 0" : "=r"(value));
++
++ return value;
++}
++
++static int init_freqs(void)
++{
++ int cpu_ver;
++
++ asm volatile ("mrc%? p15, 0, %0, c0, c0":"=r" (cpu_ver));
++
++ /*
++ Bulverde A0: 0x69054110,
++ A1: 0x69054111
++ */
++ if ((cpu_ver & 0x0000f000) >> 12 == 4 &&
++ (cpu_ver & 0xffff0000) >> 16 == 0x6905) {
++ /* It is a PXA27x chip. */
++ return 1;
++ }
++
++ return 0;
++}
++
++static int freq_init(void)
++{
++ unsigned int freq;
++
++ /*
++ * In order to turn the sdram back on (see below) we need to
++ * r/w the sdram. We need to do this without the cache and
++ * write buffer in the way. So, we temporarily ioremap the
++ * first page of sdram as uncached i/o memory and use the
++ * aliased address
++ */
++
++ /* map the first page of sdram to an uncached virtual page */
++ ramstart = (int *)ioremap(PHYS_OFFSET, 4096);
++
++ if (!ramstart) {
++ printk(KERN_ERR "PXA27x DPM: ioremap of first page failed.\n");
++ return -1;
++ }
++
++ initialize_freq_matrix();
++
++ if (init_freqs()) {
++ freq = get_freq(); /* in kHz */
++ printk(KERN_INFO "PXA27x DPM: Initial frequency is %dkHz.\n", freq);
++ return 0;
++ }
++
++ return -1;
++}
++
++void freq_cleanup(void)
++{
++ /* unmap the page we used*/
++ iounmap((void *)ramstart);
++}
++
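++/* Memory clock (MemClk, in MHz) as computed below: with CCCR[A] set it is
++ * L*13 when fast-bus (B) is set, else (L*13)/2; with CCCR[A] clear it is
++ * (L*13)/M where M = 1 (L <= 10), 2 (L <= 20) or 4 (L > 20).
++ * E.g. A = 0, L = 16 gives 16*13/2 = 104 MHz.
++ */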
++static unsigned long
++calculate_memclk(unsigned long cccr, unsigned long clkcfg)
++{
++ unsigned long M, memclk;
++ u32 L;
++
++ L = cccr & 0x1f;
++ if (cccr & (1 << 25)) {
++ if (clkcfg & CLKCFG_B_BIT)
++ memclk = (L*13);
++ else
++ memclk = (L*13)/2;
++ }
++ else {
++ if (L <= 10) M = 1;
++ else if (L <= 20) M = 2;
++ else M = 4;
++
++ memclk = (L*13)/M;
++ }
++
++ return memclk;
++}
++
++static unsigned long
++calculate_new_memclk(struct dpm_regs *regs)
++{
++ return calculate_memclk(regs->cccr, regs->clkcfg);
++}
++
++static unsigned long
++calculate_cur_memclk(void)
++{
++ unsigned long cccr = CCCR;
++ return calculate_memclk(cccr, read_clkcfg());
++}
++
++/* Returns optimal timings for memory controller
++ * a - [A]
++ * b - [B]
++ * l - value of L
++ */
++static struct mem_timings get_optimal_mem_timings(int a, int b, int l){
++ struct mem_timings ret = {
++ .msc0 = 0,
++ .dtc = 0,
++ .dri = 0,
++ };
++
++ if(a!=0 && b==0){
++ switch(l){
++ case 2:
++ ret.msc0 = XLLI_MSC0_13;
++ ret.dtc = XLLI_DTC_13;
++ ret.dri = XLLI_DRI_13;
++ break;
++ case 3:
++ ret.msc0 = XLLI_MSC0_19;
++ ret.dtc = XLLI_DTC_19;
++ ret.dri = XLLI_DRI_19;
++ break;
++ case 4:
++ ret.msc0 = XLLI_MSC0_26;
++ ret.dtc = XLLI_DTC_26;
++ ret.dri = XLLI_DRI_26;
++ break;
++ case 5:
++ ret.msc0 = XLLI_MSC0_32;
++ ret.dtc = XLLI_DTC_32;
++ ret.dri = XLLI_DRI_32;
++ break;
++ case 6:
++ ret.msc0 = XLLI_MSC0_39;
++ ret.dtc = XLLI_DTC_39;
++ ret.dri = XLLI_DRI_39;
++ break;
++ case 7:
++ ret.msc0 = XLLI_MSC0_45;
++ ret.dtc = XLLI_DTC_45;
++ ret.dri = XLLI_DRI_45;
++ break;
++ case 8:
++ ret.msc0 = XLLI_MSC0_52;
++ ret.dtc = XLLI_DTC_52;
++ ret.dri = XLLI_DRI_52;
++ break;
++ case 9:
++ ret.msc0 = XLLI_MSC0_58;
++ ret.dtc = XLLI_DTC_58;
++ ret.dri = XLLI_DRI_58;
++ break;
++ case 10:
++ ret.msc0 = XLLI_MSC0_65;
++ ret.dtc = XLLI_DTC_65;
++ ret.dri = XLLI_DRI_65;
++ break;
++ /*
++ * L11 - L20 ARE THE SAME for A0Bx
++ */
++ case 11:
++ ret.msc0 = XLLI_MSC0_71;
++ ret.dtc = XLLI_DTC_71;
++ ret.dri = XLLI_DRI_71;
++ break;
++ case 12:
++ ret.msc0 = XLLI_MSC0_78;
++ ret.dtc = XLLI_DTC_78;
++ ret.dri = XLLI_DRI_78;
++ break;
++ case 13:
++ ret.msc0 = XLLI_MSC0_84;
++ ret.dtc = XLLI_DTC_84;
++ ret.dri = XLLI_DRI_84;
++ break;
++ case 14:
++ ret.msc0 = XLLI_MSC0_91;
++ ret.dtc = XLLI_DTC_91;
++ ret.dri = XLLI_DRI_91;
++ break;
++ case 15:
++ ret.msc0 = XLLI_MSC0_97;
++ ret.dtc = XLLI_DTC_97;
++ ret.dri = XLLI_DRI_97;
++ break;
++ case 16:
++ ret.msc0 = XLLI_MSC0_104;
++ ret.dtc = XLLI_DTC_104;
++ ret.dri = XLLI_DRI_104;
++ break;
++ case 17:
++ ret.msc0 = XLLI_MSC0_110;
++ ret.dtc = XLLI_DTC_110;
++ ret.dri = XLLI_DRI_110;
++ break;
++ case 18:
++ ret.msc0 = XLLI_MSC0_117;
++ ret.dtc = XLLI_DTC_117;
++ ret.dri = XLLI_DRI_117;
++ break;
++ case 19:
++ ret.msc0 = XLLI_MSC0_124;
++ ret.dtc = XLLI_DTC_124;
++ ret.dri = XLLI_DRI_124;
++ break;
++ case 20:
++ ret.msc0 = XLLI_MSC0_130;
++ ret.dtc = XLLI_DTC_130;
++ ret.dri = XLLI_DRI_130;
++ break;
++ case 21:
++ ret.msc0 = XLLI_MSC0_136;
++ ret.dtc = XLLI_DTC_136;
++ ret.dri = XLLI_DRI_136;
++ break;
++ case 22:
++ ret.msc0 = XLLI_MSC0_143;
++ ret.dtc = XLLI_DTC_143;
++ ret.dri = XLLI_DRI_143;
++ break;
++ case 23:
++ ret.msc0 = XLLI_MSC0_149;
++ ret.dtc = XLLI_DTC_149;
++ ret.dri = XLLI_DRI_149;
++ break;
++ case 24:
++ ret.msc0 = XLLI_MSC0_156;
++ ret.dtc = XLLI_DTC_156;
++ ret.dri = XLLI_DRI_156;
++ break;
++ case 25:
++ ret.msc0 = XLLI_MSC0_162;
++ ret.dtc = XLLI_DTC_162;
++ ret.dri = XLLI_DRI_162;
++ break;
++ case 26:
++ ret.msc0 = XLLI_MSC0_169;
++ ret.dtc = XLLI_DTC_169;
++ ret.dri = XLLI_DRI_169;
++ break;
++ case 27:
++ ret.msc0 = XLLI_MSC0_175;
++ ret.dtc = XLLI_DTC_175;
++ ret.dri = XLLI_DRI_175;
++ break;
++ case 28:
++ ret.msc0 = XLLI_MSC0_182;
++ ret.dtc = XLLI_DTC_182;
++ ret.dri = XLLI_DRI_182;
++ break;
++ case 29:
++ ret.msc0 = XLLI_MSC0_188;
++ ret.dtc = XLLI_DTC_188;
++ ret.dri = XLLI_DRI_188;
++ break;
++ case 30:
++ ret.msc0 = XLLI_MSC0_195;
++ ret.dtc = XLLI_DTC_195;
++ ret.dri = XLLI_DRI_195;
++ break;
++ case 31:
++ ret.msc0 = XLLI_MSC0_201;
++ ret.dtc = XLLI_DTC_201;
++ ret.dri = XLLI_DRI_201;
++ }
++
++ }else if(a!=0 && b!=0){
++ switch(l){
++ case 2:
++ ret.msc0 = XLLI_MSC0_26;
++ ret.dtc = XLLI_DTC_26;
++ ret.dri = XLLI_DRI_26;
++ break;
++ case 3:
++ ret.msc0 = XLLI_MSC0_39;
++ ret.dtc = XLLI_DTC_39;
++ ret.dri = XLLI_DRI_39;
++ break;
++ case 4:
++ ret.msc0 = XLLI_MSC0_52;
++ ret.dtc = XLLI_DTC_52;
++ ret.dri = XLLI_DRI_52;
++ break;
++ case 5:
++ ret.msc0 = XLLI_MSC0_65;
++ ret.dtc = XLLI_DTC_65;
++ ret.dri = XLLI_DRI_65;
++ break;
++ case 6:
++ ret.msc0 = XLLI_MSC0_78;
++ ret.dtc = XLLI_DTC_78;
++ ret.dri = XLLI_DRI_78;
++ break;
++ case 7:
++ ret.msc0 = XLLI_MSC0_91;
++ ret.dtc = XLLI_DTC_91;
++ ret.dri = XLLI_DRI_91;
++ break;
++ case 8:
++ ret.msc0 = XLLI_MSC0_104;
++ ret.dtc = XLLI_DTC_104;
++ ret.dri = XLLI_DRI_104;
++ break;
++ case 9:
++ ret.msc0 = XLLI_MSC0_117;
++ ret.dtc = XLLI_DTC_117;
++ ret.dri = XLLI_DRI_117;
++ break;
++ case 10:
++ ret.msc0 = XLLI_MSC0_130;
++ ret.dtc = XLLI_DTC_130;
++ ret.dri = XLLI_DRI_130;
++ break;
++ case 11:
++ ret.msc0 = XLLI_MSC0_143;
++ ret.dtc = XLLI_DTC_143;
++ ret.dri = XLLI_DRI_143;
++ break;
++ case 12:
++ ret.msc0 = XLLI_MSC0_156;
++ ret.dtc = XLLI_DTC_156;
++ ret.dri = XLLI_DRI_156;
++ break;
++ case 13:
++ ret.msc0 = XLLI_MSC0_169;
++ ret.dtc = XLLI_DTC_169;
++ ret.dri = XLLI_DRI_169;
++ break;
++ case 14:
++ ret.msc0 = XLLI_MSC0_182;
++ ret.dtc = XLLI_DTC_182;
++ ret.dri = XLLI_DRI_182;
++ break;
++ case 15:
++ ret.msc0 = XLLI_MSC0_195;
++ ret.dtc = XLLI_DTC_195;
++ ret.dri = XLLI_DRI_195;
++ break;
++ case 16:
++ ret.msc0 = XLLI_MSC0_208;
++ ret.dtc = XLLI_DTC_208;
++ ret.dri = XLLI_DRI_208;
++ }
++ }else{
++ /* A0Bx */
++ switch(l){
++ case 2:
++ ret.msc0 = XLLI_MSC0_26;
++ ret.dtc = XLLI_DTC_26;
++ ret.dri = XLLI_DRI_26;
++ break;
++ case 3:
++ ret.msc0 = XLLI_MSC0_39;
++ ret.dtc = XLLI_DTC_39;
++ ret.dri = XLLI_DRI_39;
++ break;
++ case 4:
++ ret.msc0 = XLLI_MSC0_52;
++ ret.dtc = XLLI_DTC_52;
++ ret.dri = XLLI_DRI_52;
++ break;
++ case 5:
++ ret.msc0 = XLLI_MSC0_65;
++ ret.dtc = XLLI_DTC_65;
++ ret.dri = XLLI_DRI_65;
++ break;
++ case 6:
++ ret.msc0 = XLLI_MSC0_78;
++ ret.dtc = XLLI_DTC_78;
++ ret.dri = XLLI_DRI_78;
++ break;
++ case 7:
++ ret.msc0 = XLLI_MSC0_91;
++ ret.dtc = XLLI_DTC_91;
++ ret.dri = XLLI_DRI_91;
++ break;
++ case 8:
++ ret.msc0 = XLLI_MSC0_104;
++ ret.dtc = XLLI_DTC_104;
++ ret.dri = XLLI_DRI_104;
++ break;
++ case 9:
++ ret.msc0 = XLLI_MSC0_117;
++ ret.dtc = XLLI_DTC_117;
++ ret.dri = XLLI_DRI_117;
++ break;
++ case 10:
++ ret.msc0 = XLLI_MSC0_130;
++ ret.dtc = XLLI_DTC_130;
++ ret.dri = XLLI_DRI_130;
++ break;
++ case 11:
++ ret.msc0 = XLLI_MSC0_71;
++ ret.dtc = XLLI_DTC_71;
++ ret.dri = XLLI_DRI_71;
++ break;
++ case 12:
++ ret.msc0 = XLLI_MSC0_78;
++ ret.dtc = XLLI_DTC_78;
++ ret.dri = XLLI_DRI_78;
++ break;
++ case 13:
++ ret.msc0 = XLLI_MSC0_84;
++ ret.dtc = XLLI_DTC_84;
++ ret.dri = XLLI_DRI_84;
++ break;
++ case 14:
++ ret.msc0 = XLLI_MSC0_91;
++ ret.dtc = XLLI_DTC_91;
++ ret.dri = XLLI_DRI_91;
++ break;
++ case 15:
++ ret.msc0 = XLLI_MSC0_97;
++ ret.dtc = XLLI_DTC_97;
++ ret.dri = XLLI_DRI_97;
++ break;
++ case 16:
++ ret.msc0 = XLLI_MSC0_104;
++ ret.dtc = XLLI_DTC_104;
++ ret.dri = XLLI_DRI_104;
++ break;
++ case 17:
++ ret.msc0 = XLLI_MSC0_110;
++ ret.dtc = XLLI_DTC_110;
++ ret.dri = XLLI_DRI_110;
++ break;
++ case 18:
++ ret.msc0 = XLLI_MSC0_117;
++ ret.dtc = XLLI_DTC_117;
++ ret.dri = XLLI_DRI_117;
++ break;
++ case 19:
++ ret.msc0 = XLLI_MSC0_124;
++ ret.dtc = XLLI_DTC_124;
++ ret.dri = XLLI_DRI_124;
++ break;
++ case 20:
++ ret.msc0 = XLLI_MSC0_130;
++ ret.dtc = XLLI_DTC_130;
++ ret.dri = XLLI_DRI_130;
++ break;
++ case 21:
++ ret.msc0 = XLLI_MSC0_68;
++ ret.dtc = XLLI_DTC_68;
++ ret.dri = XLLI_DRI_68;
++ break;
++ case 22:
++ ret.msc0 = XLLI_MSC0_71;
++ ret.dtc = XLLI_DTC_71;
++ ret.dri = XLLI_DRI_71;
++ break;
++ case 23:
++ ret.msc0 = XLLI_MSC0_74;
++ ret.dtc = XLLI_DTC_74;
++ ret.dri = XLLI_DRI_74;
++ break;
++ case 24:
++ ret.msc0 = XLLI_MSC0_78;
++ ret.dtc = XLLI_DTC_78;
++ ret.dri = XLLI_DRI_78;
++ break;
++ case 25:
++ ret.msc0 = XLLI_MSC0_81;
++ ret.dtc = XLLI_DTC_81;
++ ret.dri = XLLI_DRI_81;
++ break;
++ case 26:
++ ret.msc0 = XLLI_MSC0_84;
++ ret.dtc = XLLI_DTC_84;
++ ret.dri = XLLI_DRI_84;
++ break;
++ case 27:
++ ret.msc0 = XLLI_MSC0_87;
++ ret.dtc = XLLI_DTC_87;
++ ret.dri = XLLI_DRI_87;
++ break;
++ case 28:
++ ret.msc0 = XLLI_MSC0_91;
++ ret.dtc = XLLI_DTC_91;
++ ret.dri = XLLI_DRI_91;
++ break;
++ case 29:
++ ret.msc0 = XLLI_MSC0_94;
++ ret.dtc = XLLI_DTC_94;
++ ret.dri = XLLI_DRI_94;
++ break;
++ case 30:
++ ret.msc0 = XLLI_MSC0_97;
++ ret.dtc = XLLI_DTC_97;
++ ret.dri = XLLI_DRI_97;
++ break;
++ case 31:
++ ret.msc0 = XLLI_MSC0_100;
++ ret.dtc = XLLI_DTC_100;
++ ret.dri = XLLI_DRI_100;
++ }
++ }
++
++ return ret;
++}
++
++static void assign_optimal_mem_timings(
++ unsigned int* msc0_reg,
++ unsigned int* mdrefr_reg,
++ unsigned int* mdcnfg_reg,
++ int a, int b, int l
++ )
++{
++ unsigned int msc0_reg_tmp = (*msc0_reg);
++ unsigned int mdrefr_reg_tmp = (*mdrefr_reg);
++ unsigned int mdcnfg_reg_tmp = (*mdcnfg_reg);
++ struct mem_timings timings = get_optimal_mem_timings(a,b,l);
++
++ /* clear the bit fields that get_optimal_mem_timings fills in */
++ msc0_reg_tmp &= ~(MSC0_RDF | MSC0_RDN | MSC0_RRR);
++ mdrefr_reg_tmp &= ~(MDREFR_RFU | MDREFR_DRI);
++ mdcnfg_reg_tmp &= ~(MDCNFG_DTC0 | MDCNFG_DTC2);
++
++ /* prepare appropriate timings */
++ msc0_reg_tmp |= timings.msc0;
++ mdrefr_reg_tmp |= timings.dri;
++ mdcnfg_reg_tmp |= timings.dtc;
++
++ /* set timings (all bits one time) */
++ (*msc0_reg) = msc0_reg_tmp;
++ (*mdrefr_reg) = mdrefr_reg_tmp;
++ (*mdcnfg_reg) = mdcnfg_reg_tmp;
++}
++
++static void set_mdrefr_value(u32 new_mdrefr){
++ unsigned long s, old_mdrefr, errata62;
++ old_mdrefr = MDREFR;
++ /* E62 (28007106.pdf): Memory controller may hang while clearing
++ * MDREFR[K1DB2] or MDREFR[K2DB2]
++ */
++ errata62 =
++ (((old_mdrefr & MDREFR_K1DB2) != 0) && ((new_mdrefr & MDREFR_K1DB2) == 0)) ||
++ (((old_mdrefr & MDREFR_K2DB2) != 0) && ((new_mdrefr & MDREFR_K2DB2) == 0));
++
++ if(errata62){
++ unsigned long oscr_0 = OSCR;
++ unsigned long oscr_1 = oscr_0;
++ /* Step 1 - disable interrupts */
++ local_irq_save(s);
++ /* Step 2 - leave KxDB2, but set MDREFR[DRI] (bits 0-11) to
++ * 0xFFF
++ */
++ MDREFR = MDREFR | MDREFR_DRI;
++ /* Step 3 - read MDREFR one time */
++ MDREFR;
++ /* Step 4 - wait 1.6167us
++ * (3.25MHz clock increments OSCR0 7 times)
++ */
++ while(oscr_1-oscr_0 < 7){
++ cpu_relax();
++ oscr_1 = OSCR;
++ }
++
++ }
++
++ /* Step 5 - clear K1DB2 and/or K2DB2, and set MDREFR[DRI] to
++ * the proper value at the same time
++ */
++
++ /*Set MDREFR as if no errata workaround is needed*/
++ MDREFR = new_mdrefr;
++
++ if(errata62){
++ /* Step 6 - read MDREFR one time*/
++ MDREFR;
++ /* Step 7 - enable interrupts*/
++ local_irq_restore(s);
++ }
++}
++
++static void scale_cpufreq(struct dpm_regs *regs)
++{
++ unsigned long new_memclk, cur_memclk;
++ u32 new_mdrefr, cur_mdrefr, read_mdrefr;
++ u32 new_msc0, new_mdcnfg;
++ int set_mdrefr = 0, scaling_up = 0;
++ int l, a, b;
++
++ l = regs->cccr & CCCR_L_MASK; /* Get L */
++ b = (regs->clkcfg >> 3) & 0x1;
++ a = (regs->cccr >> 25) & 0x1; /* cccr[A]: bit 25 */
++ cur_memclk = calculate_cur_memclk();
++ new_memclk = calculate_new_memclk(regs);
++
++ new_mdrefr = cur_mdrefr = MDREFR;
++ new_msc0 = MSC0;
++ new_mdcnfg = MDCNFG;
++
++ if (new_memclk != cur_memclk) {
++ new_mdrefr &= ~( MDREFR_K0DB2 | MDREFR_K0DB4 |
++ MDREFR_K1DB2 | MDREFR_K2DB2 );
++
++ if ((new_memclk > 52) && (new_memclk <= 104)) {
++ /* SDCLK0 = MEMCLK/2, SDCLK1,SDCLK2 = MEMCLK */
++ new_mdrefr |= MDREFR_K0DB2;
++ }
++ else if (new_memclk > 104){
++ /* SDCLK0 = MEMCLK/4, SDCLK1 and SDCLK2 = MEMCLK/2 */
++ new_mdrefr |= (MDREFR_K0DB4 | MDREFR_K1DB2 | MDREFR_K2DB2);
++ }
++
++ /* clock increasing or decreasing? */
++ if (new_memclk > cur_memclk) scaling_up = 1;
++ }
++
++ /* set MDREFR if necessary */
++ if (new_mdrefr != cur_mdrefr){
++ set_mdrefr = 1;
++ /* also adjust timings as long as we change MDREFR value */
++ assign_optimal_mem_timings(
++ &new_msc0,
++ &new_mdrefr,
++ &new_mdcnfg,
++ a,b,l
++ );
++ }
++
++ /* if memclk is scaling up, set MDREFR before freq change
++ * (2800002.pdf:6.5.1.4)
++ */
++ if (set_mdrefr && scaling_up) {
++ MSC0 = new_msc0;
++ set_mdrefr_value(new_mdrefr);
++ MDCNFG = new_mdcnfg;
++ read_mdrefr = MDREFR;
++ }
++
++ CCCR = regs->cccr;
++ set_freq(regs->clkcfg);
++
++ /* if memclk is scaling down, set MDREFR after freq change
++ * (2800002.pdf:6.5.1.4)
++ */
++ if (set_mdrefr && !scaling_up) {
++ MSC0 = new_msc0;
++ set_mdrefr_value(new_mdrefr);
++ MDCNFG = new_mdcnfg;
++ read_mdrefr = MDREFR;
++ }
++}
++
++static void scale_voltage(struct dpm_regs *regs)
++{
++ set_voltage(regs->voltage);
++}
++
++static void scale_voltage_coupled(struct dpm_regs *regs)
++{
++ power_change_cmd(mv2DAC(regs->voltage), 1 /* coupled */ );
++}
++
++static void calculate_lcd_freq(struct dpm_md_opt *opt)
++{
++ int k = 1; /* lcd divisor */
++
++ /* L is verified to be between PLL_L_MAX and PLL_L_MIN in
++ dpm_bulverde_init_opt().
++ */
++ if (opt->l == -1) {
++ opt->lcd = -1;
++ return;
++ }
++
++ if (opt->l > 16) {
++ /* When L=17-31, K=4 */
++ k = 4;
++ } else if (opt->l > 7) {
++ /* When L=8-16, K=2 */
++ k = 2;
++ }
++
++ /* Else, when L=2-7, K=1 */
++
++ opt->lcd = 13000 * opt->l / k;
++}
++
++static void calculate_reg_values(struct dpm_md_opt *opt)
++{
++ int f = 0; /* frequency change bit */
++ int turbo = 0; /* turbo mode bit; depends on N value */
++
++ opt->regs.voltage = opt->v;
++
++/*
++ CCCR:
++
++ A: Alternate setting for MEMC clock
++ 0 = MEM clock frequency as specified in user guide table
++ 1 = MEM clock freq = System Bus Frequency
++
++ CLKCFG:
++
++ B = Fast-Bus Mode 0: System Bus is half of run-mode
++ 1: System Bus is equal to run-mode
++ NOTE: only allowed when L <= 16
++
++ HT = Half-Turbo 0: core frequency = run or turbo, depending on T bit
++ 1: core frequency = turbo frequency / 2
++ NOTE: only allowed when 2N = 6 or 2N = 8
++
++ F = Frequency change
++ 0: No frequency change is performed
++ 1: Do frequency-change
++
++ T = Turbo Mode 0: CPU operates at run Frequency
++ 1: CPU operates at Turbo Frequency (when n2 > 2)
++*/
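++ /* Example encoding: L = 16, 2N = 4, B = 0 gives
++ CLKCFG = (0 << 3) + (1 << 1) + 1 = 0x3 (F and T set) and
++ CCCR = (4 << 7) + 16 = 0x210.
++ */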
++ /* Set the CLKCFG with B, T, and HT */
++ if (opt->b != -1 && opt->n != -1) {
++ f = 1;
++
++ /*When 2N=2, Turbo Mode equals Run Mode, so it
++ does not really matter if this is >2 or >=2
++ */
++ if (opt->n > 2) {
++ turbo = 0x1;
++ }
++ opt->regs.clkcfg = (opt->b << 3) + (f << 1) + turbo;
++ } else {
++ f = 0x1;
++ opt->regs.clkcfg = (f << 1);
++ }
++
++ /*
++ What about when 2N=0 ... it is not defined by the yellow
++ book
++ */
++ if (opt->n != -1) {
++ /* 2N is 4 bits, L is 5 bits */
++ opt->regs.cccr = ((opt->n & 0xF) << 7) + (opt->l & 0x1F);
++ }
++
++ if (opt->cccra > 0) {
++ /* Turn on the CCCR[A] bit */
++ opt->regs.cccr |= (1 << 25);
++ }
++
++ if(opt->cpll_enabled == 0) {
++ opt->regs.cccr |= (CCCR_CPDIS_BIT_ON);
++ }
++ if(opt->ppll_enabled == 0) {
++ opt->regs.cccr |= (CCCR_PPDIS_BIT_ON);
++ }
++
++}
++
++static int init_opt(struct dpm_opt *opt)
++{
++ int v = -1;
++ int l = -1;
++ int n2 = -1;
++ int b = -1;
++ int half_turbo = -1;
++ int cccra = -1;
++ int cpll_enabled = -1;
++ int ppll_enabled = -1;
++ int sleep_mode = -1;
++ struct dpm_md_opt *md_opt = NULL;
++
++ v = opt->pp[DPM_MD_V];
++ l = opt->pp[DPM_MD_PLL_L];
++ n2 = opt->pp[DPM_MD_PLL_N]; /* 2*N */
++ b = opt->pp[DPM_MD_PLL_B]; /* Fast bus mode bit. */
++ half_turbo = opt->pp[DPM_MD_HALF_TURBO];
++ cccra = opt->pp[DPM_MD_CCCRA]; /* Alternate setting
++ for the MEM clock */
++ cpll_enabled = opt->pp[DPM_MD_CPLL_ON];
++ ppll_enabled = opt->pp[DPM_MD_PPLL_ON];
++ sleep_mode = opt->pp[DPM_MD_SLEEP_MODE];
++
++ md_opt = &opt->md_opt;
++
++ /* Up-front error checking. If we fail any of these, then the
++ whole operating point is suspect and therefore invalid.
++ */
++
++ /* The PXA27x manual ("Yellow Book") 3.5.5 (Table 3-7) states that the
++ * CPLL="On"/PPLL="Off" configuration is forbidden (all other
++ * combinations seem to be OK for "B0"); for "C0" boards we assume the
++ * same combinations are allowed.
++ * The Yellow Book also states at 3.5.7.1 (page 3-25) that
++ * "CCCR[PPDIS] and CCCR[CPDIS] must always be identical and
++ * changed together", and "If PLLs are to be turned off using xPDIS
++ * then set xPDIS before frequency change and clear xPDIS after
++ * frequency change".
++ */
++
++ if( (l > PLL_L_MIN) && ( cpll_enabled == 0 ) ){
++ printk(KERN_WARNING
++ "DPM: when l>0 (NOT 13M mode) CPLL must be On \n");
++ return -EINVAL;
++ }
++ if( (cpll_enabled>0) && (ppll_enabled==0) ){
++ printk(KERN_WARNING
++ "DPM: illegal combination CPLL=On PPLL=Off\n");
++ return -EINVAL;
++ }
++
++ /* Check if voltage is correct */
++ if(v < -1){
++ printk(KERN_WARNING
++ "DPM: incorrect voltage %d\n",
++ v);
++ return -EINVAL;
++ }
++
++ if ((l != -1) && (n2 != -1)) {
++ if (((l && n2) == 0) && (l || n2) != 0) {
++ /* If one of L or N2 is 0, but they are not both 0 */
++ printk(KERN_WARNING
++ "DPM: L/N (%d/%d) must both be 0 or both be non-zero\n",
++ l, n2);
++ return -EINVAL;
++ }
++
++ /* Standard range checking */
++ if (((l > 0) && (n2 > 0)) && /* Don't complain about 0, it means sleep */
++ ((l > PLL_L_MAX) ||
++ (n2 > PLL_N_MAX) || (l < PLL_L_MIN) || (n2 < PLL_N_MIN))) {
++ /* Range checking */
++ printk(KERN_WARNING
++ "DPM: L/N (%d/%d) out of range, L=1-31, N=2-8 \n",
++ l, n2);
++ return -EINVAL;
++ }
++
++ /* If this is for 13M mode, do some more checking */
++ if (l == PLL_L_MIN) {
++ /*
++ NOTE: the Yellow Book does not require any
++ particular setting for N, but we think it really
++ should be 2
++ */
++ if (n2 != 2) {
++ printk(KERN_WARNING
++ "DPM: When L=1 (13M Mode), N must be 2 (%d)\n",
++ n2);
++ return -EINVAL;
++ }
++
++ if ((cpll_enabled != 0) && (cpll_enabled != -1)) {
++ printk(KERN_WARNING
++ "DPM: When L=1 (13M Mode), CPLL must be OFF (%d)\n",
++ cpll_enabled);
++ return -EINVAL;
++ }
++
++ /* Page 3-32, section 3.5.7.5.2 of the Yellow Book
++ says, "Notes: Other bits in the CLKCFG can not be
++ changed while entering or exiting the 13M
++ mode. While in 13M mode, it is illegal to write to
++ CLKCFG's B, HT, or T bits"
++ */
++ if ((b > 0) || (half_turbo > 0)) {
++ printk(KERN_WARNING
++ "DPM: When L=1 (13M Mode), B (%d) and "
++ "Half-Turbo (%d) must be off\n", b, half_turbo);
++ return -EINVAL;
++ }
++ }
++ }
++
++ if (half_turbo > 1) {
++ printk(KERN_WARNING "DPM: Half-Turbo must be 0 or 1 (%d)\n",
++ half_turbo);
++ return -EINVAL;
++ }
++
++ if (b > 1) {
++ printk(KERN_WARNING
++ "DPM: Fast-Bus Mode (B) must be 0 or 1 (%d)\n", b);
++ return -EINVAL;
++ }
++
++ /* 2800002.pdf 3.5.7.1 It is illegal to set B if CCCR[CPDIS] is set. */
++ if( cpll_enabled==0 && b == 1){
++ printk(KERN_WARNING
++ "DPM: fast bus (b=%d) must both be 0 if CPLL is Off\n",
++ b);
++ return -EINVAL;
++ }
++
++ if (cccra > 1) {
++ printk(KERN_WARNING
++ "DPM: CCCR[A] (alternate MEMC clock) must be 0 or 1 (%d)\n",
++ cccra);
++ return -EINVAL;
++ }
++
++ /* This (when CCCR[A] is on and FastBus is on, L must be <=16)
++ is explicitly stated in text at the bottom of one of the
++ CPU frequency tables--the one where CCCR[A] is on */
++ if ((b == 1) && (cccra == 1) && (l > 16)) {
++ printk(KERN_WARNING
++ "DPM: when B=1 and CCCR[A]=1, L must be <= 16 (L is %d)\n",
++ l);
++ return -EINVAL;
++ }
++
++ /* This one is not explicitly stated the Yellow Book as a bad
++ thing (as the previous restriction is), but according to
++ the CPU frequency tables, fast bus mode *cannot* be
++ supported, even when CCCR[A] is not 1.
++ */
++ if ((b == 1) && (l > 16)) {
++ printk(KERN_WARNING
++ "DPM: when B=1, L must be <= 16 (L is %d)\n", l);
++ return -EINVAL;
++ }
++
++ if (n2 != -1) {
++ if ((half_turbo == 1) && (n2 != 6) && (n2 != 8)) {
++ printk(KERN_WARNING
++ "DPM: Half Turbo only allowed when N2 is 6 or 8\n"
++ "(N2 is %d)\n", n2);
++ return -EINVAL;
++ }
++ }
++
++ /* Check Sleep Mode versus modes from pm.h
++ NOTE: CPUMODE_SENSE is not implemented.
++ */
++ if ((l == 0) && (n2 == 0) && (sleep_mode != -1) &&
++ (sleep_mode != CPUMODE_STANDBY) &&
++ (sleep_mode != CPUMODE_SLEEP) &&
++ (sleep_mode != CPUMODE_DEEPSLEEP)) {
++ printk(KERN_WARNING
++ "DPM: Sleep Mode value %d is not allowed"
++ " (only %d, %d, or %d) l=%d n2=%d\n",
++ sleep_mode,
++ CPUMODE_STANDBY, CPUMODE_SLEEP, CPUMODE_DEEPSLEEP,
++ l, n2);
++ return -EINVAL;
++ }
++
++ /* save the values for this operating point */
++ md_opt->v = v;
++ md_opt->l = l;
++ md_opt->n = n2;
++ md_opt->b = b;
++ md_opt->cccra = cccra;
++ md_opt->half_turbo = half_turbo;
++ md_opt->cpll_enabled = cpll_enabled;
++ md_opt->ppll_enabled = ppll_enabled;
++ md_opt->sleep_mode = sleep_mode;
++ calculate_lcd_freq(md_opt);
++
++ if ((md_opt->l == -1) || (md_opt->n == -1)) {
++ md_opt->cpu = -1;
++ } else {
++ /* shift 1 to divide by 2 because opt->n is 2*N */
++ md_opt->cpu = (13000 * md_opt->l * md_opt->n) >> 1;
++ if (md_opt->half_turbo == 1) {
++ /* divide by 2 */
++ md_opt->cpu = md_opt->cpu >> 1;
++ }
++ }
++
++ return 0;
++}
++
++static void fully_define_opt(struct dpm_md_opt *cur, struct dpm_md_opt *new)
++{
++ if (new->v == -1)
++ new->v = cur->v;
++ if (new->l == -1)
++ new->l = cur->l;
++ if (new->n == -1)
++ new->n = cur->n;
++ if (new->b == -1)
++ new->b = cur->b;
++ if (new->half_turbo == -1)
++ new->half_turbo = cur->half_turbo;
++ if (new->cccra == -1)
++ new->cccra = cur->cccra;
++ if (new->cpll_enabled == -1)
++ new->cpll_enabled = cur->cpll_enabled;
++ if (new->ppll_enabled == -1)
++ new->ppll_enabled = cur->ppll_enabled;
++ if (new->sleep_mode == -1)
++ new->sleep_mode = cur->sleep_mode;
++
++ if (new->n > 2) {
++ new->turbo = 1;
++ /* turbo mode: 13000 * L * N kHz (new->n holds 2*N).
++ Shift at the end to divide 2*N by 2 for Turbo mode or
++ by 4 for Half-Turbo mode.
++ */
++ new->cpu = (13000 * new->l * new->n) >>
++ ((new->half_turbo == 1) ? 2 : 1);
++ } else {
++ new->turbo = 0;
++ /* run mode */
++ new->cpu = 13000 * new->l;
++ }
++ /* lcd freq is derived from L */
++ calculate_lcd_freq(new);
++ calculate_reg_values(new);
++ /* We want to keep a baseline loops_per_jiffy/cpu-freq ratio
++ to work off of for future calculations, especially when
++ emerging from sleep when there is no current cpu frequency
++ to calculate from (because cpu-freq of 0 means sleep).
++ */
++ if (!saved_loops_per_jiffy) {
++ saved_loops_per_jiffy = loops_per_jiffy;
++ saved_cpu_freq = cur->cpu;
++ }
++
++ if (new->cpu) {
++ /* Normal change (not sleep), just compute. Always use
++ the "baseline" lpj and freq */
++ new->lpj =
++ dpm_compute_lpj(saved_loops_per_jiffy, saved_cpu_freq,
++ new->cpu);
++ } else {
++ /* If sleeping, keep the old LPJ */
++ new->lpj = loops_per_jiffy;
++ }
++}
++
++static void xpll_on(struct dpm_regs *regs)
++{
++ int tmp_cccr, tmp_ccsr;
++ int new_cpllon=0, new_ppllon=0, cur_cpllon=0;
++ int cur_ppllon=0, start_cpll=0, start_ppll=0;
++
++ tmp_ccsr = CCSR;
++
++ if ((regs->cccr & CCCR_CPDIS_BIT_ON) == 0)
++ new_cpllon = 1;
++ if ((regs->cccr & CCCR_PPDIS_BIT_ON) == 0)
++ new_ppllon = 1;
++ if (((tmp_ccsr >> 31) & 0x1) == 0)
++ cur_cpllon = 1;
++ if (((tmp_ccsr >> 30) & 0x1) == 0)
++ cur_ppllon = 1;
++
++ if ((new_cpllon == 1) && (cur_cpllon == 0))
++ start_cpll=1;
++
++ if ((new_ppllon == 1) && (cur_ppllon == 0))
++ start_ppll=1;
++
++ if ((start_cpll == 0) && (start_ppll == 0))
++ return;
++
++ /* NOTE: the Yellow Book says that exiting 13M mode requires a
++ PLL relock, which takes at least 120uS, so the book suggests
++ the OS could use a timer to keep busy until it is time to
++ check the CCSR bits which must happen before changing the
++ frequency back.
++
++ For now, we'll just loop.
++ */
++
++ /* From Yellow Book, page 3-31, section 3.5.7.5 13M Mode
++
++ Exiting 13M Mode:
++
++ 1. Remain in 13M mode, but early enable the PLL via
++ CCCR[CPDIS, PPDIS]=11, and CCCR[PLL_EARLY_EN]=1. Doing
++ so will allow the PLL to be started early.
++
++ 2. Read CCCR and compare to make sure that the data was
++ correctly written.
++
++ 3. Check to see if CCS[CPLOCK] and CCSR[PPLOCK] bits are
++ both set. Once these bits are both high, the PLLs are
++ locked and you may move on.
++
++ 4. Note that the CPU is still in 13M mode, but the PLLs are
++ started.
++
++ 5. Exit from 13M mode by writing CCCR[CPDIS, PPDIS]=00, but
++ maintain CCCR[PLL_EARLY_EN]=1. This bit will be cleared
++ by the imminent frequency change.
++ */
++
++ /* Step 1 */
++ tmp_cccr = CCCR;
++
++ if (start_cpll)
++ tmp_cccr |= CCCR_CPDIS_BIT_ON;
++
++ if(start_ppll)
++ tmp_cccr |= CCCR_PPDIS_BIT_ON;
++
++ tmp_cccr |= CCCR_PLL_EARLY_EN_BIT_ON;
++ CCCR = tmp_cccr;
++
++ /* Step 2 */
++ tmp_cccr = CCCR;
++
++#ifdef DEBUG
++ if ((tmp_cccr & CCCR_PLL_EARLY_EN_BIT_ON) != CCCR_PLL_EARLY_EN_BIT_ON)
++ printk(KERN_WARNING
++ "DPM: Warning: PLL_EARLY_EN is NOT on\n");
++
++ if ((start_cpll==1) &&
++ ((tmp_cccr & CCCR_CPDIS_BIT_ON) != CCCR_CPDIS_BIT_ON))
++ printk(KERN_WARNING
++ "DPM: Warning: CPDIS is NOT on\n");
++
++ if ((start_ppll==1) &&
++ (tmp_cccr & CCCR_PPDIS_BIT_ON) != CCCR_PPDIS_BIT_ON)
++ printk(KERN_WARNING
++ "DPM: Warning: PPDIS is NOT on\n");
++#endif
++
++ /* Step 3 */
++ {
++ /* Note: the point of this is to "wait" for the lock
++ bits to be set; the Yellow Book says this may take
++ a while, but observation indicates that it is
++ instantaneous.
++ */
++
++ long volatile int i = 0;
++
++ int cpll_complete=1;
++ int ppll_complete=1;
++
++ if (start_cpll == 1)
++ cpll_complete=0;
++
++ if (start_ppll == 1)
++ ppll_complete=0;
++
++ /* loop an arbitrarily large number of times rather than forever */
++ for (i = 0; i < 999999; i++) {
++ tmp_ccsr = CCSR;
++
++ if (tmp_ccsr & CCSR_CPLL_LOCKED)
++ cpll_complete=1;
++
++ if (tmp_ccsr & CCSR_PPLL_LOCKED)
++ ppll_complete=1;
++
++ if ((cpll_complete == 1) && (ppll_complete == 1))
++ break;
++ }
++ }
++
++ /* Step 4: NOP */
++
++ /* Step 5
++ Clear the PLL disable bits - do NOT do it here.
++ */
++
++ /* But leave EARLY_EN on; it will be cleared by the frequency change */
++ regs->cccr |= CCCR_PLL_EARLY_EN_BIT_ON;
++
++ /*
++ Step 6: Now continue on with the frequency change.
++ We do this step later because, if the voltage is too low,
++ we must ensure that it is raised before (or at the same
++ time as) entering the higher-frequency mode.
++ */
++}
++
++static int set_opt(struct dpm_opt *curop, struct dpm_opt *newop)
++{
++ struct dpm_md_opt *cur, *new;
++ int current_n = (CCSR & CCCR_N_MASK) >> 7;
++ int set_opt_flags = 0;
++ unsigned int cccr, clkcfg = 0;
++ unsigned long s;
++
++#define SET_OPT_CPUFREQ (1 << 0)
++#define SET_OPT_VOLTAGE (1 << 1)
++#define SET_OPT_TURBO_ON (1 << 2)
++#define SET_OPT_TURBO_OFF (1 << 3)
++#define SET_OPT_TURBO (SET_OPT_TURBO_ON | SET_OPT_TURBO_OFF)
++
++ pr_debug("set_opt: %s => %s\n", curop->name, newop->name);
++
++ cur = &curop->md_opt;
++ new = &newop->md_opt;
++ fully_define_opt(cur, new);
++
++ if (new->regs.voltage != cur->regs.voltage)
++ set_opt_flags |= SET_OPT_VOLTAGE;
++
++ if (new->cpu) {
++ if ((new->regs.cccr != cur->regs.cccr) ||
++ (new->regs.clkcfg != cur->regs.clkcfg)) {
++
++ /* Find out if it is *just* a turbo bit change */
++
++ if ((cur->l == new->l) &&
++ (cur->cccra == new->cccra) &&
++ (cur->b == new->b) &&
++ (cur->half_turbo == new->half_turbo)) {
++ /* If the real, current N is a turbo freq and
++ the new N is not a turbo freq, then set
++ TURBO_OFF and do not change N.
++ */
++ if ((cur->n > 1) && (new->n == 2))
++ set_opt_flags |= SET_OPT_TURBO_OFF;
++
++ /* Else if the current operating point's N is
++ not-turbo and the new N is the desired
++ destination N, then set TURBO_ON
++ */
++ else if ((cur->n == 2) && (new->n == current_n)) {
++ /* Desired N must be what is current
++ set in the CCCR/CCSR.
++ */
++ set_opt_flags |= SET_OPT_TURBO_ON;
++ }
++ /* Else, fall through to regular FCS */
++ }
++
++ if (!(set_opt_flags & SET_OPT_TURBO)) {
++ /* If this is not a Turbo-bit-only change, it
++ must be a regular FCS.
++ */
++ set_opt_flags |= SET_OPT_CPUFREQ;
++ }
++ loops_per_jiffy = new->lpj;
++ }
++
++ local_irq_save(s);
++
++ /* If exiting 13M mode (turn on PLL(s)), do some extra work
++ before changing the CPU frequency or voltage.
++ Only a combination of PLLs supported by the hardware may
++ be turned on; otherwise xpll_on(...) hangs the system.
++ */
++
++ if ((!cur->cpll_enabled && new->cpll_enabled) ||
++ (!cur->ppll_enabled && new->ppll_enabled))
++ xpll_on(&new->regs);
++
++ /* In accordance with Yellow Book section 3.7.6.3, "Coupling
++ Voltage Change with Frequency Change", always set the
++ voltage first (setting the FVC bit in the PCFR) and then do
++ the frequency change
++ */
++
++ if (set_opt_flags & SET_OPT_VOLTAGE) {
++ if (set_opt_flags & SET_OPT_CPUFREQ)
++ /* coupled voltage & freq change */
++ scale_voltage_coupled(&new->regs);
++ else
++ /* Scale CPU voltage un-coupled with freq */
++ scale_voltage(&new->regs);
++ }
++
++ if (set_opt_flags & SET_OPT_CPUFREQ) /* Scale CPU freq */
++ scale_cpufreq(&new->regs);
++
++ if ((set_opt_flags & SET_OPT_VOLTAGE) &&
++ (set_opt_flags & SET_OPT_CPUFREQ))
++ PCFR &= ~PCFR_FVC;
++
++ if (set_opt_flags & SET_OPT_TURBO) {
++ clkcfg = read_clkcfg();
++
++ /* Section 3.5.7 of the Yellow Book says that the F
++ bit will be left on after a FCS, so we need to
++ explicitly clear it. But do not change the B bit.
++ */
++
++ clkcfg &= ~(CLKCFG_F_BIT);
++
++ if (set_opt_flags & SET_OPT_TURBO_ON)
++ clkcfg = clkcfg | (CLKCFG_T_BIT);
++ else
++ clkcfg = clkcfg & ~(CLKCFG_T_BIT);
++
++ /* enable */
++ set_freq(clkcfg);
++ }
++
++ if (new->half_turbo != cur->half_turbo) {
++ if ((set_opt_flags & SET_OPT_CPUFREQ) ||
++ (set_opt_flags & SET_OPT_VOLTAGE)) {
++ /*
++ From the Yellow Book, p 3-106:
++
++ "Any two writes to CLKCFG or PWRMODE
++ registers must be separated by six 13-MHz
++ cycles. This requirement is achieved by
++ doing the write to the CLKCFG or POWERMODE
++ register, performing a read of CCCR, and
++ then comparing the value in the CLKCFG or
++ POWERMODE register to the written value
++ until it matches."
++
++ Since the setting of half turbo is a
++ separate write to CLKCFG, we need to adhere
++ to this requirement.
++ */
++
++ cccr = CCCR;
++ clkcfg = read_clkcfg();
++ while (clkcfg != new->regs.clkcfg)
++ clkcfg = read_clkcfg();
++ }
++
++ if (clkcfg == 0)
++ clkcfg = new->regs.clkcfg;
++
++ /* Turn off f-bit.
++
++ According to the Yellow Book, page 3-23, "If only
++ HT is set, F is clear, and B is not altered, then
++ the core PLL is not stopped." */
++
++ clkcfg = clkcfg & ~(CLKCFG_F_BIT);
++
++ /* set half turbo bit */
++
++ if (new->half_turbo)
++ clkcfg = clkcfg | (CLKCFG_HT_BIT);
++ else
++ clkcfg = clkcfg & ~(CLKCFG_HT_BIT);
++
++ /* enable */
++
++ set_freq(clkcfg);
++ loops_per_jiffy = new->lpj;
++ }
++
++ local_irq_restore(s);
++
++ } else {
++
++ /*
++ * A sleep operating point.
++ */
++
++#ifdef CONFIG_PM
++ /* NOTE: voltage needs i2c, so be sure to change
++ voltage BEFORE calling device_suspend
++ */
++
++ if (set_opt_flags & SET_OPT_VOLTAGE)
++ /* Scale CPU voltage un-coupled with freq */
++ scale_voltage(&new->regs);
++
++ if (new->sleep_mode == CPUMODE_STANDBY)
++ pm_suspend(PM_SUSPEND_STANDBY);
++ else if (new->sleep_mode == CPUMODE_DEEPSLEEP)
++ ; // not supported upstream yet
++ else
++ pm_suspend(PM_SUSPEND_MEM);
++
++ /* Here when we wake up. */
++#endif /*CONFIG_PM*/
++
++ /* Recursive call to switch back to the task state. */
++ dpm_set_os(DPM_TASK_STATE);
++ }
++
++#ifdef CONFIG_DPM_STATS
++ dpm_update_stats(&newop->stats, &dpm_active_opt->stats);
++#endif
++ dpm_active_opt = newop;
++ mb();
++
++ /* Devices only need to scale on a core frequency
++ change. Half-Turbo changes are separate from the regular
++ frequency changes, so Half-Turbo changes do not need to
++ trigger a device recalculation.
++
++ NOTE: turbo-mode-only changes could someday also be
++ optimized like Half-Turbo (to not trigger a device
++ recalc).
++ */
++
++ if (new->cpu && (set_opt_flags & SET_OPT_CPUFREQ))
++ /* Normal change (not sleep), just compute. Always use
++ the "baseline" lpj and freq */
++ dpm_driver_scale(SCALE_POSTCHANGE, newop);
++
++ return 0;
++}
++
++/* Fully determine the current machine-dependent operating point, and fill in a
++ structure presented by the caller. This should only be called when the
++ dpm_sem is held. This call can return an error if the system is currently at
++ an operating point that could not be constructed by dpm_md_init_opt(). */
++
++static int get_opt(struct dpm_opt *opt)
++{
++ unsigned int tmp_cccr;
++ unsigned int cpdis;
++ unsigned int ppdis;
++ struct dpm_md_opt *md_opt = &opt->md_opt;
++
++ /* The current clock status should be read from CCSR, but
++    there is no A bit in the CCSR, so we grab that one from
++    the CCCR instead.
++ */
++ tmp_cccr = CCCR;
++ md_opt->cccra = (tmp_cccr >> 25) & 0x1; /* cccr[A]: bit 25 */
++
++ /* NOTE: the current voltage is not obtained; it is left as 0
++    in the opt, which means no voltage change at all.
++ */
++
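++ /* Read the live clock status from CCSR; it is stored in the cccr
++    field because CCSR reports the effective L and 2N values in the
++    same bit positions as CCCR (hence the CCCR_* masks below). */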
++ md_opt->regs.cccr = CCSR;
++
++ md_opt->l = md_opt->regs.cccr & CCCR_L_MASK; /* Get L */
++ md_opt->n = (md_opt->regs.cccr & CCCR_N_MASK) >> 7; /* Get 2N */
++
++ /* This should never really be less than 2 */
++ if (md_opt->n < 2) {
++ md_opt->n = 2;
++ }
++
++ md_opt->regs.clkcfg = read_clkcfg();
++ md_opt->b = (md_opt->regs.clkcfg >> 3) & 0x1; /* Fast Bus (b): bit 3 */
++ md_opt->turbo = md_opt->regs.clkcfg & 0x1; /* Turbo (T): bit 0 */
++ md_opt->half_turbo = (md_opt->regs.clkcfg >> 2) & 0x1; /* HalfTurbo: bit 2 */
++
++ calculate_lcd_freq(md_opt);
++
++ /* Are any of the PLLs on? */
++ cpdis = ((md_opt->regs.cccr >> 31) & 0x1);
++ ppdis = ((md_opt->regs.cccr >> 30) & 0x1);
++ /* Newer revisions still require that if the CPLL is on,
++    then the PPLL must also be on.
++ */
++ if ((cpdis == 0) && (ppdis != 0)) {
++ /* CPLL=on with PPLL=off is NOT supported by the hardware.
++    NOTE: the "B0" revision has even more restrictive
++    requirements on the PLLs.
++ */
++ printk(KERN_WARNING "DPM: cpdis and ppdis are not in sync!\n");
++ }
++
++ md_opt->cpll_enabled = (cpdis == 0);
++ md_opt->ppll_enabled = (ppdis == 0);
++
++ /* Shift right by 1 to divide by 2 (because md_opt->n is really 2*N) */
++ if (md_opt->turbo) {
++ md_opt->cpu = (13000 * md_opt->l * md_opt->n) >> 1;
++ } else {
++ /* The turbo bit is off, so skip the N multiplier (no matter
++    what N really is) and use the Run frequency (13000 kHz * L).
++ */
++ md_opt->cpu = 13000 * md_opt->l;
++ }
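++
++ /* Illustrative example (values assumed, not taken from hardware):
++    with L = 16 and 2N = 3, turbo on gives
++    cpu = (13000 * 16 * 3) >> 1 = 312000 kHz (312 MHz), while
++    turbo off gives cpu = 13000 * 16 = 208000 kHz (208 MHz). */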
++
++ return 0;
++}
++
++/****************************************************************************
++ * DPM Idle Handler
++ ****************************************************************************/
++
++static void (*orig_idle) (void);
++
++static void dpm_pxa27x_idle(void)
++{
++ extern void default_idle(void);
++
++ if (orig_idle)
++ orig_idle();
++ else {
++ /*
++  * This duplicates default_idle() in arch/arm/kernel/process.c,
++  * so be sure to watch for updates there.
++  */
++
++ local_irq_disable();
++ if (!need_resched()) {
++ timer_dyn_reprogram();
++ arch_idle();
++ }
++ local_irq_enable();
++ }
++}
++
++/****************************************************************************
++ * Initialization/Exit
++ ****************************************************************************/
++
++extern void (*pm_idle) (void);
++
++static void startup(void)
++{
++ orig_idle = pm_idle;
++ pm_idle = dpm_idle;
++}
++
++static void cleanup(void)
++{
++ pm_idle = orig_idle;
++}
++
++static int __init dpm_pxa27x_init(void)
++{
++ printk("PXA27x Dynamic Power Management\n");
++
++ if (freq_init()) {
++ printk("PXA27x DPM init failed\n");
++ return -1;
++ }
++
++ vcs_init();
++ dpm_md.init_opt = init_opt;
++ dpm_md.set_opt = set_opt;
++ dpm_md.get_opt = get_opt;
++ dpm_md.check_constraint = dpm_default_check_constraint;
++ dpm_md.idle = dpm_pxa27x_idle;
++ dpm_md.startup = startup;
++ dpm_md.cleanup = cleanup;
++ return 0;
++}
++
++static void __exit dpm_pxa27x_exit(void)
++{
++ freq_cleanup();
++}
++
++__initcall(dpm_pxa27x_init);
++__exitcall(dpm_pxa27x_exit);
+Index: linux-2.6.16/drivers/serial/pxa.c
+===================================================================
+--- linux-2.6.16.orig/drivers/serial/pxa.c
++++ linux-2.6.16/drivers/serial/pxa.c
+@@ -43,6 +43,8 @@
+ #include <linux/tty.h>
+ #include <linux/tty_flip.h>
+ #include <linux/serial_core.h>
++#include <linux/dpm.h>
++#include <linux/notifier.h>
+
+ #include <asm/io.h>
+ #include <asm/hardware.h>
+@@ -57,6 +59,7 @@ struct uart_pxa_port {
+ unsigned char mcr;
+ unsigned int lsr_break_flag;
+ unsigned int cken;
++ unsigned int baud;
+ char *name;
+ };
+
+@@ -473,6 +476,7 @@ serial_pxa_set_termios(struct uart_port
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ quot = uart_get_divisor(port, baud);
++ up->baud = baud; /* save for DPM scale callback */
+
+ if ((up->port.uartclk / quot) < (2400 * 16))
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR1;
+@@ -817,6 +821,44 @@ static int serial_pxa_resume(struct plat
+ return 0;
+ }
+
++static int serial_pxa_scale(struct notifier_block *self,
++ unsigned long level, void *newop)
++{
++ int i;
++ int n = sizeof(serial_pxa_ports) / sizeof(struct uart_pxa_port);
++ int ccsr = CCSR;
++
++ for (i=0; i<n; ++i) {
++ struct uart_pxa_port *up = (struct uart_pxa_port *) &serial_pxa_ports[i];
++
++ if (up->baud) {
++ unsigned int quot;
++
++ if (ccsr & (1 << 30)) {
++ /* PPLL is off: the UART is clocked at 13 MHz */
++ if (up->port.uartclk == 13000000)
++ return 0;
++
++ up->port.uartclk = 13000000;
++ } else {
++ /* PPLL is on: the UART is clocked at 14.7456 MHz */
++ if (up->port.uartclk == 921600 * 16)
++ return 0;
++
++ up->port.uartclk = 921600 * 16;
++ }
++
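++ /* uart_get_divisor() rederives the divisor from the updated
++    uartclk; e.g. at an assumed 115200 baud (purely illustrative)
++    that is roughly 14745600 / (16 * 115200) = 8 on the
++    14.7456 MHz clock and about 7 on the 13 MHz clock. */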
++ quot = uart_get_divisor(&up->port, up->baud);
++ serial_out(up, UART_LCR, up->lcr | UART_LCR_DLAB);/* set DLAB */
++ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
++ serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
++ serial_out(up, UART_LCR, up->lcr); /* reset DLAB */
++ }
++ }
++
++ return 0;
++}
++
+ static int serial_pxa_probe(struct platform_device *dev)
+ {
+ serial_pxa_ports[dev->id].port.dev = &dev->dev;
+@@ -848,6 +890,10 @@ static struct platform_driver serial_pxa
+ },
+ };
+
++static struct notifier_block serial_pxa_nb = {
++ .notifier_call = serial_pxa_scale,
++};
++
+ int __init serial_pxa_init(void)
+ {
+ int ret;
+@@ -860,6 +906,9 @@ int __init serial_pxa_init(void)
+ if (ret != 0)
+ uart_unregister_driver(&serial_pxa_reg);
+
++ if (!ret)
++ dpm_register_scale(&serial_pxa_nb, SCALE_POSTCHANGE);
++
+ return ret;
+ }
+
+@@ -867,6 +916,7 @@ void __exit serial_pxa_exit(void)
+ {
+ platform_driver_unregister(&serial_pxa_driver);
+ uart_unregister_driver(&serial_pxa_reg);
++ dpm_unregister_scale(&serial_pxa_nb, SCALE_POSTCHANGE);
+ }
+
+ module_init(serial_pxa_init);
+Index: linux-2.6.16/drivers/video/pxafb.c
+===================================================================
+--- linux-2.6.16.orig/drivers/video/pxafb.c
++++ linux-2.6.16/drivers/video/pxafb.c
+@@ -1000,6 +1000,39 @@ static int pxafb_resume(struct platform_
+ #define pxafb_resume NULL
+ #endif
+
++#ifdef CONFIG_DPM
++#include <linux/dpm.h>
++
++#define WAIT_FOR_LCD_INTR(reg,intr,timeout) ({ \
++ int __done =0; \
++ int __t = timeout; \
++ while (__t) { \
++ __done = (reg) & (intr); \
++ if (__done) break; \
++ udelay(50); \
++ __t--; \
++ } \
++ __done; \
++})
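++
++/* WAIT_FOR_LCD_INTR polls for up to timeout * 50us, so the (currently
++   disabled) LCSR_LDD wait below gives the controller at most
++   20 * 50us = 1ms to signal LCD Disable Done. */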
++
++static int pxafb_scale(struct notifier_block *nb, unsigned long val, void *data)
++{
++#if 0 /* Without disabling/enabling overlays, this doesn't work well yet */
++ struct pxafb_info *fbi = TO_INF(nb, scale);
++ u_int pcd;
++
++ LCSR = 0xffffffff; /* Clear LCD Status Register */
++ LCCR0 &= ~LCCR0_LDM; /* Enable LCD Disable Done Interrupt */
++ LCCR0 |= LCCR0_DIS; /* Disable LCD Controller */
++ WAIT_FOR_LCD_INTR(LCSR,LCSR_LDD,20);
++ pcd = get_pcd(fbi->fb.var.pixclock);
++ fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | LCCR3_PixClkDiv(pcd);
++ pxafb_enable_controller(fbi);
++#endif
++ return 0;
++}
++#endif
++
+ /*
+ * pxafb_map_video_memory():
+ * Allocates the DRAM memory for the frame buffer. This buffer is
+@@ -1367,6 +1400,10 @@ int __init pxafb_probe(struct platform_d
+ cpufreq_register_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER);
+ cpufreq_register_notifier(&fbi->freq_policy, CPUFREQ_POLICY_NOTIFIER);
+ #endif
++#ifdef CONFIG_DPM
++ fbi->scale.notifier_call = pxafb_scale;
++ dpm_register_scale(&fbi->scale, SCALE_POSTCHANGE);
++#endif
+
+ /*
+ * Ok, now enable the LCD controller
+Index: linux-2.6.16/drivers/video/pxafb.h
+===================================================================
+--- linux-2.6.16.orig/drivers/video/pxafb.h
++++ linux-2.6.16/drivers/video/pxafb.h
+@@ -95,6 +95,10 @@ struct pxafb_info {
+ struct notifier_block freq_transition;
+ struct notifier_block freq_policy;
+ #endif
++
++#ifdef CONFIG_DPM
++ struct notifier_block scale;
++#endif
+ };
+
+ #define TO_INF(ptr,member) container_of(ptr,struct pxafb_info,member)