summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--recipes/linux/linux-sgh-i900/sgh-i900-support.patch13031
-rw-r--r--recipes/linux/linux-sgh-i900/sgh_i900_defconfig414
-rw-r--r--recipes/linux/linux-sgh-i900/wm97xx-ts-fix.patch130
-rw-r--r--recipes/linux/linux-sgh-i900_2.6.32.bb (renamed from recipes/linux/linux-sgh-i900_2.6.29.bb)10
4 files changed, 13283 insertions, 302 deletions
diff --git a/recipes/linux/linux-sgh-i900/sgh-i900-support.patch b/recipes/linux/linux-sgh-i900/sgh-i900-support.patch
new file mode 100644
index 0000000000..28d65938a1
--- /dev/null
+++ b/recipes/linux/linux-sgh-i900/sgh-i900-support.patch
@@ -0,0 +1,13031 @@
+diff -ur linux-2.6.32/arch/arm/Kconfig kernel/arch/arm/Kconfig
+--- linux-2.6.32/arch/arm/Kconfig 2009-12-03 05:51:21.000000000 +0200
++++ kernel/arch/arm/Kconfig 2009-12-12 16:09:25.656278659 +0200
+@@ -1502,6 +1502,112 @@
+ config ARCH_SUSPEND_POSSIBLE
+ def_bool y
+
++config PXA_DVFM
++ bool "PXA Processor High Level DVFM support"
++ depends on PM
++ default y
++ help
++	  This enables the dynamic frequency and voltage changes framework
++ for PXA Processor series.
++
++config PXA_MIPSRAM
++ bool "PXA MIPSRAM monitoring support"
++ default n
++ help
++ Enable MIPS RAM monitoring for process switching implemented in
++ the scheduler
++
++config PXA3xx_DVFM
++ bool "PXA3xx Processor DVFM support"
++ depends on PM && PXA3xx && PXA_DVFM
++# select PXA3xx_ARAVA
++# select PXA3xx_MICCO
++ default y
++ help
++	  This implements the dynamic frequency and voltage changes features
++ for PXA3xx Processor particularly.
++
++config PXA3xx_DVFM_STATS
++ bool "PXA3xx/PXA930 Processor DVFM Statistics support"
++ depends on PXA3xx_DVFM
++ select RELAY
++ select DEBUG_FS
++ default y
++ help
++ This is used to collect statistics during the dynamic frequency
++ and voltage changes
++
++config PXA3xx_PMU
++ bool "PXA3xx/PXA930 Processor PMU support"
++ default y
++ help
++ PXA3xx/PXA930 provide Performance Monitor Unit to report
++ CPU statistics info.
++
++config PXA3xx_PRM
++ bool "PXA3xx Processor Profiler Resource Manager"
++ depends on PXA3xx_DVFM && PXA3xx_PMU
++ default y
++ help
++ This enables the PXA3xx Processor Profiler Resource Manager
++
++config IPM
++ bool "Marvell(R) Scalable Power Management Profiler"
++ depends on PXA3xx_PRM
++ default y
++ help
++ Support Profiler of Marvell(R) Scalable Power Management
++
++config IPMC
++ bool "Marvell(R) Scalable Power Management Userspace Daemon"
++ depends on PXA3xx_PRM
++ default n
++ help
++ Support Userspace Daemon of Marvell(R) Scalable Power Management
++
++config BPMD
++ bool "Borqs Scalable Power Management Kernel Daemon"
++ depends on PXA3xx_PRM
++ default y
++ help
++ Kernel Daemon of Borqs Scalable Power Management
++
++config TEST_BPMD
++ bool "Borqs Scalable Power Management Test Module"
++ depends on PXA3xx_PRM
++ default y
++ help
++ Test Module of Borqs Scalable Power Management
++
++config IPM_DEEPIDLE
++ bool "PXA3xx/PXA930 Processor Deep Idle support"
++ depends on IPM
++ default y
++ help
++ This enables the kernel support for PXA3xx/PXA930
++ Processor Deep Idle (D0CS Idle)
++
++config IPM_D2IDLE
++ bool "Support PXA3xx/PXA930 Processor D2 Mode as Idle"
++ depends on IPM && PXA_32KTIMER
++ default y
++ help
++ This enables kernel support PXA3xx/PXA930 D2 idle
++
++config PERIPHERAL_STATUS
++ bool "Support list peripheral status of pm"
++ depends on PM
++ default y
++ help
++ This enables kernel support peripheral status calculate
++
++config IPM_CGIDLE
++ bool "Support PXA935 Processor Clock Gated Mode as Idle"
++ depends on IPM && PXA_32KTIMER
++ default y
++ help
++ This enables kernel support PXA935 D2 idle
++
+ endmenu
+
+ source "net/Kconfig"
+diff -ur linux-2.6.32/arch/arm/mach-pxa/Kconfig kernel/arch/arm/mach-pxa/Kconfig
+--- linux-2.6.32/arch/arm/mach-pxa/Kconfig 2009-12-03 05:51:21.000000000 +0200
++++ kernel/arch/arm/mach-pxa/Kconfig 2009-12-12 16:09:26.426281936 +0200
+@@ -27,6 +27,12 @@
+ bool "PXA950 (codename Tavor-PV2)"
+ select CPU_PXA930
+
++config PXA3xx_PMIC
++ bool "PXA3xx PMIC support"
++ default y
++ help
++ PMIC support
++
+ endmenu
+
+ endif
+@@ -303,6 +309,18 @@
+ select HAVE_PWM
+ select PXA_HAVE_BOARD_IRQS
+
++config MACH_SGH_I900
++ bool "Samsung SGH-i900 (Omnia) phone"
++ select PXA3xx
++ select CPU_PXA310
++ select HAVE_PWM
++
++config MACH_SGH_I780
++ bool "Samsung SGH-i780 phone"
++ select PXA3xx
++ select CPU_PXA310
++ select HAVE_PWM
++
+ config MACH_LITTLETON
+ bool "PXA3xx Form Factor Platform (aka Littleton)"
+ select PXA3xx
+diff -ur linux-2.6.32/arch/arm/mach-pxa/Makefile kernel/arch/arm/mach-pxa/Makefile
+--- linux-2.6.32/arch/arm/mach-pxa/Makefile 2009-12-03 05:51:21.000000000 +0200
++++ kernel/arch/arm/mach-pxa/Makefile 2009-12-12 16:09:26.426281936 +0200
+@@ -5,6 +5,15 @@
+ # Common support (must be linked before board specific support)
+ obj-y += clock.o devices.o generic.o irq.o \
+ time.o reset.o
++obj-$(CONFIG_PXA_DVFM) += dvfm.o
++ifeq ($(CONFIG_PXA3xx), y)
++ obj-$(CONFIG_PXA3xx_PMIC) += pxa3xx_pmic.o
++ obj-$(CONFIG_PXA3xx_DVFM) += pxa3xx_dvfm.o pxa3xx_dvfm_ll.o
++ obj-$(CONFIG_PXA3xx_PMU) += pmu.o pmu_ll.o
++ obj-$(CONFIG_PXA3xx_PRM) += prm.o
++ obj-$(CONFIG_BPMD) += bpm.o bpm_prof.o
++endif
++
+ obj-$(CONFIG_PM) += pm.o sleep.o standby.o
+
+ ifeq ($(CONFIG_CPU_FREQ),y)
+@@ -66,6 +75,8 @@
+ obj-$(CONFIG_MACH_PALMZ72) += palmz72.o
+ obj-$(CONFIG_MACH_TREO680) += treo680.o
+ obj-$(CONFIG_ARCH_VIPER) += viper.o
++obj-$(CONFIG_MACH_SGH_I900) += sgh_i780_i900.o sgh_smd.o sgh_rpc.o
++obj-$(CONFIG_MACH_SGH_I780) += sgh_i780_i900.o sgh_smd.o sgh_rpc.o
+
+ ifeq ($(CONFIG_MACH_ZYLONITE),y)
+ obj-y += zylonite.o
+diff -ur linux-2.6.32/arch/arm/mach-pxa/bpm.c kernel/arch/arm/mach-pxa/bpm.c
+--- linux-2.6.32/arch/arm/mach-pxa/bpm.c 2009-12-13 12:57:59.831957275 +0200
++++ kernel/arch/arm/mach-pxa/bpm.c 2009-12-12 16:09:26.429614458 +0200
+@@ -0,0 +1,1814 @@
++/*
++ * linux/arch/arm/mach-pxa/bpm.c
++ *
++ * Provide bpm thread to scale system voltage & frequency dynamically.
++ *
++ * Copyright (C) 2008 Borqs Corporation.
++ *
++ * Author: Emichael Li <emichael.li@borqs.com>
++ *
++ * This software program is licensed subject to the GNU General Public License
++ * (GPL).Version 2,June 1991, available at http://www.fsf.org/copyleft/gpl.html
++ *
++ */
++
++#include <linux/kernel.h>
++#include <mach/prm.h>
++#include <mach/dvfm.h>
++#include <mach/mspm_prof.h>
++#include <linux/sysdev.h>
++#include <linux/delay.h>
++#include <mach/bpm.h>
++#include <mach/hardware.h>
++#include <mach/pxa3xx-regs.h>
++#include <linux/list.h>
++#include <asm/io.h>
++#include <asm/mach-types.h>
++#include <linux/freezer.h>
++#include <mach/regs-ost.h>
++#ifdef CONFIG_ANDROID_POWER
++#include <linux/android_power.h>
++#endif
++
++#define DEBUG
++
++#ifdef DEBUG
++#define PM_BUG_ON(condition) \
++ do { \
++ if (unlikely(condition)) { \
++ printk(KERN_ERR "BUG: failure at %s:%d/%s()!\n", \
++ __FILE__, __LINE__, __FUNCTION__); \
++ WARN_ON(1); \
++ } \
++ } while(0)
++#define DPRINTK(fmt,args...) \
++ do { \
++ if (g_bpm_log_level) \
++ printk(KERN_ERR "%s: " fmt, __FUNCTION__ , ## args); \
++ } while (0)
++#else
++#define PM_BUG_ON(condition) \
++ do { \
++ if (unlikely(condition)) { \
++ printk(KERN_ERR "BUG: failure at %s:%d/%s()!\n", \
++ __FILE__, __LINE__, __FUNCTION__); \
++ } \
++ } while(0)
++#define DPRINTK(fmt,args...) \
++ do {} while (0)
++#endif
++
++/*****************************************************************************/
++/* */
++/* Policy variables */
++/* */
++/*****************************************************************************/
++#define REDUCE_624M_DUTYCYCLE (1)
++
++#define BPM_FREQ_POLICY_NUM (3)
++#define BPM_PROFILER_WINDOW (100)
++#define SYSTEM_BOOTUP_TIME (15000)
++#define BPM_MAX_OP_NUM (10)
++
++struct bpm_freq_bonus_arg {
++ int mips;
++ int mem_stall;
++};
++
++struct bpm_freq_policy {
++ int lower[BPM_FREQ_POLICY_NUM];
++ int higher[BPM_FREQ_POLICY_NUM];
++};
++
++#define CONSTRAINT_ID_LEN (32)
++struct bpm_cons {
++ struct list_head list;
++ char sid[CONSTRAINT_ID_LEN];
++ int count;
++ unsigned long ms;
++ unsigned long tmp_ms;
++ unsigned long tm;
++};
++
++struct bpm_cons_head {
++ struct list_head list;
++};
++
++/* manage all the ops which are supported by the hardware */
++static struct dvfm_op g_dyn_ops[BPM_MAX_OP_NUM];
++static spinlock_t g_dyn_ops_lock = SPIN_LOCK_UNLOCKED;
++
++static struct bpm_cons_head g_bpm_cons[BPM_MAX_OP_NUM];
++
++/* map the op from active ops to g_dyn_ops[] */
++static int g_active_ops_map[BPM_MAX_OP_NUM];
++static int g_active_ops_num;
++static int g_active_cur_idx = -1;
++static int g_prefer_op_idx;
++static int g_active_bonus[BPM_MAX_OP_NUM][BPM_MAX_OP_NUM * 2 - 1];
++struct bpm_freq_policy g_active_policy[BPM_MAX_OP_NUM];
++
++/*****************************************************************************/
++/* */
++/*                     Framework Supported Variables                         */
++/* */
++/*****************************************************************************/
++
++int (*pipm_start_pmu) (void *) = NULL;
++EXPORT_SYMBOL(pipm_start_pmu);
++int (*pipm_stop_pmu)(void) = NULL;
++EXPORT_SYMBOL(pipm_stop_pmu);
++
++static int g_bpm_thread_exit;
++int g_bpm_enabled;
++static wait_queue_head_t g_bpm_enabled_waitq;
++
++static int g_profiler_window = BPM_PROFILER_WINDOW;
++static int g_bpm_log_level = 1;
++struct completion g_bpm_thread_over;
++
++extern struct sysdev_class cpu_sysdev_class;
++
++static struct bpm_event_queue g_bpm_event_queue;
++static spinlock_t g_bpm_event_queue_lock = SPIN_LOCK_UNLOCKED;
++
++#ifdef CONFIG_TEST_BPMD
++static int g_cpuload_mode;
++#endif
++
++static int dvfm_dev_idx;
++
++extern int __dvfm_enable_op(int index, int dev_idx);
++extern int __dvfm_disable_op2(int index, int dev_idx);
++extern int cur_op;
++extern struct info_head dvfm_trace_list;
++
++extern int g_dvfm_disabled;
++
++#ifdef CONFIG_MTD_NAND_HSS_FIX
++extern atomic_t nand_in_cmd;
++#endif
++/*****************************************************************************/
++/* */
++/* Blink Variables */
++/* */
++/*****************************************************************************/
++#define DVFM_BLINK_OWNER_LEN (16)
++
++struct dvfm_blink_info {
++ int time;
++ char name[DVFM_BLINK_OWNER_LEN];
++};
++
++static int g_dvfm_blink = 0;
++static struct timer_list g_dvfm_blink_timer;
++static struct dvfm_blink_info g_dvfm_binfo;
++static unsigned long g_dvfm_blink_timeout = 0;
++
++/*****************************************************************************/
++/* */
++/* android power interface */
++/* */
++/*****************************************************************************/
++static int g_android_suspended = 0;
++
++#ifdef CONFIG_ANDROID_POWER
++void bpm_android_suspend_handler(android_early_suspend_t *h)
++{
++ unsigned long flags;
++ local_irq_save(flags);
++ g_android_suspended = 1;
++ local_irq_restore(flags);
++}
++
++void bpm_android_resume_handler(android_early_suspend_t *h)
++{
++ unsigned long flags;
++ local_irq_save(flags);
++ g_android_suspended = 0;
++ local_irq_restore(flags);
++}
++
++static android_early_suspend_t bpm_early_suspend = {
++ .level = 98,
++ .suspend = bpm_android_suspend_handler,
++ .resume = bpm_android_resume_handler,
++};
++#endif
++
++static inline int is_out_d0cs(void)
++{
++#ifdef CONFIG_PXA3xx_DVFM
++ extern int out_d0cs;
++ return out_d0cs;
++#endif
++ return 0;
++}
++
++/*****************************************************************************/
++/* */
++/* BPMD Event Queue */
++/* */
++/*****************************************************************************/
++
++static int bpmq_init(void)
++{
++ g_bpm_event_queue.head = g_bpm_event_queue.tail = 0;
++ g_bpm_event_queue.len = 0;
++ init_waitqueue_head(&g_bpm_event_queue.waitq);
++ return 0;
++}
++
++static int bpmq_clear(void)
++{
++ unsigned long flag;
++
++ spin_lock_irqsave(&g_bpm_event_queue_lock, flag);
++
++ g_bpm_event_queue.head = g_bpm_event_queue.tail = 0;
++ g_bpm_event_queue.len = 0;
++
++ spin_unlock_irqrestore(&g_bpm_event_queue_lock, flag);
++
++ return 0;
++}
++
++static int bpmq_get(struct bpm_event *e)
++{
++ unsigned long flag;
++
++ spin_lock_irqsave(&g_bpm_event_queue_lock, flag);
++
++ if (!g_bpm_event_queue.len) {
++ spin_unlock_irqrestore(&g_bpm_event_queue_lock, flag);
++ printk(KERN_ERR "Logic error, please check bpmq_empty()\n");
++ return -1;
++ }
++ memcpy(e, g_bpm_event_queue.bpmes + g_bpm_event_queue.tail,
++ sizeof(struct bpm_event));
++ g_bpm_event_queue.len--;
++ g_bpm_event_queue.tail =
++ (g_bpm_event_queue.tail + 1) % MAX_BPM_EVENT_NUM;
++
++ spin_unlock_irqrestore(&g_bpm_event_queue_lock, flag);
++
++ return 0;
++}
++
++static int bpmq_put(struct bpm_event *e)
++{
++ unsigned long flag;
++ static int err_cnt = 0;
++
++ if (unlikely(0 == g_bpm_enabled))
++ return 0;
++
++ spin_lock_irqsave(&g_bpm_event_queue_lock, flag);
++
++ if (g_bpm_event_queue.len == MAX_BPM_EVENT_NUM) {
++ if (++err_cnt > 0) {
++ printk(KERN_ERR "bpm queue over flow!\n");
++ show_state();
++ printk(KERN_ERR "send event many times instantly?");
++ dump_stack();
++ }
++ spin_unlock_irqrestore(&g_bpm_event_queue_lock, flag);
++ return -1;
++ }
++ memcpy(g_bpm_event_queue.bpmes + g_bpm_event_queue.head, e,
++ sizeof(struct bpm_event));
++ g_bpm_event_queue.len++;
++ g_bpm_event_queue.head =
++ (g_bpm_event_queue.head + 1) % MAX_BPM_EVENT_NUM;
++
++ spin_unlock_irqrestore(&g_bpm_event_queue_lock, flag);
++
++ wake_up_interruptible(&g_bpm_event_queue.waitq);
++
++ return 0;
++}
++
++static __inline int bpmq_empty(void)
++{
++ return (g_bpm_event_queue.len > 0) ? 0 : 1;
++}
++
++int bpm_event_notify(int type, int kind, void *info, unsigned int info_len)
++{
++ struct bpm_event event;
++ int len = 0;
++
++ if (info_len > INFO_SIZE)
++ len = INFO_SIZE;
++ else if ((info_len < INFO_SIZE) && (info_len > 0))
++ len = info_len;
++ memset(&event, 0, sizeof(struct bpm_event));
++ event.type = type;
++ event.kind = kind;
++ if ((len > 0) && (info != NULL)) {
++ memcpy(event.info, info, len);
++ }
++ if (0 != bpmq_put(&event)) {
++ len = -1;
++ }
++
++/* DPRINTK("type: %d kind: %d, len(ret): %d\n", type, kind, len); */
++ return len;
++}
++
++EXPORT_SYMBOL(bpm_event_notify);
++
++/*****************************************************************************/
++/* */
++/* BPMD PMU Interface */
++/* */
++/*****************************************************************************/
++
++static int bpm_start_pmu(void)
++{
++ int ret = -ENXIO;
++ struct ipm_profiler_arg pmu_arg;
++
++ if (pipm_start_pmu != NULL) {
++ pmu_arg.size = sizeof(struct ipm_profiler_arg);
++/* pmu_arg.flags = IPM_IDLE_PROFILER | IPM_PMU_PROFILER; */
++ pmu_arg.flags = IPM_IDLE_PROFILER;
++ pmu_arg.window_size = g_profiler_window;
++
++ pmu_arg.pmn0 = PXA3xx_EVENT_EXMEM;
++ pmu_arg.pmn1 = PXA3xx_EVENT_DMC_NOT_EMPTY;
++ pmu_arg.pmn2 = PMU_EVENT_POWER_SAVING;
++ pmu_arg.pmn3 = PMU_EVENT_POWER_SAVING;
++
++ ret = pipm_start_pmu(&pmu_arg);
++ } else {
++ printk(KERN_CRIT "No profiler\n");
++ PM_BUG_ON(1);
++ }
++
++ return ret;
++}
++
++static int bpm_stop_pmu(void)
++{
++ pipm_stop_pmu();
++ return 0;
++}
++
++/*****************************************************************************/
++/* */
++/* BPMD POLICY */
++/* */
++/*****************************************************************************/
++
++static int bpm_dump_policy(void)
++{
++#define TMP_BUF_SIZE (4096)
++ int i, j;
++ char *buf = kmalloc(TMP_BUF_SIZE, GFP_KERNEL);
++ char *s = NULL;
++
++ if (NULL == buf) {
++ printk(KERN_ERR "Can not alloc memory\n");
++ return 0;
++ }
++
++ s = buf;
++ memset(s, 0, TMP_BUF_SIZE);
++
++ s += sprintf(s, "--------------BPM DUMP POLICY BEGIN--------------\n");
++ s += sprintf(s, "dyn_boot_op = %d\n", dvfm_get_defop());
++ s += sprintf(s, "g_active_ops_maps:\n");
++
++ for (i = 0; i < BPM_MAX_OP_NUM; ++i)
++ s += sprintf(s, "%8d ", g_active_ops_map[i]);
++ s += sprintf(s, "\n");
++
++ s += sprintf(s, "g_active_ops_num: %d\n", g_active_ops_num);
++ s += sprintf(s, "g_active_cur_idx: %d\n", g_active_cur_idx);
++
++ s += sprintf(s, "g_active_policy:\n");
++ for (i = 0; i < BPM_MAX_OP_NUM; ++i) {
++ for (j = 0; j < BPM_FREQ_POLICY_NUM; ++j) {
++ s += sprintf(s, "%8d ", g_active_policy[i].lower[j]);
++ }
++
++ for (j = 0; j < BPM_FREQ_POLICY_NUM; ++j) {
++ s += sprintf(s, "%8d ", g_active_policy[i].higher[j]);
++ }
++ s += sprintf(s, "\n");
++ }
++
++ DPRINTK("%s", buf);
++
++ s = buf;
++ memset(s, 0, TMP_BUF_SIZE);
++
++ s += sprintf(s, "g_active_bonus:\n");
++ for (i = 0; i < BPM_MAX_OP_NUM; ++i) {
++ for (j = 0; j < BPM_MAX_OP_NUM * 2 - 1; ++j) {
++ s += sprintf(s, "%8d ", g_active_bonus[i][j]);
++ }
++ s += sprintf(s, "\n");
++ }
++
++ DPRINTK("%s", buf);
++
++ s = buf;
++ memset(s, 0, TMP_BUF_SIZE);
++
++ s += sprintf(s, "g_dyn_ops num: %d\n",
++ sizeof(g_dyn_ops) / sizeof(struct dvfm_op));
++
++ s += sprintf(s, "g_dyn_ops:\n");
++
++ for (i = 0; i < sizeof(g_dyn_ops) / sizeof(struct dvfm_op); ++i) {
++ s += sprintf(s, "%8d %8d %8d %s\n",
++ g_dyn_ops[i].index,
++ g_dyn_ops[i].count,
++ g_dyn_ops[i].cpu_freq, g_dyn_ops[i].name);
++ }
++ s += sprintf(s, "--------------BPM DUMP POLICY END----------------\n");
++
++ DPRINTK("%s", buf);
++
++ kfree(buf);
++ return 0;
++}
++
++static int build_active_ops(void)
++{
++ int i, j;
++ int pre_idx;
++ int cur_idx;
++ int pre_freq, cur_freq, pre_ratio;
++ int m, n;
++
++ memset(g_active_ops_map, -1, sizeof(g_active_ops_map));
++
++ for (i = 0, j = 0; i < BPM_MAX_OP_NUM; ++i) {
++ if (g_dyn_ops[i].count == 0 && g_dyn_ops[i].name != NULL
++ && !dvfm_check_active_op(g_dyn_ops[i].index))
++ g_active_ops_map[j++] = i;
++ }
++
++ g_active_ops_num = j;
++ g_active_cur_idx = -1;
++
++ memset(g_active_bonus, -1, sizeof(g_active_bonus));
++ memset(g_active_policy, -1, sizeof(g_active_policy));
++
++ for (i = 0; i < g_active_ops_num; ++i) {
++ g_active_policy[i].higher[0] = 80;
++ g_active_policy[i].higher[1] = 95;
++ g_active_policy[i].higher[2] = 100;
++
++ if (i == 0) {
++ memset(g_active_policy[i].lower, 0,
++ sizeof(g_active_policy[i].lower));
++ cur_idx = g_active_ops_map[i];
++ cur_freq = g_dyn_ops[cur_idx].cpu_freq;
++ if (cur_freq == 60) {
++ g_active_policy[i].higher[0] = 90;
++ }
++ } else {
++ pre_idx = g_active_ops_map[i - 1];
++ cur_idx = g_active_ops_map[i];
++ pre_freq = g_dyn_ops[pre_idx].cpu_freq;
++ cur_freq = g_dyn_ops[cur_idx].cpu_freq;
++ pre_ratio = g_active_policy[i - 1].higher[0];
++
++ g_active_policy[i].lower[2] = pre_freq * pre_ratio / cur_freq;
++
++ if (i > 1) {
++ pre_idx = g_active_ops_map[i - 2];
++ pre_freq = g_dyn_ops[pre_idx].cpu_freq;
++ pre_ratio = g_active_policy[i - 2].higher[0];
++
++ g_active_policy[i].lower[1] = pre_freq * pre_ratio / cur_freq;
++ } else {
++ g_active_policy[i].lower[1] = 0;
++ }
++
++ g_active_policy[i].lower[0] = 0;
++ }
++
++ for (j = 0; j < g_active_ops_num - 1 - i; ++j) {
++ g_active_bonus[i][j] = 0;
++ }
++
++ m = g_active_ops_num - 1;
++ n = 0;
++ for (j = m - i; j < 2 * g_active_ops_num - 1; ++j) {
++ g_active_bonus[i][j] = n < m ? n : m;
++ ++n;
++ }
++
++ }
++
++ g_active_policy[i - 1].higher[0] = 100;
++ g_active_policy[i - 1].higher[1] = 100;
++ g_active_policy[i - 1].higher[2] = 100;
++
++#if REDUCE_624M_DUTYCYCLE
++ cur_idx = g_active_ops_map[i - 1];
++ cur_freq = g_dyn_ops[cur_idx].cpu_freq;
++ if (cur_freq == 624) {
++ if (i > 1) {
++ g_active_policy[i - 2].higher[0] = 96;
++ g_active_policy[i - 2].higher[1] = 100;
++
++ pre_idx = g_active_ops_map[i - 2];
++ pre_freq = g_dyn_ops[pre_idx].cpu_freq;
++ pre_ratio = g_active_policy[i - 2].higher[0];
++
++ g_active_policy[i - 1].lower[2] = pre_freq * pre_ratio / cur_freq;
++ }
++ if (i > 2) {
++ g_active_policy[i - 3].higher[1] = 100;
++
++ pre_idx = g_active_ops_map[i - 3];
++ pre_freq = g_dyn_ops[pre_idx].cpu_freq;
++ pre_ratio = g_active_policy[i - 3].higher[0];
++
++ g_active_policy[i - 1].lower[1] = pre_freq * pre_ratio / cur_freq;
++ }
++ }
++#endif
++ return 0;
++}
++
++/*****************************************************************************/
++/* */
++/* Platform Related */
++/* */
++/*****************************************************************************/
++
++int get_op_power_bonus(void)
++{
++ if (0 == g_active_cur_idx)
++ return 1;
++ else
++ return 0;
++}
++
++static int build_dyn_ops(void)
++{
++ int i;
++ int ret;
++ int op_num = 0;
++ int count, x;
++
++ struct op_info *info = NULL;
++ struct op_freq freq;
++
++ op_num = dvfm_op_count();
++ PM_BUG_ON(op_num > BPM_MAX_OP_NUM);
++
++ memset(&g_dyn_ops, -1, sizeof(g_dyn_ops));
++
++ for (i = 0; i < op_num; ++i) {
++ ret = dvfm_get_opinfo(i, &info);
++
++ PM_BUG_ON(ret);
++
++ /* calculate how much bits is set in device word */
++ x = info->device;
++ for (count = 0; x; x = x & (x - 1), count++);
++
++ g_dyn_ops[i].index = i;
++ g_dyn_ops[i].count = count;
++
++ ret = dvfm_get_op_freq(i, &freq);
++ PM_BUG_ON(ret);
++
++ g_dyn_ops[i].cpu_freq = freq.cpu_freq;
++
++ g_dyn_ops[i].name = dvfm_get_op_name(i);
++
++ PM_BUG_ON(!g_dyn_ops[i].name);
++
++ INIT_LIST_HEAD(&(g_bpm_cons[i].list));
++ }
++
++ for (i = op_num; i < BPM_MAX_OP_NUM; ++i) {
++ g_dyn_ops[i].index = -1;
++ g_dyn_ops[i].count = 0;
++ g_dyn_ops[i].cpu_freq = 0;
++ g_dyn_ops[i].name = NULL;
++
++ INIT_LIST_HEAD(&(g_bpm_cons[i].list));
++ }
++
++ return 0;
++}
++
++static int get_dyn_idx(int active_idx)
++{
++ int t;
++ t = g_active_ops_map[active_idx];
++ return g_dyn_ops[t].index;
++}
++
++static int get_cur_freq(void)
++{
++ PM_BUG_ON(g_active_cur_idx == -1);
++ return g_dyn_ops[get_dyn_idx(g_active_cur_idx)].cpu_freq;
++}
++
++static int calc_new_idx(int bonus)
++{
++ int new_idx;
++
++ new_idx =
++ g_active_bonus[g_active_cur_idx][bonus + g_active_ops_num - 1];
++
++ return new_idx;
++}
++
++static int calc_bonus(struct bpm_freq_bonus_arg *parg)
++{
++ int i;
++ int bonus = 0;
++ int mem_stall = parg->mem_stall;
++ int mipsload = parg->mips * 100 / get_cur_freq();
++ int cpuload = mipsload > 100 ? 100 : mipsload;
++
++ PM_BUG_ON(cpuload > 100 || cpuload < 0);
++
++ for (i = 0; i < BPM_FREQ_POLICY_NUM; ++i) {
++ if (cpuload > g_active_policy[g_active_cur_idx].higher[i]) {
++ bonus += 1;
++// break; /* FIX ME: change the freq one by one */
++ }
++ }
++
++ for (i = BPM_FREQ_POLICY_NUM - 1; i >= 0; --i) {
++ if (cpuload < g_active_policy[g_active_cur_idx].lower[i]) {
++ bonus -= 1;
++// break; /* FIX ME: change the freq one by one */
++ }
++ }
++
++ /* memory bound */
++ if (bonus <= 0 && mem_stall > 17)
++ bonus = 1;
++
++ /* change to user_sleep policy ... */
++ if (g_android_suspended && (g_active_cur_idx <= 1))
++ bonus -= 1;
++
++ if (bonus > g_active_ops_num - 1)
++ bonus = g_active_ops_num - 1;
++ else if (bonus < 1 - g_active_ops_num)
++ bonus = 1 - g_active_ops_num;
++
++ return bonus;
++}
++
++/*****************************************************************************/
++/* */
++/* BPMD API */
++/* */
++/*****************************************************************************/
++
++static int bpm_change_op(int cur_idx, int new_idx)
++{
++ int ret;
++ struct dvfm_freqs freqs;
++ unsigned int oscr;
++
++ freqs.old = cur_idx;
++ freqs.new = new_idx;
++ oscr = OSCR;
++ ret = dvfm_set_op(&freqs, freqs.new, RELATION_STICK);
++ oscr = OSCR - oscr;
++ DPRINTK("old: %d cur: %d (tm: %d)\n", cur_idx, new_idx, oscr/325);
++/*
++ DPRINTK("ACCR: 0x%x ACSR: 0x%x AVCR: 0x%x SVCR: 0x%x CVCR: 0x%x\n",
++ ACCR, ACSR, AVCR, SVCR, CVCR);
++*/
++ return ret;
++}
++
++/* this function needs to be refactored later? */
++int bpm_disable_op(int dyn_idx, int dev_idx)
++{
++ int i;
++ int ret = 0;
++ int cur_op_idx = -1, op_idx;
++ int next_op_idx = -1, next_active_idx = -1;
++
++ op_idx = g_dyn_ops[dyn_idx].index;
++
++ /* save current op information */
++ if (g_active_cur_idx != -1) {
++ cur_op_idx = get_dyn_idx(g_active_cur_idx);
++ }
++
++ if (!dvfm_check_active_op(op_idx) && g_active_ops_num == 1 &&
++ cur_op_idx == op_idx) {
++ printk(KERN_ERR "Can't disable this op %d\n", op_idx);
++ bpm_dump_policy();
++ return -1;
++ }
++
++ /*
++ * it should be at least two enabled ops here,
++ * otherwise it cannot come here if there is one enabled op.
++ */
++ if ((g_active_cur_idx != -1) && (g_active_ops_num > 1)) {
++ if (g_active_cur_idx == (g_active_ops_num - 1)) {
++ next_op_idx = get_dyn_idx(g_active_cur_idx - 1);
++ PM_BUG_ON((g_active_cur_idx - 1) < 0);
++ if ((g_active_cur_idx - 1) < 0) {
++ printk(KERN_ERR "err: %d %d\n", g_active_cur_idx, g_active_ops_num);
++ bpm_dump_policy();
++ }
++ } else {
++ next_op_idx = get_dyn_idx(g_active_cur_idx + 1);
++ PM_BUG_ON((g_active_cur_idx + 1) > (g_active_ops_num - 1));
++ if ((g_active_cur_idx + 1) > (g_active_ops_num - 1)) {
++ printk(KERN_ERR "err2: %d %d\n", g_active_cur_idx, g_active_ops_num);
++ bpm_dump_policy();
++ }
++ }
++ }
++
++ g_dyn_ops[dyn_idx].count++;
++
++ __dvfm_disable_op2(op_idx, dev_idx);
++
++ if (!dvfm_check_active_op(op_idx) && g_dyn_ops[dyn_idx].count == 1) {
++ build_active_ops();
++ }
++
++ if (cur_op_idx != -1) {
++ for (i = 0; i < g_active_ops_num; ++i) {
++ if (get_dyn_idx(i) == cur_op_idx) {
++ g_active_cur_idx = i;
++ break;
++ }
++ }
++
++ /* the disabled op is previous op, change to another op */
++ if (g_active_cur_idx == -1) {
++
++ /* find next op */
++ for (i = 0; i < g_active_ops_num; ++i) {
++ if (get_dyn_idx(i) == next_op_idx) {
++ next_active_idx = i;
++ break;
++ }
++ }
++
++ PM_BUG_ON(cur_op_idx != op_idx);
++ PM_BUG_ON(next_op_idx != get_dyn_idx(next_active_idx));
++ g_active_cur_idx = next_active_idx;
++ ret = bpm_change_op(cur_op_idx, next_op_idx);
++ PM_BUG_ON(ret);
++ }
++ }
++
++ return ret;
++}
++
++int bpm_enable_op(int dyn_idx, int dev_idx)
++{
++ int i, cur_op_idx = -1;
++
++ if (g_dyn_ops[dyn_idx].count <= 0) {
++ printk(KERN_ERR "are you disable this op before?\n");
++ return -1;
++ }
++
++ /* save current op information */
++ if (g_active_cur_idx != -1) {
++ cur_op_idx = get_dyn_idx(g_active_cur_idx);
++ }
++
++ g_dyn_ops[dyn_idx].count--;
++
++ if (g_dyn_ops[dyn_idx].count == 0)
++ build_active_ops();
++
++ __dvfm_enable_op(g_dyn_ops[dyn_idx].index, dev_idx);
++
++ if (cur_op_idx != -1) {
++ for (i = 0; i < g_active_ops_num; ++i) {
++ if (get_dyn_idx(i) == cur_op_idx) {
++ g_active_cur_idx = i;
++ break;
++ }
++ }
++ }
++
++ return 0;
++}
++
++int bpm_enable_op_name(char *name, int dev_idx, char *sid)
++{
++ unsigned long flag;
++ int ret = 0, new_idx = -1;
++ int i, found;
++ struct list_head *list = NULL;
++ struct bpm_cons *p = NULL;
++
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ for (i = 0; i < sizeof(g_dyn_ops) / sizeof(struct dvfm_op); ++i) {
++ if (g_dyn_ops[i].name != NULL &&
++ (!strncmp(name, g_dyn_ops[i].name, sizeof(name)))) {
++ ret = bpm_enable_op(i, dev_idx);
++
++ if (!ret) {
++ found = 0;
++ list_for_each(list, &(g_bpm_cons[i].list)) {
++ p = list_entry(list, struct bpm_cons, list);
++ if (!strncmp(p->sid, sid, CONSTRAINT_ID_LEN - 1)) {
++ found = 1;
++ PM_BUG_ON(p->count <= 0);
++ p->count--;
++ if (p->tmp_ms) {
++ p->tm++;
++ p->ms += (OSCR / 3250 - p->tmp_ms);
++ }
++ break;
++ }
++ }
++ PM_BUG_ON(!found);
++ } else {
++ printk(KERN_ERR "%s use PM interface rightly!\n", sid);
++ PM_BUG_ON(1);
++ }
++ break;
++ }
++ }
++
++ if (i == sizeof(g_dyn_ops) / sizeof(struct dvfm_op)) {
++// printk(KERN_ERR "Cannot find and enable op name %s\n", name);
++ }
++
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++
++	/* Change to preferred op */
++ if (g_prefer_op_idx != cur_op && g_active_cur_idx != -1) {
++ for (i = 0; i < g_active_ops_num; ++i) {
++ if (get_dyn_idx(i) == g_prefer_op_idx) {
++ new_idx = i;
++ break;
++ }
++ }
++
++ if (new_idx != -1) {
++ ret = bpm_change_op(get_dyn_idx(g_active_cur_idx), get_dyn_idx(new_idx));
++ if (0 == ret)
++ g_active_cur_idx = new_idx;
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++ }
++ }
++
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++
++ return ret;
++}
++
++int bpm_disable_op_name(char *name, int dev_idx, char *sid)
++{
++ unsigned long flag;
++ int ret = -1;
++ int i;
++ int find = 0;
++ struct list_head *list = NULL;
++ struct bpm_cons *p = NULL;
++
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ for (i = 0; i < sizeof(g_dyn_ops) / sizeof(struct dvfm_op); ++i) {
++ if (g_dyn_ops[i].name != NULL &&
++ (!strncmp(name, g_dyn_ops[i].name, sizeof(name)))) {
++ ret = bpm_disable_op(i, dev_idx);
++
++ if (!ret) {
++ list_for_each(list, &(g_bpm_cons[i].list)) {
++ p = list_entry(list, struct bpm_cons, list);
++ if (!strncmp(p->sid, sid, CONSTRAINT_ID_LEN - 1)) {
++ p->count++;
++ p->tmp_ms = OSCR / 3250;
++ find = 1;
++ break;
++ }
++ }
++
++ if (find == 0) {
++ p = (struct bpm_cons *)kzalloc(sizeof(struct bpm_cons), GFP_KERNEL);
++ strncpy(p->sid, sid, CONSTRAINT_ID_LEN - 1);
++ p->count = 1;
++ list_add_tail(&(p->list), &(g_bpm_cons[i].list));
++ }
++ }
++ break;
++ }
++ }
++
++ if (i == sizeof(g_dyn_ops) / sizeof(struct dvfm_op)) {
++// printk(KERN_ERR "Cannot find and disable op name %s\n", name);
++ }
++
++ PM_BUG_ON((-1 != g_active_cur_idx) && (get_dyn_idx(g_active_cur_idx) != cur_op));
++
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++
++ return ret;
++}
++
++static int handle_profiler_arg(struct bpm_freq_bonus_arg *parg)
++{
++ int bonus;
++ int new_idx;
++ unsigned long flag;
++ int cur_dyn_idx, new_dyn_idx;
++
++ if (g_dvfm_blink)
++ return 0;
++
++ /*
++ * bpm_enable_op_name() and bpm_disable_op_name() will update
++ * g_dyn_ops[] and g_active_xxx[], and then scale the op, so
++ * we need to avoid the conflict.
++ * Below code can not call schedule() indirectly.
++ */
++ spin_lock_irqsave(&g_dyn_ops_lock, flag);
++
++ if (0 == g_bpm_enabled) {
++ spin_unlock_irqrestore(&g_dyn_ops_lock, flag);
++ return 0;
++ }
++