This is the Dynamic Power Management Core, as released by the upstream
project for 2.6.16.
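
A note for porters: the machine-dependent glue is intentionally small.  A
platform back-end only fills in the dpm_bd callbacks used by the generic
code (see arch/i386/kernel/cpu/dpm/centrino_dpm.c below for the real
example).  The following is a minimal, hypothetical sketch -- illustrative
only, not part of this patch; the example_* names are made up, and the
parameter indices (DPM_MD_V, DPM_MD_CPU_FREQ) are taken from the x86 code
in this patch:

    /* Hypothetical platform back-end, for illustration only.  It mirrors
     * the pattern of centrino_dpm.c: translate the generic power
     * parameters of an operating point into machine-dependent values and
     * program the hardware when an operating point is set. */
    #include <linux/dpm.h>
    #include <linux/init.h>
    #include <linux/kernel.h>

    static int example_init_opt(struct dpm_opt *opt)
    {
    	/* Copy the generic power parameters into the md operating point. */
    	opt->md_opt.v   = opt->pp[DPM_MD_V];
    	opt->md_opt.cpu = opt->pp[DPM_MD_CPU_FREQ];
    	return 0;
    }

    static int example_get_opt(struct dpm_opt *opt)
    {
    	/* Read the current voltage/frequency back from the hardware here. */
    	opt->md_opt.v = -1;
    	opt->md_opt.cpu = -1;
    	return 0;
    }

    static int example_set_opt(struct dpm_md_opt *md_opt)
    {
    	/* Program clocks and voltage from md_opt->cpu and md_opt->v here. */
    	return 0;
    }

    static int __init example_dpm_init(void)
    {
    	dpm_bd.init_opt = example_init_opt;
    	dpm_bd.get_opt  = example_get_opt;
    	dpm_bd.set_opt  = example_set_opt;
    	return 0;
    }
    __initcall(example_dpm_init);

Policies, classes, and the idle handling stay in the generic core
(drivers/dpm/ and drivers/base/power/power-dpm.c); the back-end only sees
individual operating points through these callbacks.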

Index: linux-2.6.16/arch/arm/Kconfig
===================================================================
--- linux-2.6.16.orig/arch/arm/Kconfig	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/arch/arm/Kconfig	2006-04-11 06:34:10.000000000 +0000
@@ -832,3 +832,5 @@
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "drivers/dpm/Kconfig"
Index: linux-2.6.16/arch/i386/Kconfig
===================================================================
--- linux-2.6.16.orig/arch/i386/Kconfig	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/arch/i386/Kconfig	2006-04-11 06:34:10.000000000 +0000
@@ -908,6 +908,7 @@
 endmenu
 
 source "arch/i386/kernel/cpu/cpufreq/Kconfig"
+source "arch/i386/kernel/cpu/dpm/Kconfig"
 
 endmenu
 
Index: linux-2.6.16/arch/i386/kernel/cpu/Makefile
===================================================================
--- linux-2.6.16.orig/arch/i386/kernel/cpu/Makefile	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/arch/i386/kernel/cpu/Makefile	2006-04-11 06:34:10.000000000 +0000
@@ -17,3 +17,4 @@
 
 obj-$(CONFIG_MTRR)	+= 	mtrr/
 obj-$(CONFIG_CPU_FREQ)	+=	cpufreq/
+obj-$(CONFIG_DPM)	+=	dpm/
Index: linux-2.6.16/arch/i386/kernel/cpu/dpm/Kconfig
===================================================================
--- linux-2.6.16.orig/arch/i386/kernel/cpu/dpm/Kconfig	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/arch/i386/kernel/cpu/dpm/Kconfig	2006-04-11 06:34:10.000000000 +0000
@@ -0,0 +1,10 @@
+#
+# Dynamic Power Management
+#
+
+source "drivers/dpm/Kconfig"
+
+config DPM_CENTRINO
+       tristate "DPM for Intel Centrino Enhanced Speedstep"
+       depends on DPM
+       default n
Index: linux-2.6.16/arch/i386/kernel/cpu/dpm/Makefile
===================================================================
--- linux-2.6.16.orig/arch/i386/kernel/cpu/dpm/Makefile	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/arch/i386/kernel/cpu/dpm/Makefile	2006-04-11 06:34:10.000000000 +0000
@@ -0,0 +1,3 @@
+obj-$(CONFIG_DPM)		+=	x86_dpm.o
+obj-$(CONFIG_DPM_CENTRINO)	+=	centrino_dpm.o
+
Index: linux-2.6.16/arch/i386/kernel/cpu/dpm/centrino_dpm.c
===================================================================
--- linux-2.6.16.orig/arch/i386/kernel/cpu/dpm/centrino_dpm.c	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/arch/i386/kernel/cpu/dpm/centrino_dpm.c	2006-04-11 06:34:10.000000000 +0000
@@ -0,0 +1,133 @@
+/*
+ * 2003 (c) MontaVista Software, Inc.  This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Based on speedstep-centrino.c by Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+
+#include <linux/config.h>
+#include <linux/dpm.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+
+/* Extract clock in kHz from PERF_CTL value */
+static unsigned extract_clock(unsigned msr)
+{
+	msr = (msr >> 8) & 0xff;
+	return msr * 100000;
+}
+
+/* Return the current CPU frequency in kHz */
+static unsigned get_cur_freq(void)
+{
+	unsigned l, h;
+
+	rdmsr(MSR_IA32_PERF_STATUS, l, h);
+	return extract_clock(l);
+}
+
+static int
+dpm_centrino_init_opt(struct dpm_opt *opt)
+{
+	int v		= opt->pp[DPM_MD_V];
+	int cpu		= opt->pp[DPM_MD_CPU_FREQ];
+
+	struct dpm_md_opt *md_opt = &opt->md_opt;
+
+	md_opt->v = v;
+	md_opt->cpu = cpu;
+	return 0;
+}
+
+/* Fully determine the current machine-dependent operating point, and fill in a
+   structure presented by the caller. This should only be called when the
+   dpm_sem is held. This call can return an error if the system is currently at
+   an operating point that could not be constructed by dpm_md_init_opt(). */
+
+static int
+dpm_centrino_get_opt(struct dpm_opt *opt)
+{
+	struct dpm_md_opt *md_opt = &opt->md_opt;
+
+	md_opt->v = 100; /* TODO. */
+	md_opt->cpu = get_cur_freq();
+	return 0;
+}
+
+static int
+dpm_centrino_set_opt(struct dpm_md_opt *md_opt)
+{
+	unsigned int msr = 0, oldmsr, h, mask = 0;
+
+	if (md_opt->cpu != -1) {
+		msr |= ((md_opt->cpu)/100) << 8;
+		mask |= 0xff00;
+	}
+
+	if (md_opt->v != -1) {
+		msr |= ((md_opt->v - 700) / 16);
+		mask |= 0xff;
+	}
+
+	rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+
+	if (msr == (oldmsr & mask))
+		return 0;
+
+	/* all but 16 LSB are "reserved", so treat them with
+	   care */
+	oldmsr &= ~mask;
+	msr &= mask;
+	oldmsr |= msr;
+
+	wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+	return 0;
+}
+
+static int dpm_centrino_startup(void)
+{
+	struct cpuinfo_x86 *cpu = cpu_data;
+	unsigned l, h;
+
+	if (!cpu_has(cpu, X86_FEATURE_EST))
+		return -ENODEV;
+
+	/* Check to see if Enhanced SpeedStep is enabled, and try to
+	   enable it if not. */
+	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+
+	if (!(l & (1<<16))) {
+		l |= (1<<16);
+		wrmsr(MSR_IA32_MISC_ENABLE, l, h);
+
+		/* check to see if it stuck */
+		rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+		if (!(l & (1<<16))) {
+			printk(KERN_INFO "DPM: Couldn't enable Enhanced SpeedStep\n");
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+int __init dpm_centrino_init(void)
+{
+	printk("Dynamic Power Management for Intel Centrino Enhanced SpeedStep.\n");
+
+	dpm_bd.startup = dpm_centrino_startup;
+	dpm_bd.init_opt = dpm_centrino_init_opt;
+	dpm_bd.get_opt = dpm_centrino_get_opt;
+	dpm_bd.set_opt = dpm_centrino_set_opt;
+	return 0;
+}
+
+__initcall(dpm_centrino_init);
Index: linux-2.6.16/arch/i386/kernel/cpu/dpm/x86_dpm.c
===================================================================
--- linux-2.6.16.orig/arch/i386/kernel/cpu/dpm/x86_dpm.c	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/arch/i386/kernel/cpu/dpm/x86_dpm.c	2006-04-11 06:34:10.000000000 +0000
@@ -0,0 +1,133 @@
+/*
+ * 2003 (c) MontaVista Software, Inc.  This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/config.h>
+#include <linux/dpm.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/page.h>
+#include <asm/uaccess.h>
+
+struct dpm_bd dpm_bd;
+
+static int
+dpm_x86_init_opt(struct dpm_opt *opt)
+{
+	return dpm_bd.init_opt ? dpm_bd.init_opt(opt) : -1;
+}
+
+/* Fully determine the current machine-dependent operating point, and fill in a
+   structure presented by the caller. This should only be called when the
+   dpm_sem is held. This call can return an error if the system is currently at
+   an operating point that could not be constructed by dpm_md_init_opt(). */
+
+static unsigned long loops_per_jiffy_ref = 0;
+
+static int
+dpm_x86_get_opt(struct dpm_opt *opt)
+{
+	return dpm_bd.get_opt ? dpm_bd.get_opt(opt) : -1;
+}
+
+int
+dpm_x86_set_opt(struct dpm_opt *cur, struct dpm_opt *new)
+{
+	struct cpuinfo_x86 *cpu = cpu_data;
+
+	if (! new->md_opt.cpu) {
+#ifdef CONFIG_PM
+		pm_suspend(PM_SUSPEND_STANDBY);
+
+		/* Here when we wake up.  Recursive call to switch back
+		 * to task state.
+		 */
+
+		dpm_set_os(DPM_TASK_STATE);
+#endif
+		return 0;
+	}
+
+	if (dpm_bd.set_opt) {
+		dpm_bd.set_opt(&new->md_opt);
+
+	} else {
+		return -1;
+	}
+
+	if (cur->md_opt.cpu && new->md_opt.cpu) {
+		loops_per_jiffy_ref = cpu->loops_per_jiffy;
+		cpu->loops_per_jiffy =
+			dpm_compute_lpj(loops_per_jiffy_ref,
+					cur->md_opt.cpu,
+					new->md_opt.cpu);
+
+		loops_per_jiffy = cpu->loops_per_jiffy;
+		if (cpu_khz)
+			cpu_khz = dpm_compute_lpj(cpu_khz,
+						  cur->md_opt.cpu,
+						  new->md_opt.cpu);
+	}
+	return 0;
+}
+
+/*
+ * idle loop
+ */
+
+static void (*orig_idle)(void);
+
+void dpm_x86_idle(void)
+{
+	extern void default_idle(void);
+
+	if (orig_idle)
+		orig_idle();
+	else
+		default_idle();
+}
+
+/****************************************************************************
+ * Initialization/Exit
+ ****************************************************************************/
+
+void
+dpm_x86_startup(void)
+{
+	orig_idle = pm_idle;
+	pm_idle = dpm_idle;
+
+	if (dpm_bd.startup)
+		dpm_bd.startup();
+}
+
+void
+dpm_x86_cleanup(void)
+{
+	pm_idle = orig_idle;
+
+	if (dpm_bd.cleanup)
+		dpm_bd.cleanup();
+}
+
+int __init
+dpm_x86_init(void)
+{
+	printk("Dynamic Power Management for x86.\n");
+
+	dpm_md.init_opt		= dpm_x86_init_opt;
+	dpm_md.set_opt		= dpm_x86_set_opt;
+	dpm_md.get_opt		= dpm_x86_get_opt;
+	dpm_md.idle		= dpm_x86_idle;
+	dpm_md.startup		= dpm_x86_startup;
+	dpm_md.cleanup		= dpm_x86_cleanup;
+	return 0;
+}
+__initcall(dpm_x86_init);
Index: linux-2.6.16/arch/i386/kernel/process.c
===================================================================
--- linux-2.6.16.orig/arch/i386/kernel/process.c	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/arch/i386/kernel/process.c	2006-04-11 06:34:10.000000000 +0000
@@ -58,6 +58,8 @@
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
 
+#include <linux/dpm.h>
+
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 static int hlt_counter;
@@ -697,6 +699,7 @@
 
 	disable_tsc(prev_p, next_p);
 
+	dpm_set_os(next_p->dpm_state);
 	return prev_p;
 }
 
Index: linux-2.6.16/drivers/Makefile
===================================================================
--- linux-2.6.16.orig/drivers/Makefile	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/drivers/Makefile	2006-04-11 06:34:10.000000000 +0000
@@ -67,6 +67,7 @@
 obj-$(CONFIG_MCA)		+= mca/
 obj-$(CONFIG_EISA)		+= eisa/
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
+obj-$(CONFIG_DPM)		+= dpm/
 obj-$(CONFIG_MMC)		+= mmc/
 obj-$(CONFIG_INFINIBAND)	+= infiniband/
 obj-$(CONFIG_SGI_SN)		+= sn/
Index: linux-2.6.16/drivers/base/core.c
===================================================================
--- linux-2.6.16.orig/drivers/base/core.c	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/drivers/base/core.c	2006-04-11 06:34:10.000000000 +0000
@@ -282,6 +282,8 @@
 	if (parent)
 		klist_add_tail(&dev->knode_parent, &parent->klist_children);
 
+	assert_constraints(dev->constraints);
+
 	/* notify platform of device entry */
 	if (platform_notify)
 		platform_notify(dev);
@@ -367,6 +369,8 @@
 		klist_del(&dev->knode_parent);
 	device_remove_file(dev, &dev->uevent_attr);
 
+	deassert_constraints(dev->constraints);
+
 	/* Notify the platform of the removal, in case they
 	 * need to do anything...
 	 */
Index: linux-2.6.16/drivers/base/power/Makefile
===================================================================
--- linux-2.6.16.orig/drivers/base/power/Makefile	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/drivers/base/power/Makefile	2006-04-11 06:34:10.000000000 +0000
@@ -1,4 +1,4 @@
-obj-y			:= shutdown.o
+obj-y			:= shutdown.o power-dpm.o
 obj-$(CONFIG_PM)	+= main.o suspend.o resume.o runtime.o sysfs.o
 
 ifeq ($(CONFIG_DEBUG_DRIVER),y)
Index: linux-2.6.16/drivers/base/power/power-dpm.c
===================================================================
--- linux-2.6.16.orig/drivers/base/power/power-dpm.c	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/drivers/base/power/power-dpm.c	2006-04-11 06:35:28.000000000 +0000
@@ -0,0 +1,473 @@
+/*
+ * power-dpm.c -- Dynamic Power Management LDM power hooks
+ *
+ * (c) 2003 MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express or
+ * implied.
+ */
+
+#include <linux/device.h>
+#include <linux/pm.h>
+#include <linux/dpm.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "power.h"
+
+/*
+ * power hotplug events
+ */
+
+#define BUFFER_SIZE	1024	/* should be enough memory for the env */
+#define NUM_ENVP	32	/* number of env pointers */
+static unsigned long sequence_num;
+static spinlock_t sequence_lock = SPIN_LOCK_UNLOCKED;
+
+void power_event(char *eventstr)
+{
+	char *argv [3];
+	char **envp = NULL;
+	char *buffer = NULL;
+	char *scratch;
+	int i = 0;
+	int retval;
+	unsigned long seq;
+
+	if (!uevent_helper[0])
+		return;
+
+	envp = kmalloc(NUM_ENVP * sizeof (char *), GFP_KERNEL);
+	if (!envp)
+		return;
+	memset (envp, 0x00, NUM_ENVP * sizeof (char *));
+
+	buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
+	if (!buffer)
+		goto exit;
+
+	argv [0] = uevent_helper;
+	argv [1] = "power";
+	argv [2] = 0;
+
+	/* minimal command environment */
+	envp [i++] = "HOME=/";
+	envp [i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+
+	scratch = buffer;
+
+	envp [i++] = scratch;
+	scratch += sprintf(scratch, "ACTION=event") + 1;
+
+	spin_lock(&sequence_lock);
+	seq = sequence_num++;
+	spin_unlock(&sequence_lock);
+
+	envp [i++] = scratch;
+	scratch += sprintf(scratch, "SEQNUM=%ld", seq) + 1;
+	envp [i++] = scratch;
+	scratch += sprintf(scratch, "EVENT=%s", eventstr) + 1;
+
+	pr_debug ("%s: %s %s %s %s %s %s %s\n", __FUNCTION__, argv[0], argv[1],
+		  envp[0], envp[1], envp[2], envp[3], envp[4]);
+	retval = call_usermodehelper (argv[0], argv, envp, 0);
+	if (retval)
+		pr_debug ("%s - call_usermodehelper returned %d\n",
+			  __FUNCTION__, retval);
+
+exit:
+	kfree(buffer);
+	kfree(envp);
+	return;
+}
+
+void device_power_event(struct device * dev, char *eventstr)
+{
+	char *argv [3];
+	char **envp = NULL;
+	char *buffer = NULL;
+	char *scratch;
+	int i = 0;
+	int retval;
+	unsigned long seq;
+
+	if (!uevent_helper[0])
+		return;
+
+	envp = kmalloc(NUM_ENVP * sizeof (char *), GFP_KERNEL);
+	if (!envp)
+		return;
+	memset (envp, 0x00, NUM_ENVP * sizeof (char *));
+
+	buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
+	if (!buffer)
+		goto exit;
+
+	argv [0] = uevent_helper;
+	argv [1] = "power";
+	argv [2] = 0;
+
+	/* minimal command environment */
+	envp [i++] = "HOME=/";
+	envp [i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+
+	scratch = buffer;
+
+	envp [i++] = scratch;
+	scratch += sprintf(scratch, "ACTION=device-event") + 1;
+
+	spin_lock(&sequence_lock);
+	seq = sequence_num++;
+	spin_unlock(&sequence_lock);
+
+	envp [i++] = scratch;
+	scratch += sprintf(scratch, "SEQNUM=%ld", seq) + 1;
+	envp [i++] = scratch;
+	scratch += sprintf(scratch, "DEVICE=%s", dev->bus_id) + 1;
+	envp [i++] = scratch;
+	scratch += sprintf(scratch, "EVENT=%s", eventstr) + 1;
+	envp [i++] = scratch;
+	scratch += sprintf(scratch, "SUBSYSTEM=power") + 1;
+
+	pr_debug ("%s: %s %s %s %s %s %s %s %s %s\n", __FUNCTION__, argv[0], argv[1],
+		  envp[0], envp[1], envp[2], envp[3], envp[4], envp[5],
+		  envp[6]);
+	retval = call_usermodehelper (argv[0], argv, envp, 0);
+	if (retval)
+		pr_debug ("%s - call_usermodehelper returned %d\n",
+			  __FUNCTION__, retval);
+
+exit:
+	kfree(buffer);
+	kfree(envp);
+	return;
+}
+
+/*
+ * Device constraints
+ */
+
+#ifdef CONFIG_DPM
+LIST_HEAD(dpm_constraints);
+DECLARE_MUTEX(dpm_constraints_sem);
+
+void assert_constraints(struct constraints *constraints)
+{
+	if (! constraints || constraints->asserted)
+		return;
+
+	down(&dpm_constraints_sem);
+	constraints->asserted = 1;
+	list_add_tail(&constraints->entry, &dpm_constraints);
+	up(&dpm_constraints_sem);
+
+	/* DPM-PM-TODO: Check against DPM state. */
+
+}
+
+
+void deassert_constraints(struct constraints *constraints)
+{
+	if (! constraints || ! constraints->asserted)
+		return;
+
+	down(&dpm_constraints_sem);
+	constraints->asserted = 0;
+	list_del_init(&constraints->entry);
+	up(&dpm_constraints_sem);
+}
+
+
+EXPORT_SYMBOL(assert_constraints);
+EXPORT_SYMBOL(deassert_constraints);
+
+static ssize_t
+constraints_show(struct device * dev, struct device_attribute *attr,
+		 char * buf)
+{
+	int i, cnt = 0;
+
+	if (dev->constraints) {
+		for (i = 0; i < dev->constraints->count; i++) {
+			cnt += sprintf(buf + cnt,"%s: min=%d max=%d\n",
+				       dpm_param_names[dev->constraints->param[i].id],
+				       dev->constraints->param[i].min,
+				       dev->constraints->param[i].max);
+		}
+
+		cnt += sprintf(buf + cnt,"asserted=%s violations=%d\n",
+			       dev->constraints->asserted ?
+			       "yes" : "no", dev->constraints->violations);
+	} else {
+		cnt += sprintf(buf + cnt,"none\n");
+	}
+
+	return cnt;
+}
+
+static ssize_t
+constraints_store(struct device * dev, struct device_attribute *attr,
+		  const char * buf, size_t count)
+{
+	int num_args, paramid, min, max;
+	int cidx;
+	const char *cp, *paramname;
+	int paramnamelen;
+	int provisional = 0;
+	int ret = 0;
+
+	if (!dev->constraints) {
+		if (! (dev->constraints = kmalloc(sizeof(struct constraints),
+						  GFP_KERNEL)))
+			return -ENOMEM;
+
+		memset(dev->constraints, 0,
+		       sizeof(struct constraints));
+		provisional = 1;
+	}
+
+	cp = buf;
+	while((cp - buf < count) && *cp && (*cp == ' '))
+		cp++;
+
+	paramname = cp;
+
+	while((cp - buf < count) && *cp && (*cp != ' '))
+		cp++;
+
+	paramnamelen = cp - paramname;
+	num_args = sscanf(cp, "%d %d", &min, &max);
+
+	if (num_args != 2) {
+		printk("DPM: Need 2 integer parameters for constraint min/max.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (paramid = 0; paramid < DPM_PP_NBR; paramid++) {
+		if (strncmp(paramname, dpm_param_names[paramid], paramnamelen) == 0)
+			break;
+	}
+
+	if (paramid >= DPM_PP_NBR) {
+		printk("DPM: Unknown power parameter name in device constraints\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (cidx = 0; cidx < dev->constraints->count; cidx++)
+		/*
+		 * If the new range overlaps an existing range,
+		 * modify the existing one.
+		 */
+
+		if ((dev->constraints->param[cidx].id == paramid) &&
+		    ((max == -1) || 
+		     (max >= dev->constraints->param[cidx].min)) &&
+		    ((min == -1) ||
+		     (min <= dev->constraints->param[cidx].max)))
+			break;
+
+	if (cidx >= DPM_CONSTRAINT_PARAMS_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Error if max is less than min */
+	if (max < min) {
+		printk("DPM: Max value of the constraint should not be less than min\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dev->constraints->param[cidx].id = paramid;
+	dev->constraints->param[cidx].max = max;
+	dev->constraints->param[cidx].min = min;
+
+	if (cidx == dev->constraints->count)
+		dev->constraints->count++;
+
+	/* New constraints should start off with same state as power
+	   state */
+	if (provisional && (dev->power.power_state.event == PM_EVENT_ON))
+		assert_constraints(dev->constraints);
+
+out:
+
+	if (provisional && (ret < 0)) {
+		kfree(dev->constraints);
+		dev->constraints = NULL;
+	}
+
+	return ret < 0 ? ret : count;
+}
+
+DEVICE_ATTR(constraints,S_IWUSR | S_IRUGO,
+            constraints_show,constraints_store);
+
+#else /* CONFIG_DPM */
+void assert_constraints(struct constraints *constraints)
+{
+}
+
+void deassert_constraints(struct constraints *constraints)
+{
+}
+#endif /* CONFIG_DPM */
+
+#ifdef CONFIG_DPM
+
+/*
+ * Driver scale callbacks
+ */
+
+static struct notifier_block *dpm_scale_notifier_list[SCALE_MAX];
+static DECLARE_MUTEX(dpm_scale_sem);
+
+/* This function may be called by the platform frequency scaler before
+   or after a frequency change, in order to let drivers adjust any
+   clocks or calculations for the new frequency. */
+
+void dpm_driver_scale(int level, struct dpm_opt *newop)
+{
+	if (down_trylock(&dpm_scale_sem))
+		return;
+
+	notifier_call_chain(&dpm_scale_notifier_list[level], level, newop);
+	up(&dpm_scale_sem);
+}
+
+void dpm_register_scale(struct notifier_block *nb, int level)
+{
+	down(&dpm_scale_sem);
+	notifier_chain_register(&dpm_scale_notifier_list[level], nb);
+	up(&dpm_scale_sem);
+}
+
+void dpm_unregister_scale(struct notifier_block *nb, int level)
+{
+	down(&dpm_scale_sem);
+	notifier_chain_unregister(&dpm_scale_notifier_list[level], nb);
+	up(&dpm_scale_sem);
+}
+
+int dpm_constraint_rejects = 0;
+
+
+int
+dpm_default_check_constraint(struct constraint_param *param,
+			     struct dpm_opt *opt)
+{
+	return (opt->pp[param->id] == -1) ||
+		((param->min == -1 || opt->pp[param->id] >= param->min) &&
+		 (param->max == -1 || opt->pp[param->id] <= param->max));
+}
+
+static int
+dpm_check_a_constraint(struct constraints *constraints, struct dpm_opt *opt)
+{
+	int i;
+	int failid = -1;
+	int ppconstraint[DPM_PP_NBR];
+
+
+	if (! constraints || !constraints->asserted)
+		return 1;
+
+	/*
+	 * ppconstraint[ppid] == 0  means power param has not been checked
+	 *                          for a constraint
+	 *                    == -1 means power param has matched a constraint
+	 *                     > 0  means constraint #n-1 mismatched
+	 *
+	 * failid == pp id of (a) failed constraint
+	 */
+
+	memset(ppconstraint, 0, sizeof(ppconstraint));
+
+	for (i = 0; i < constraints->count; i++) {
+		struct constraint_param *param = &constraints->param[i];
+
+		if (! dpm_md_check_constraint(param, opt)) {
+			if (ppconstraint[param->id] == 0) {
+				failid = param->id;
+				ppconstraint[failid] = i+1;
+			}
+		} else
+			ppconstraint[param->id] = -1;
+	}
+
+	if ((failid >= 0) && (ppconstraint[failid] > 0)) {
+#ifdef CONFIG_DPM_TRACE
+		struct constraint_param *param =
+			&constraints->param[ppconstraint[failid]-1];
+
+		dpm_trace(DPM_TRACE_CONSTRAINT_ASSERTED,
+			  param->id, param->min, param->max,
+			  opt);
+#endif
+		return 0;
+	}
+
+	return 1;
+}
+
+int dpm_check_constraints(struct dpm_opt *opt)
+{
+	struct list_head * entry;
+	int valid = 1;
+
+	list_for_each(entry,&dpm_constraints) {
+		struct constraints *constraints =
+			list_entry(entry, struct constraints, entry);
+		if (!dpm_check_a_constraint(constraints, opt)) {
+			constraints->violations++;
+			dpm_constraint_rejects++;
+			valid = 0;
+		}
+	}
+
+	return valid;
+}
+
+int dpm_show_opconstraints(struct dpm_opt *opt, char * buf)
+{
+#ifdef CONFIG_PM
+	struct list_head * entry;
+	int len = 0;
+
+	list_for_each_prev(entry,&dpm_active) {
+		struct device * dev = to_device(entry);
+
+		if (!dpm_check_a_constraint(dev->constraints, opt)) {
+			len += sprintf(buf + len, "%s/%s\n", dev->bus->name,
+				       dev->bus_id);
+		}
+	}
+
+	return len;
+#else /* CONFIG_PM */
+	return 0;
+#endif /* CONFIG_PM */
+}
+
+void dpm_force_off_constrainers(struct dpm_opt *opt)
+{
+#ifdef CONFIG_PM
+	struct list_head * entry;
+
+	list_for_each_prev(entry,&dpm_active) {
+		struct device * dev = to_device(entry);
+
+		if (!dpm_check_a_constraint(dev->constraints, opt)) {
+			suspend_device(dev, PMSG_SUSPEND);
+		}
+	}
+#endif
+}
+
+EXPORT_SYMBOL(dpm_force_off_constrainers);
+#endif /* CONFIG_DPM */
+
Index: linux-2.6.16/drivers/base/power/resume.c
===================================================================
--- linux-2.6.16.orig/drivers/base/power/resume.c	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/drivers/base/power/resume.c	2006-04-11 06:34:10.000000000 +0000
@@ -34,6 +34,8 @@
 	if (dev->bus && dev->bus->resume) {
 		dev_dbg(dev,"resuming\n");
 		error = dev->bus->resume(dev);
+		if (!error)
+			assert_constraints(dev->constraints);
 	}
 	up(&dev->sem);
 	return error;
Index: linux-2.6.16/drivers/base/power/suspend.c
===================================================================
--- linux-2.6.16.orig/drivers/base/power/suspend.c	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/drivers/base/power/suspend.c	2006-04-11 06:34:10.000000000 +0000
@@ -57,6 +57,9 @@
 	if (dev->bus && dev->bus->suspend && !dev->power.power_state.event) {
 		dev_dbg(dev, "suspending\n");
 		error = dev->bus->suspend(dev, state);
+
+		if (! error)
+			deassert_constraints(dev->constraints);
 	}
 	up(&dev->sem);
 	return error;
Index: linux-2.6.16/drivers/base/power/sysfs.c
===================================================================
--- linux-2.6.16.orig/drivers/base/power/sysfs.c	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/drivers/base/power/sysfs.c	2006-04-11 06:34:10.000000000 +0000
@@ -56,7 +56,6 @@
 
 static DEVICE_ATTR(state, 0644, state_show, state_store);
 
-
 /*
  *	wakeup - Report/change current wakeup option for device
  *
@@ -128,10 +127,14 @@
 
 static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
 
+extern struct device_attribute dev_attr_constraints;
 
 static struct attribute * power_attrs[] = {
 	&dev_attr_state.attr,
 	&dev_attr_wakeup.attr,
+#ifdef CONFIG_DPM
+	&dev_attr_constraints.attr,
+#endif
 	NULL,
 };
 static struct attribute_group pm_attr_group = {
Index: linux-2.6.16/drivers/dpm/Kconfig
===================================================================
--- linux-2.6.16.orig/drivers/dpm/Kconfig	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/drivers/dpm/Kconfig	2006-04-11 06:34:10.000000000 +0000
@@ -0,0 +1,43 @@
+#
+# Dynamic Power Management
+#
+
+menu "Dynamic Power Management"
+
+config DPM
+	bool "Dynamic Power Management"
+	help
+	  Enable Dynamic Power Management, if implemented for your platform.
+	  DPM conserves power by adjusting power parameters according to
+	  system state (such as idle, running a high-power-usage task, etc.)
+	  and enables associated power management features such as device
+	  constraints on power parameters.  DPM relies on power policies
+	  and machine-dependent operating points being configured from
+	  userspace after boot.
+
+	  If in doubt, say N.
+
+config DPM_STATS
+	bool "  Enable DPM Statistics Gathering"
+	depends on DPM
+	help
+	  This enables gathering and reporting statistics for DPM.
+	  This can be useful during development of DPM platform code or
+	  in other situations where information on the operation of DPM is
+	  needed.
+
+	  If in doubt, say N.
+
+
+config DPM_PROCFS
+	bool "  Enable old DPM /proc interface (deprecated)"
+	depends on DPM && PROC_FS
+	help
+	  This enables the /proc/driver/dpm interface for controlling
+	  DPM.  Please note that it is recommended to use the sysfs
+	  interface instead (which is built automatically).
+
+	  If in doubt, say N.
+
+endmenu
+
Index: linux-2.6.16/drivers/dpm/Makefile
===================================================================
--- linux-2.6.16.orig/drivers/dpm/Makefile	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/drivers/dpm/Makefile	2006-04-11 06:34:10.000000000 +0000
@@ -0,0 +1,7 @@
+#
+# Makefile for the kernel DPM driver.
+#
+
+obj-$(CONFIG_DPM)		+= dpm.o dpm-idle.o dpm-ui.o
+obj-$(CONFIG_DPM_PROCFS)	+= proc.o
+
Index: linux-2.6.16/drivers/dpm/dpm-idle.c
===================================================================
--- linux-2.6.16.orig/drivers/dpm/dpm-idle.c	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/drivers/dpm/dpm-idle.c	2006-04-11 06:34:10.000000000 +0000
@@ -0,0 +1,167 @@
+/*
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2002, MontaVista Software <source@mvista.com>.
+ *
+ * Based on ibm405lp_dpm.c by Bishop Brock, Copyright (C) 2002,
+ * International Business Machines Corporation.
+ */
+
+#include <linux/config.h>
+#include <linux/dpm.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+
+#include <asm/delay.h>
+#include <asm/hardirq.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+/****************************************************************************
+ *  DPM Idle Handler
+ ****************************************************************************/
+
+/*
+   The idle handler is one of the most important parts of DPM, as very
+   significant amounts of energy are saved by moving to a low-power idle state
+   whenever possible.  The basic coding of the core of this routine is simply:
+
+   dpm_set_os(DPM_IDLE_STATE);
+   machine-dependent-idle-routine();
+   dpm_set_os(DPM_IDLE_TASK_STATE);
+
+   The added complexity found here is introduced to avoid unnecessary work, and
+   especially to reduce the latencies associated with going in and out of idle.
+   Idle power can be greatly reduced by moving to a very low-frequency
+   operating point, but we also need to be aware of the impact on interrupt
+   latencies.  The DPM implementation of idle attempts to balance these
+   competing needs.
+
+   We support 2 "idle" states: DPM_IDLE_TASK_STATE and DPM_IDLE_STATE.  The
+   idle thread is marked as a "no-state" task, so that operating point changes
+   are not automatically made when the idle thread is scheduled. The
+   "idle-task" state is used for the majority of the idle thread.  Interrupts
+   that occur during idle are handled in this state as well. The "idle" state
+   is only entered from the idle-task state, and only for the express purpose
+   of allowing an ultra-low-power operating point.
+
+   The introduction of the idle-task state supports a stepped voltage and
+   frequency scaling at idle.  On the IBM 405LP we would not want to go from,
+   e.g., 266/133 @ 1.8 V directly to 8/8 @ 1.0 V and back.  Why not?  Because
+   we would get "stuck" at 8MHz even though we need to wake up and resume
+   useful work, e.g., we would have to set the 266/133 operating point while
+   running at 8/8.  So instead, when going idle, first step down to idle-task,
+   e.g., 100/50 @ 1.0 V, and then step down to e.g. 8/8 to halt.  The interrupt
+   that takes us out of idle takes us back to idle-task (100/50) for interrupt
+   processing and the potential return to 266/133.
+
+   The best policies for this implementation will be able to transition between
+   idle-task and idle without voltage scaling or driver notification. In these
+   cases the transitions are handled with minimal latency by simple frequency
+   scaling. */
+
+static inline void
+quick_idle(void)
+{
+	dpm_quick_enter_state(DPM_IDLE_STATE);
+	dpm_md_idle();
+	dpm_quick_enter_state(DPM_IDLE_TASK_STATE);
+}
+
+static void
+full_idle(struct dpm_opt *idle_task_opt, struct dpm_opt *idle_opt)
+{
+	dpm_quick_enter_state(DPM_IDLE_STATE);
+#ifdef CONFIG_DPM_STATS
+	dpm_update_stats(&idle_opt->stats, &idle_task_opt->stats);
+#endif
+	dpm_set_opt(idle_opt, DPM_SYNC);
+	dpm_md_idle();
+	dpm_set_opt(idle_task_opt, DPM_SYNC);
+	dpm_quick_enter_state(DPM_IDLE_TASK_STATE);
+#ifdef CONFIG_DPM_STATS
+	dpm_update_stats(&idle_task_opt->stats, &idle_opt->stats);
+#endif
+}
+
+
+/* If DPM is currently disabled here we simply do the standard
+   idle wait.
+
+   If we're not actually in DPM_IDLE_TASK_STATE, we need to go back and get
+   into this state.  This could happen in rare instances - an interrupt between
+   dpm_set_os() and the critical section.
+
+   If we are not yet at the idle-task operating point, or if there is no
+   difference between idle-task and idle, we can enter/exit the idle state
+   quickly since it's only for statistical purposes.  This is also true if for
+   some reason we can't get the DPM lock, since obviously an asynchronous event
+   is going to have to occur to clear the lock, and this event is going to take
+   us out of idle.
+
+   Otherwise the full idle shutdown is done. */
+
+
+void
+dpm_idle(void)
+{
+	unsigned long flags;
+	struct dpm_opt *idle_task_opt, *idle_opt;
+
+	current->dpm_state = DPM_NO_STATE;
+	dpm_set_os(DPM_IDLE_TASK_STATE);
+	local_irq_save(flags);
+
+	if (! need_resched()) {
+		if (!dpm_enabled) {
+			dpm_md_idle();
+
+		} else if (dpm_active_state != DPM_IDLE_TASK_STATE) {
+
+
+		} else {
+			idle_task_opt = dpm_choose_opt(dpm_active_policy,
+						       DPM_IDLE_TASK_STATE);
+			idle_opt = dpm_choose_opt(dpm_active_policy,
+						  DPM_IDLE_STATE);
+
+			if (dpm_trylock()) {
+				dpm_md_idle();
+			} else {
+
+				if ((dpm_active_opt != idle_task_opt) ||
+				    (idle_task_opt == idle_opt)) {
+
+					quick_idle();
+					dpm_unlock();
+				} else {
+					dpm_unlock();
+					full_idle(idle_task_opt, idle_opt);
+				}
+			}
+		}
+	}
+	local_irq_restore(flags);
+}
+
Index: linux-2.6.16/drivers/dpm/dpm-ui.c
===================================================================
--- linux-2.6.16.orig/drivers/dpm/dpm-ui.c	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/drivers/dpm/dpm-ui.c	2006-04-11 06:35:40.000000000 +0000
@@ -0,0 +1,1249 @@
+/*
+ * drivers/dpm/dpm-ui.c - userspace interface to Dynamic Power Management
+ *
+ * (c) 2003 MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express or
+ * implied.
+ */
+
+#include <linux/dpm.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+
+/* Common sysfs/proc support */
+
+char *dpm_state_names[DPM_STATES] = DPM_STATE_NAMES;
+char *dpm_param_names[DPM_PP_NBR] = DPM_PARAM_NAMES;
+
+#define MAXTOKENS 80
+
+static int
+tokenizer(char **tbuf, const char *userbuf, ssize_t n, char **tokptrs,
+	  int maxtoks)
+{
+	char *cp, *tok;
+	char *whitespace = " \t\r\n";
+	int ntoks = 0;
+
+	if (!(cp = kmalloc(n + 1, GFP_KERNEL)))
+		return -ENOMEM;
+
+	*tbuf = cp;
+	memcpy(cp, userbuf, n);
+	cp[n] = '\0';
+
+	do {
+		cp = cp + strspn(cp, whitespace);
+		tok = strsep(&cp, whitespace);
+		if ((*tok == '\0') || (ntoks == maxtoks))
+			break;
+		tokptrs[ntoks++] = tok;
+	} while(cp);
+
+	return ntoks;
+}
+
+
+/* SysFS Interface */
+
+#define dpm_attr(_name,_prefix) \
+static struct subsys_attribute _prefix##_attr = { \
+        .attr   = {                             \
+                .name = __stringify(_name),     \
+                .mode = 0644,                   \
+        },                                      \
+        .show   = _prefix##_show,                 \
+        .store  = _prefix##_store,                \
+}
+
+
+static void dpm_kobj_release(struct kobject *kobj)
+{
+	/*
+	 * No sysfs/kobject state to release; the DPM layer will handle
+	 * the containing object.
+	 */
+
+	return;
+}
+
+/*
+ * Top-level control
+ */
+
+static ssize_t dpm_control_show(struct subsystem * subsys, char * buf)
+{
+	unsigned long flags;
+	ssize_t len = 0;
+
+	if (dpm_lock_interruptible())
+		return -ERESTARTSYS;
+
+	if (!dpm_enabled) {
+		len += sprintf(buf, "disabled\n");
+	} else {
+		spin_lock_irqsave(&dpm_policy_lock, flags);
+		len += sprintf(buf,"enabled %s %d %s %s %s\n",
+			       dpm_active_policy->name,
+			       dpm_active_state,
+			       dpm_state_names[dpm_active_state],
+			       dpm_classopt_name(dpm_active_policy,
+						 dpm_active_state),
+			       dpm_active_opt ? dpm_active_opt->name : "[none]");
+		spin_unlock_irqrestore(&dpm_policy_lock, flags);
+	}
+
+	dpm_unlock();
+	return len;
+}
+
+static ssize_t dpm_control_store(struct subsystem * subsys, const char * buf,
+				 size_t n)
+{
+	int error = 0;
+
+	if (strncmp(buf, "init", 4) == 0) {
+		error = dynamicpower_init();
+	} else if (strncmp(buf, "enable", 6) == 0) {
+		error = dynamicpower_enable();
+	} else if (strncmp(buf, "disable", 7) == 0) {
+		error = dynamicpower_disable();
+	} else if (strncmp(buf, "terminate", 9) == 0) {
+		error = dynamicpower_terminate();
+	} else
+		error = -EINVAL;
+
+        return error ? error : n;
+}
+
+dpm_attr(control,dpm_control);
+
+static struct attribute * g[] = {
+        &dpm_control_attr.attr,
+        NULL,
+};
+
+static struct attribute_group dpm_attr_group = {
+        .attrs = g,
+};
+
+decl_subsys(dpm, NULL, NULL);
+
+/*
+ * policy
+ */
+
+struct dpm_policy_attribute {
+        struct attribute        attr;
+        ssize_t (*show)(struct kobject * kobj, char * buf);
+        ssize_t (*store)(struct kobject * kobj, const char * buf, size_t count);
+};
+
+#define to_policy(obj) container_of(obj,struct dpm_policy,kobj)
+#define to_policy_attr(_attr) container_of(_attr,struct dpm_policy_attribute,attr)
+
+static struct kobject dpm_policy_kobj = {
+	.kset = &dpm_subsys.kset,
+};
+
+static ssize_t policy_control_show(struct subsystem * subsys, char * buf)
+{
+	ssize_t len = 0;
+	struct list_head  * p;
+
+	if (dpm_lock_interruptible())
+		return -ERESTARTSYS;
+
+	len += sprintf(buf + len, "policies: ");
+
+	list_for_each(p, &dpm_policies) {
+		len += sprintf(buf + len, "%s ",
+			       ((struct dpm_policy *)
+				list_entry(p, struct dpm_policy, list))->name);
+	}
+
+	len += sprintf(buf + len, "\n");
+	dpm_unlock();
+	return len;
+}
+
+static ssize_t policy_control_store(struct subsystem * subsys, const char * buf,
+				   size_t n)
+{
+	int error = 0;
+	char *tbuf = NULL;
+	char *token[MAXTOKENS];
+	int ntoks = tokenizer(&tbuf, buf, n, (char **) &token, MAXTOKENS);
+
+	if (ntoks <= 0) {
+		error = ntoks;
+		goto out;
+	}
+
+	if (strcmp(token[0],"create") == 0) {
+		error = dpm_create_policy(token[1], &token[2], ntoks - 2);
+	} else if (strcmp(token[0],"set") == 0) {
+		if (ntoks != 2)
+			printk("dpm: policy set requires 1 policy name argument\n");
+		else
+			error = dpm_set_policy(token[1]);
+	} else
+		error = -EINVAL;
+
+ out:
+	if (tbuf)
+		kfree(tbuf);
+        return error ? error : n;
+}
+
+static ssize_t active_policy_show(struct subsystem * subsys, char * buf)
+{
+	unsigned long flags;
+	ssize_t len = 0;
+
+	if (dpm_lock_interruptible())
+		return -ERESTARTSYS;
+
+	if (!dpm_enabled || (dpm_active_state == DPM_NO_STATE)) {
+		len += sprintf(buf + len, "[none]\n");
+	} else {
+		spin_lock_irqsave(&dpm_policy_lock, flags);
+		len += sprintf(buf + len,"%s\n",
+			       dpm_active_policy->name);
+		spin_unlock_irqrestore(&dpm_policy_lock, flags);
+	}
+
+	dpm_unlock();
+	return len;
+}
+
+static ssize_t active_policy_store(struct subsystem * subsys, const char * buf,
+				   size_t n)
+{
+	int error = 0;
+	char *tbuf = NULL;
+	char *token[MAXTOKENS];
+	int ntoks = tokenizer(&tbuf, buf, n, (char **) &token, MAXTOKENS);
+
+	if (ntoks <= 0) {
+		error = ntoks;
+		goto out;
+	}
+
+	error = dpm_set_policy(token[0]);
+
+ out:
+	if (tbuf)
+		kfree(tbuf);
+        return error ? error : n;
+}
+
+dpm_attr(control,policy_control);
+dpm_attr(active,active_policy);
+
+#ifdef CONFIG_DPM_STATS
+static ssize_t policy_stats_show(struct subsystem * subsys, char * buf)
+{
+	int len = 0;
+	struct dpm_policy *policy;
+	struct list_head *p;
+	unsigned long long total_time;
+
+	if (dpm_lock_interruptible())
+		return -ERESTARTSYS;
+
+	if (!dpm_enabled) {
+		dpm_unlock();
+		len += sprintf(buf + len, "DPM IS DISABLED\n");
+		return len;
+	}
+
+	for (p = dpm_policies.next; p != &dpm_policies; p = p->next) {
+		policy = list_entry(p, struct dpm_policy, list);
+		len += sprintf(buf + len, "policy: %s", policy->name);
+		total_time = policy->stats.total_time;
+		if (policy == dpm_active_policy)
+			total_time += (unsigned long long) dpm_time() -
+				policy->stats.start_time;
+		len += sprintf(buf + len, " ticks: %Lu times: %lu\n",
+			       (unsigned long long) dpm_time_to_usec(total_time),
+			       policy->stats.count);
+	}
+
+	dpm_unlock();
+	return len;
+}
+
+static ssize_t policy_stats_store(struct subsystem * subsys, const char * buf,
+				  size_t n)
+{
+	return n;
+}
+
+dpm_attr(stats, policy_stats);
+#endif /* CONFIG_DPM_STATS */
+
+static ssize_t a_policy_control_show(struct kobject * kobj, char * buf)
+{
+	struct dpm_policy *policy = to_policy(kobj);
+	ssize_t len = 0;
+	int i;
+
+	len += sprintf(buf + len, "ops/classes: ");
+
+	for (i = 0; i < DPM_STATES; i++)
+		len += sprintf(buf + len, "%s ", dpm_classopt_name(policy,i));
+
+	len += sprintf(buf + len, "\n");
+	return len;
+}
+
+static ssize_t a_policy_control_store(struct kobject * kobj, const char * buf,
+				      size_t n)
+{
+	struct dpm_policy *policy = to_policy(kobj);
+	int error = 0;
+	char *tbuf = NULL;
+	char *token[MAXTOKENS];
+	int ntoks = tokenizer(&tbuf, buf, n, (char **) &token, MAXTOKENS);
+
+	if (ntoks <= 0) {
+		error = ntoks;
+		goto out;
+	}
+
+	if (strcmp(token[0],"destroy") == 0) {
+		dpm_destroy_policy(policy->name);
+	} else
+		error = -EINVAL;
+
+ out:
+	if (tbuf)
+		kfree(tbuf);
+        return error ? error : n;
+}
+
+#define POLICY_STATE_ATTR(index) \
+static ssize_t policy_state ## index ## _show(struct kobject * kobj, \
+					      char * buf) \
+{ \
+	ssize_t len = 0; \
+	struct dpm_policy *policy = to_policy(kobj); \
+	len += sprintf(buf + len, "%s\n", policy->classopt[index].opt ? policy->classopt[index].opt->name :policy->classopt[index].class->name ); \
+	return len; \
+} \
+static ssize_t policy_state ## index ## _store(struct kobject * kobj, \
+					       const char * buf, \
+			      size_t n) \
+{ \
+	struct dpm_policy *policy = to_policy(kobj); \
+	struct dpm_classopt old_classopt; \
+	int ret; \
+ \
+	dpm_lock(); \
+	old_classopt = policy->classopt[index]; \
+	if ((ret = dpm_map_policy_state(policy,index,(char *)buf))) \
+		policy->classopt[index] = old_classopt; \
+	dpm_unlock(); \
+	return ret ? -EINVAL : n; \
+} \
+static struct dpm_policy_attribute policy_state ## index ## _attr = { \
+        .attr   = { \
+                .mode = 0644, \
+        }, \
+        .show   = policy_state ## index ## _show, \
+        .store  = policy_state ## index ## _store, \
+}; \
+
+#define MAX_POLICY_STATES 20
+POLICY_STATE_ATTR(0);
+POLICY_STATE_ATTR(1);
+POLICY_STATE_ATTR(2);
+POLICY_STATE_ATTR(3);
+POLICY_STATE_ATTR(4);
+POLICY_STATE_ATTR(5);
+POLICY_STATE_ATTR(6);
+POLICY_STATE_ATTR(7);
+POLICY_STATE_ATTR(8);
+POLICY_STATE_ATTR(9);
+POLICY_STATE_ATTR(10);
+POLICY_STATE_ATTR(11);
+POLICY_STATE_ATTR(12);
+POLICY_STATE_ATTR(13);
+POLICY_STATE_ATTR(14);
+POLICY_STATE_ATTR(15);
+POLICY_STATE_ATTR(16);
+POLICY_STATE_ATTR(17);
+POLICY_STATE_ATTR(18);
+POLICY_STATE_ATTR(19);
+
+static struct dpm_policy_attribute *policy_state_attr[MAX_POLICY_STATES] = {
+	&policy_state0_attr,
+	&policy_state1_attr,
+	&policy_state2_attr,
+	&policy_state3_attr,
+	&policy_state4_attr,
+	&policy_state5_attr,
+	&policy_state6_attr,
+	&policy_state7_attr,
+	&policy_state8_attr,
+	&policy_state9_attr,
+	&policy_state10_attr,
+	&policy_state11_attr,
+	&policy_state12_attr,
+	&policy_state13_attr,
+	&policy_state14_attr,
+	&policy_state15_attr,
+	&policy_state16_attr,
+	&policy_state17_attr,
+	&policy_state18_attr,
+	&policy_state19_attr,
+};
+
+static ssize_t
+policy_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
+{
+	struct dpm_policy_attribute * policy_attr = to_policy_attr(attr);
+	ssize_t ret = 0;
+
+	if (policy_attr->show)
+		ret = policy_attr->show(kobj,buf);
+	return ret;
+}
+
+static ssize_t
+policy_attr_store(struct kobject * kobj, struct attribute * attr,
+		  const char * buf, size_t count)
+{
+	struct dpm_policy_attribute * policy_attr = to_policy_attr(attr);
+	ssize_t ret = 0;
+
+	if (policy_attr->store)
+		ret = policy_attr->store(kobj,buf,count);
+	return ret;
+}
+
+static struct dpm_policy_attribute a_policy_control_attr = {
+        .attr   = {
+                .name = "control",
+                .mode = 0644,
+        },
+        .show   = a_policy_control_show,
+        .store  = a_policy_control_store,
+};
+
+static struct sysfs_ops policy_sysfs_ops = {
+	.show	= policy_attr_show,
+	.store	= policy_attr_store,
+};
+
+static struct attribute * policy_default_attrs[] = {
+	&a_policy_control_attr.attr,
+	NULL,
+};
+
+static struct kobj_type ktype_policy = {
+	.release        = dpm_kobj_release,
+	.sysfs_ops	= &policy_sysfs_ops,
+	.default_attrs	= policy_default_attrs,
+};
+
+void dpm_sysfs_new_policy(struct dpm_policy *policy)
+{
+	int i;
+
+	memset(&policy->kobj, 0, sizeof(struct kobject));
+	policy->kobj.kset = &dpm_subsys.kset;
+	kobject_set_name(&policy->kobj,policy->name);
+	policy->kobj.parent = &dpm_policy_kobj;
+	policy->kobj.ktype = &ktype_policy;
+	kobject_register(&policy->kobj);
+
+	for (i = 0; (i < DPM_STATES) && (i < MAX_POLICY_STATES); i++) {
+		policy_state_attr[i]->attr.name = dpm_state_names[i];
+		sysfs_create_file(&policy->kobj, &policy_state_attr[i]->attr);
+	}
+
+	return;
+}
+
+void dpm_sysfs_destroy_policy(struct dpm_policy *policy)
+{
+	kobject_unregister(&policy->kobj);
+	return;
+}
+
+/*
+ * class
+ */
+
+struct dpm_class_attribute {
+        struct attribute        attr;
+        ssize_t (*show)(struct kobject * kobj, char * buf);
+        ssize_t (*store)(struct kobject * kobj, const char * buf, size_t count);
+};
+
+#define to_class(obj) container_of(obj,struct dpm_class,kobj)
+#define to_class_attr(_attr) container_of(_attr,struct dpm_class_attribute,attr)
+
+static ssize_t class_control_show(struct subsystem * subsys, char * buf)
+{
+	ssize_t len = 0;
+	struct list_head  * p;
+
+	len += sprintf(buf + len, "classes: ");
+
+	list_for_each(p, &dpm_classes) {
+		len += sprintf(buf + len, "%s ",
+			       ((struct dpm_class *)
+				list_entry(p, struct dpm_class, list))->name);
+	}
+
+	len += sprintf(buf + len, "\nactive: %s\n",
+		       (dpm_enabled && dpm_active_class) ?
+		       dpm_active_class->name : "[none]");
+	return len;
+}
+
+static ssize_t class_control_store(struct subsystem * subsys, const char * buf,
+				   size_t n)
+{
+	int error = 0;
+	char *tbuf = NULL;
+	char *token[MAXTOKENS];
+	int ntoks = tokenizer(&tbuf, buf, n, (char **) &token, MAXTOKENS);
+
+	if (ntoks <= 0) {
+		error = ntoks;
+		goto out;
+	}
+
+	if (strcmp(token[0],"create") == 0) {
+		if (ntoks < 3)
+			printk("dpm: class create requires 1 name and at least one operating point argument\n");
+		else
+			error = dpm_create_class(token[1], &token[2], ntoks-2);
+	} else
+		error = -EINVAL;
+
+ out:
+	if (tbuf)
+		kfree(tbuf);
+        return error ? error : n;
+}
+
+static struct kobject dpm_class_kobj = {
+	.kset = &dpm_subsys.kset,
+};
+
+dpm_attr(control,class_control);
+
+static ssize_t a_class_control_show(struct kobject * kobj, char * buf)
+{
+	ssize_t len = 0;
+	struct dpm_class *class = to_class(kobj);
+	int i;
+
+	len += sprintf(buf + len, "ops: ");
+
+	for (i = 0; i < class->nops; i++)
+		len += sprintf(buf + len, "%s ", class->ops[i]->name);
+
+
+	len += sprintf(buf + len, "\n");
+	return len;
+}
+
+static ssize_t a_class_control_store(struct kobject * kobj, const char * buf,
+				      size_t n)
+{
+	return n;
+}
+
+static ssize_t
+class_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
+{
+	struct dpm_class_attribute * class_attr = to_class_attr(attr);
+	ssize_t ret = 0;
+
+	if (class_attr->show)
+		ret = class_attr->show(kobj,buf);
+	return ret;
+}
+
+static ssize_t
+class_attr_store(struct kobject * kobj, struct attribute * attr,
+		 const char * buf, size_t count)
+{
+	struct dpm_class_attribute * class_attr = to_class_attr(attr);
+	ssize_t ret = 0;
+
+	if (class_attr->store)
+		ret = class_attr->store(kobj,buf,count);
+	return ret;
+}
+
+static struct dpm_class_attribute a_class_control_attr = {
+        .attr   = {
+                .name = "control",
+                .mode = 0644,
+        },
+        .show   = a_class_control_show,
+        .store  = a_class_control_store,
+};
+
+static struct sysfs_ops class_sysfs_ops = {
+	.show	= class_attr_show,
+	.store	= class_attr_store,
+};
+
+static struct attribute * class_default_attrs[] = {
+	&a_class_control_attr.attr,
+	NULL,
+};
+
+static struct kobj_type ktype_class = {
+	.release        = dpm_kobj_release,
+	.sysfs_ops	= &class_sysfs_ops,
+	.default_attrs	= class_default_attrs,
+};
+
+void dpm_sysfs_new_class(struct dpm_class *class)
+{
+	memset(&class->kobj, 0, sizeof(struct kobject));
+	class->kobj.kset = &dpm_subsys.kset;
+	kobject_set_name(&class->kobj,class->name);
+	class->kobj.parent = &dpm_class_kobj;
+	class->kobj.ktype = &ktype_class;
+	kobject_register(&class->kobj);
+	return;
+}
+
+void dpm_sysfs_destroy_class(struct dpm_class *class)
+{
+	kobject_unregister(&class->kobj);
+	return;
+}
+
+
+/*
+ * op
+ */
+
+struct dpm_op_attribute {
+        struct attribute        attr;
+        ssize_t (*show)(struct kobject * kobj, char * buf);
+        ssize_t (*store)(struct kobject * kobj, const char * buf, size_t count);
+};
+
+#define to_op(obj) container_of(obj,struct dpm_opt,kobj)
+#define to_op_attr(_attr) container_of(_attr,struct dpm_op_attribute,attr)
+
+static ssize_t op_control_show(struct subsystem * subsys, char * buf)
+{
+	unsigned long flags;
+	ssize_t len = 0;
+
+	if (dpm_lock_interruptible())
+		return -ERESTARTSYS;
+
+	len += sprintf(buf + len, "active: ");
+
+	if (!dpm_enabled) {
+		len += sprintf(buf + len, "[none]\n");
+	} else {
+		spin_lock_irqsave(&dpm_policy_lock, flags);
+		len += sprintf(buf + len,"%s\n",
+			       dpm_active_opt ? dpm_active_opt->name : "[none]");
+		spin_unlock_irqrestore(&dpm_policy_lock, flags);
+	}
+
+	dpm_unlock();
+
+	len += sprintf(buf + len, "params: %d\n", DPM_PP_NBR);
+	return len;
+}
+
+static ssize_t op_control_store(struct subsystem * subsys, const char * buf,
+				size_t n)
+{
+	int error = 0;
+	char *tbuf = NULL;
+	char *token[MAXTOKENS];
+	int ntoks = tokenizer(&tbuf, buf, n, (char **) &token, MAXTOKENS);
+
+	if (ntoks <= 0) {
+		error = ntoks;
+		goto out;
+	}
+
+	if ((strcmp(token[0],"create") == 0) && (ntoks >= 2)) {
+		dpm_md_pp_t pp[DPM_PP_NBR];
+		int i;
+
+		for (i = 0; i < DPM_PP_NBR; i++) {
+			if (i >= ntoks - 2)
+				pp[i] = -1;
+			else
+				pp[i] = simple_strtol(token[i + 2],
+						      NULL, 0);
+		}
+
+		error = dpm_create_opt(token[1], pp, DPM_PP_NBR);
+	} else
+		error = -EINVAL;
+
+ out:
+	if (tbuf)
+		kfree(tbuf);
+        return error ? error : n;
+
+}
+
+dpm_attr(control,op_control);
+
+#ifdef CONFIG_DPM_STATS
+static ssize_t op_stats_show(struct subsystem * subsys, char * buf)
+{
+	int len = 0;
+	struct dpm_opt *opt;
+	struct list_head *p;
+	unsigned long long total_time;
+
+	if (dpm_lock_interruptible())
+		return -ERESTARTSYS;
+
+	if (!dpm_enabled) {
+		dpm_unlock();
+		len += sprintf(buf + len, "DPM IS DISABLED\n");
+		return len;
+	}
+
+	for (p = dpm_opts.next; p != &dpm_opts; p = p->next) {
+		opt = list_entry(p, struct dpm_opt, list);
+		len += sprintf(buf + len, "op: %s", opt->name);
+		total_time = opt->stats.total_time;
+		if (opt == dpm_active_opt)
+			total_time += (unsigned long long) dpm_time() -
+				opt->stats.start_time;
+		len += sprintf(buf + len, " ticks: %Lu times: %lu\n",
+			       (unsigned long long) dpm_time_to_usec(total_time),
+			       opt->stats.count);
+	}
+
+	dpm_unlock();
+	return len;
+}
+
+static ssize_t op_stats_store(struct subsystem * subsys, const char * buf,
+			      size_t n)
+{
+	return n;
+}
+
+dpm_attr(stats, op_stats);
+#endif /* CONFIG_DPM_STATS */
+
+
+static struct kobject dpm_op_kobj = {
+	.kset = &dpm_subsys.kset,
+};
+
+static ssize_t an_op_control_show(struct kobject * kobj, char * buf)
+{
+	ssize_t len = 0;
+	// struct dpm_opt *opt = to_op(kobj);
+
+	len += sprintf(buf + len, "\n");
+	return len;
+}
+
+static ssize_t an_op_control_store(struct kobject * kobj, const char * buf,
+				   size_t n)
+{
+	return n;
+}
+
+static struct dpm_op_attribute an_op_control_attr = {
+        .attr   = {
+                .name = "control",
+                .mode = 0644,
+        },
+        .show   = an_op_control_show,
+        .store  = an_op_control_store,
+};
+
+static ssize_t op_force_show(struct kobject * kobj, char * buf)
+{
+	ssize_t len = 0;
+	struct dpm_opt *opt = to_op(kobj);
+
+	len += sprintf(buf + len, "%d\n", opt->flags & DPM_OP_FORCE ? 1 : 0);
+	return len;
+}
+
+static ssize_t op_force_store(struct kobject * kobj, const char * buf,
+			      size_t n)
+{
+	struct dpm_opt *opt = to_op(kobj);
+
+	opt->flags = (opt->flags & ~DPM_OP_FORCE) |
+		(simple_strtol(buf, NULL, 0) ? DPM_OP_FORCE : 0);
+	return n;
+}
+
+static struct dpm_op_attribute op_force_attr = {
+        .attr   = {
+                .name = "force",
+                .mode = 0644,
+        },
+        .show   = op_force_show,
+        .store  = op_force_store,
+};
+
+#define OP_PARAM_ATTR(index) \
+static ssize_t op_param ## index ## _show(struct kobject * kobj, char * buf) \
+{ \
+	ssize_t len = 0; \
+	struct dpm_opt *opt = to_op(kobj); \
+	len += sprintf(buf + len, "%d\n", opt->pp[index]); \
+	return len; \
+} \
+static ssize_t op_param ## index ## _store(struct kobject * kobj, const char * buf, \
+			      size_t n) \
+{ \
+	struct dpm_opt *opt = to_op(kobj); \
+	int ret, oldval; \
+ \
+	oldval = opt->pp[index]; \
+	opt->pp[index] = simple_strtol(buf, NULL, 0); \
+	ret = dpm_md_init_opt(opt); \
+	if (ret) \
+		opt->pp[index] = oldval; \
+	return ret ? ret : n; \
+} \
+static struct dpm_op_attribute op_param ## index ## _attr = { \
+        .attr   = { \
+                .mode = 0644, \
+        }, \
+        .show   = op_param ## index ## _show, \
+        .store  = op_param ## index ## _store, \
+}; \
+
+#define MAX_OP_PARAMS 20
+OP_PARAM_ATTR(0);
+OP_PARAM_ATTR(1);
+OP_PARAM_ATTR(2);
+OP_PARAM_ATTR(3);
+OP_PARAM_ATTR(4);
+OP_PARAM_ATTR(5);
+OP_PARAM_ATTR(6);
+OP_PARAM_ATTR(7);
+OP_PARAM_ATTR(8);
+OP_PARAM_ATTR(9);
+OP_PARAM_ATTR(10);
+OP_PARAM_ATTR(11);
+OP_PARAM_ATTR(12);
+OP_PARAM_ATTR(13);
+OP_PARAM_ATTR(14);
+OP_PARAM_ATTR(15);
+OP_PARAM_ATTR(16);
+OP_PARAM_ATTR(17);
+OP_PARAM_ATTR(18);
+OP_PARAM_ATTR(19);
+
+static struct dpm_op_attribute *op_param_attr[MAX_OP_PARAMS] = {
+	&op_param0_attr,
+	&op_param1_attr,
+	&op_param2_attr,
+	&op_param3_attr,
+	&op_param4_attr,
+	&op_param5_attr,
+	&op_param6_attr,
+	&op_param7_attr,
+	&op_param8_attr,
+	&op_param9_attr,
+	&op_param10_attr,
+	&op_param11_attr,
+	&op_param12_attr,
+	&op_param13_attr,
+	&op_param14_attr,
+	&op_param15_attr,
+	&op_param16_attr,
+	&op_param17_attr,
+	&op_param18_attr,
+	&op_param19_attr,
+};
+
+static ssize_t
+op_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
+{
+	struct dpm_op_attribute * op_attr = to_op_attr(attr);
+	ssize_t ret = 0;
+
+	if (op_attr->show)
+		ret = op_attr->show(kobj,buf);
+	return ret;
+}
+
+static ssize_t
+op_attr_store(struct kobject * kobj, struct attribute * attr,
+	      const char * buf, size_t count)
+{
+	struct dpm_op_attribute * op_attr = to_op_attr(attr);
+	ssize_t ret = 0;
+
+	if (op_attr->store)
+		ret = op_attr->store(kobj,buf,count);
+	return ret;
+}
+
+static struct sysfs_ops op_sysfs_ops = {
+	.show	= op_attr_show,
+	.store	= op_attr_store,
+};
+
+static struct attribute * op_default_attrs[] = {
+	&an_op_control_attr.attr,
+	&op_force_attr.attr,
+	NULL,
+};
+
+static struct kobj_type ktype_op = {
+	.release        = dpm_kobj_release,
+	.sysfs_ops	= &op_sysfs_ops,
+	.default_attrs	= op_default_attrs,
+};
+
+void dpm_sysfs_new_op(struct dpm_opt *opt)
+{
+	int i;
+
+	memset(&opt->kobj, 0, sizeof(struct kobject));
+	opt->kobj.kset = &dpm_subsys.kset;
+	kobject_set_name(&opt->kobj,opt->name);
+	opt->kobj.parent = &dpm_op_kobj;
+	opt->kobj.ktype = &ktype_op;
+	kobject_register(&opt->kobj);
+
+	for (i = 0; (i < DPM_PP_NBR) && (i < MAX_OP_PARAMS); i++) {
+		op_param_attr[i]->attr.name = dpm_param_names[i];
+		sysfs_create_file(&opt->kobj, &op_param_attr[i]->attr);
+	}
+
+	return;
+}
+
+void dpm_sysfs_destroy_op(struct dpm_opt *opt)
+{
+	kobject_unregister(&opt->kobj);
+	return;
+}
+
+
+/*
+ * state
+ */
+
+
+static ssize_t state_control_show(struct subsystem * subsys, char * buf)
+{
+	ssize_t len = 0;
+	int i;
+
+	len += sprintf(buf + len, "states: ");
+
+	for (i = 0; i < DPM_STATES; i++) {
+		len += sprintf(buf + len, "%s ", dpm_state_names[i]);
+	}
+
+	len += sprintf(buf + len, "\ntask-states: min=%s norm=%s max=%s\n",
+		       dpm_state_names[DPM_TASK_STATE - DPM_TASK_STATE_LIMIT],
+		       dpm_state_names[DPM_TASK_STATE],
+		       dpm_state_names[DPM_TASK_STATE + DPM_TASK_STATE_LIMIT]);
+
+	return len;
+}
+
+static ssize_t state_control_store(struct subsystem * subsys, const char * buf,
+				   size_t n)
+{
+	return -EINVAL;
+}
+
+static ssize_t active_state_show(struct subsystem * subsys, char * buf)
+{
+	unsigned long flags;
+	ssize_t len = 0;
+
+	if (dpm_lock_interruptible())
+		return -ERESTARTSYS;
+
+	if (!dpm_enabled || (dpm_active_state == DPM_NO_STATE)) {
+		len += sprintf(buf + len, "[none]\n");
+	} else {
+		spin_lock_irqsave(&dpm_policy_lock, flags);
+		len += sprintf(buf + len,"%s\n",
+			       dpm_state_names[dpm_active_state]);
+		spin_unlock_irqrestore(&dpm_policy_lock, flags);
+	}
+
+	dpm_unlock();
+	return len;
+}
+
+static ssize_t active_state_store(struct subsystem * subsys, const char * buf,
+				  size_t n)
+{
+	int error = 0;
+	char *tbuf = NULL;
+	char *token[MAXTOKENS];
+	int ntoks = tokenizer(&tbuf, buf, n, (char **) &token, MAXTOKENS);
+
+	if (ntoks <= 0) {
+		error = ntoks;
+		goto out;
+	}
+
+	error = dpm_set_op_state(token[0]);
+
+ out:
+	if (tbuf)
+		kfree(tbuf);
+        return error ? error : n;
+}
+
+#ifdef CONFIG_DPM_STATS
+static ssize_t state_stats_show(struct subsystem * subsys, char * buf)
+{
+	unsigned long flags;
+	ssize_t len = 0;
+	int i;
+
+	spin_lock_irqsave(&dpm_policy_lock, flags);
+
+	for (i = 0; i < DPM_STATES; i++) {
+		unsigned long long total_time = dpm_state_stats[i].total_time;
+
+		if (i == dpm_active_state)
+			total_time += (unsigned long long) dpm_time() -
+				dpm_state_stats[i].start_time;
+
+		len += sprintf(buf + len, "state: %s", dpm_state_names[i]);
+                len += sprintf(buf + len, " ticks: %Lu",
+			       (unsigned long long) dpm_time_to_usec(total_time));
+		len += sprintf(buf + len, " times: %lu\n",
+			       dpm_state_stats[i].count);
+	}
+
+	spin_unlock_irqrestore(&dpm_policy_lock, flags);
+	return len;
+}
+
+static ssize_t state_stats_store(struct subsystem * subsys, const char * buf,
+				 size_t n)
+{
+        return n;
+}
+#endif /* CONFIG_DPM_STATS */
+
+static struct kobject dpm_state_kobj = {
+	.kset = &dpm_subsys.kset,
+};
+
+dpm_attr(control, state_control);
+dpm_attr(active, active_state);
+#ifdef CONFIG_DPM_STATS
+dpm_attr(stats, state_stats);
+#endif
+
+struct astate {
+	int index;
+	struct kobject kobj;
+};
+
+struct astate_attribute {
+        struct attribute        attr;
+        ssize_t (*show)(struct kobject * kobj, char * buf);
+        ssize_t (*store)(struct kobject * kobj, const char * buf, size_t count);
+};
+
+#define to_astate(obj) container_of(obj,struct astate,kobj)
+#define to_astate_attr(_attr) container_of(_attr,struct astate_attribute,attr)
+
+static ssize_t
+astate_attr_show(struct kobject * kobj, struct attribute * attr, char * buf)
+{
+	struct astate_attribute * astate_attr = to_astate_attr(attr);
+	ssize_t ret = 0;
+
+	if (astate_attr->show)
+		ret = astate_attr->show(kobj,buf);
+	return ret;
+}
+
+static ssize_t
+astate_attr_store(struct kobject * kobj, struct attribute * attr,
+		  const char * buf, size_t count)
+{
+	struct astate_attribute * astate_attr = to_astate_attr(attr);
+	ssize_t ret = 0;
+
+	if (astate_attr->store)
+		ret = astate_attr->store(kobj,buf,count);
+	return ret;
+}
+
+static int show_opconstraints(int state, char *buf)
+{
+	struct dpm_opt *opt;
+	int len = 0;
+
+	if (dpm_active_policy->classopt[state].opt) {
+		opt = dpm_active_policy->classopt[state].opt;
+
+		len += dpm_show_opconstraints(opt, buf);
+	}
+	else {
+		int i;
+
+		for (i = 0;
+		     i < dpm_active_policy->classopt[state].class->nops; i++) {
+			len += dpm_show_opconstraints(
+				dpm_active_policy->classopt[state].class->ops[i], buf);
+		}
+	}
+
+	return len;
+}
+
+static ssize_t astate_constraints_show(struct kobject * kobj, char * buf)
+{
+	struct astate *astate = to_astate(kobj);
+	ssize_t len = 0;
+
+	if (dpm_enabled && dpm_active_policy)
+		len = show_opconstraints(astate->index, buf);
+
+	return len;
+}
+
+static ssize_t astate_constraints_store(struct kobject * kobj,
+					const char * buf, size_t n)
+{
+	return n;
+}
+
+static struct astate_attribute astate_constraints_attr = {
+        .attr   = {
+                .name = "constraints",
+                .mode = 0644,
+        },
+        .show   = astate_constraints_show,
+        .store  = astate_constraints_store,
+};
+
+static struct sysfs_ops astate_sysfs_ops = {
+	.show	= astate_attr_show,
+	.store	= astate_attr_store,
+};
+
+static struct attribute * astate_default_attrs[] = {
+	&astate_constraints_attr.attr,
+	NULL,
+};
+
+static struct kobj_type ktype_astate = {
+	.release        = dpm_kobj_release,
+	.sysfs_ops	= &astate_sysfs_ops,
+	.default_attrs	= astate_default_attrs,
+};
+
+static struct astate astate[DPM_STATES];
+
+/*
+ * Init
+ */
+
+static int __init dpm_sysfs_init(void)
+{
+        int error, i;
+
+	error = subsystem_register(&dpm_subsys);
+        if (!error)
+                error = sysfs_create_group(&dpm_subsys.kset.kobj,&dpm_attr_group);
+	if (!error) {
+		kobject_set_name(&dpm_policy_kobj, "policy");
+		kobject_register(&dpm_policy_kobj);
+		sysfs_create_file(&dpm_policy_kobj, &policy_control_attr.attr);
+		sysfs_create_file(&dpm_policy_kobj, &active_policy_attr.attr);
+#ifdef CONFIG_DPM_STATS
+		sysfs_create_file(&dpm_policy_kobj, &policy_stats_attr.attr);
+#endif
+		kobject_set_name(&dpm_class_kobj, "class");
+		kobject_register(&dpm_class_kobj);
+		sysfs_create_file(&dpm_class_kobj, &class_control_attr.attr);
+		kobject_set_name(&dpm_op_kobj, "op");
+		kobject_register(&dpm_op_kobj);
+		sysfs_create_file(&dpm_op_kobj, &op_control_attr.attr);
+#ifdef CONFIG_DPM_STATS
+		sysfs_create_file(&dpm_op_kobj, &op_stats_attr.attr);
+#endif
+		kobject_set_name(&dpm_state_kobj, "state");
+		kobject_register(&dpm_state_kobj);
+		sysfs_create_file(&dpm_state_kobj, &state_control_attr.attr);
+		sysfs_create_file(&dpm_state_kobj, &active_state_attr.attr);
+#ifdef CONFIG_DPM_STATS
+		sysfs_create_file(&dpm_state_kobj, &state_stats_attr.attr);
+#endif
+
+		for (i = 0; i < DPM_STATES; i++) {
+			astate[i].index = i;
+			astate[i].kobj.kset = &dpm_subsys.kset;
+			kobject_set_name(&astate[i].kobj,dpm_state_names[i]);
+			astate[i].kobj.parent = &dpm_state_kobj;
+			astate[i].kobj.ktype = &ktype_astate;
+			kobject_register(&astate[i].kobj);
+		}
+	}
+
+        return error;
+}
+
+__initcall(dpm_sysfs_init);
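+
+/*
+ * A rough sketch of the sysfs layout created above.  The subsystem root and
+ * the per-op attribute names are defined earlier in this file and by the
+ * platform, so the exact paths below are illustrative only (assuming the
+ * subsystem registers as "dpm"):
+ *
+ *	/sys/dpm/policy/{control,active}	policy creation and selection
+ *	/sys/dpm/class/control			class creation
+ *	/sys/dpm/op/control			operating point creation
+ *	/sys/dpm/op/<op>/			per-op power parameters
+ *	/sys/dpm/state/{control,active}		operating state names/selection
+ *	/sys/dpm/state/<state>/constraints	constraints seen by each state
+ *
+ * The "stats" attributes appear under policy, op and state only when
+ * CONFIG_DPM_STATS is enabled.
+ */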
+
+/* /proc interface */
+
+int dpm_set_task_state_by_name(struct task_struct *task, char *buf, ssize_t n)
+{
+	int task_state;
+	int ret = -EINVAL;	/* stays -EINVAL if no state name matches */
+	char *tbuf = NULL;
+	char *token[MAXTOKENS];
+	int ntoks = tokenizer(&tbuf, buf, n, (char **) &token, MAXTOKENS);
+
+	if (ntoks <= 0) {
+		ret = ntoks;
+		goto out;
+	}
+
+	for (task_state = DPM_TASK_STATE - DPM_TASK_STATE_LIMIT;
+	     task_state <= DPM_TASK_STATE + DPM_TASK_STATE_LIMIT;
+	     task_state++)
+		if (strcmp(token[0], dpm_state_names[task_state]) == 0) {
+			task->dpm_state = task_state;
+
+			if (task == current)
+				dpm_set_os(task_state);
+
+			ret = 0;
+			break;
+		}
+
+out:
+	if (tbuf)
+		kfree(tbuf);
+
+	return ret;
+}
Index: linux-2.6.16/drivers/dpm/dpm.c
===================================================================
--- linux-2.6.16.orig/drivers/dpm/dpm.c	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/drivers/dpm/dpm.c	2006-04-11 06:35:40.000000000 +0000
@@ -0,0 +1,1117 @@
+/*
+ * drivers/dpm/dpm.c  Dynamic Power Management Policies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2002, International Business Machines Corporation
+ * All Rights Reserved
+ *
+ * Robert Paulsen
+ * IBM Linux Technology Center
+ * rpaulsen@us.ibm.com
+ * August, 2002
+ *
+ */
+
+/* TODO:
+
+   Rethink init/enable/disable: It may be redundant and/or unsafe
+   Fix initialization and stats
+*/
+
+#include <linux/dpm.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/preempt.h>
+
+#include <asm/semaphore.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#undef TRACE
+#if defined(TRACE)
+#define trace(args...) do { printk("TRACE: "); printk(args); } while(0)
+#else
+#define trace(args...) do {} while(0)
+#endif
+
+struct dpm_md dpm_md;
+
+static struct dpm_opt nop_op = {
+	.name  = "[nop]",
+	.flags = DPM_OP_NOP,
+};
+
+extern void dpm_force_off_constrainers(struct dpm_opt *opt);
+
+unsigned long dpm_compute_lpj(unsigned long ref, u_int div, u_int mult)
+{
+	unsigned long new_jiffy_l, new_jiffy_h;
+
+	/*
+	 * Recalculate loops_per_jiffy.  We do it this way to
+	 * avoid math overflow on 32-bit machines.  Maybe we
+	 * should make this architecture dependent?  If you have
+	 * a better way of doing this, please replace!
+	 *
+	 *    new = old * mult / div
+	 */
+	new_jiffy_h = ref / div;
+	new_jiffy_l = (ref % div) / 100;
+	new_jiffy_h *= mult;
+	new_jiffy_l = new_jiffy_l * mult / div;
+
+	return new_jiffy_h + new_jiffy_l * 100;
+}
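+
+/*
+ * Worked example (illustrative values only): with ref = 2994176, div = 300
+ * and mult = 600 the exact result of ref * mult / div is 5988352.  The
+ * split computation above gives new_jiffy_h = (2994176 / 300) * 600 =
+ * 5988000 and new_jiffy_l = ((2994176 % 300) / 100) * 600 / 300 = 2, for a
+ * return value of 5988200.  The small truncation error is the cost of
+ * staying within 32-bit arithmetic.
+ */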
+
+/****************************************************************************
+
+DPM Synchronization and Operating Point Changes
+===============================================
+
+There are two aspects to synchronization in DPM: first, the usual requirement
+of serializing access to shared data structures; and second, keeping the
+operating point in sync with the current operating state.  The second
+condition arises because setting an operating point may complete asynchronously
+for a number of reasons, whereas the operating state change that causes the
+operating point change succeeds immediately.
+
+Access to most of the global variables representing the current state of DPM
+and the current policy is protected by a spinlock, dpm_policy_lock.  The use
+of this lock appears in only a few critical places.
+
+Setting the operating point, reading the value of the current operating point
+or changing the current policy may only be done while holding the semaphore
+_dpm_lock.  Access to the _dpm_lock is abstracted by the dpm_lock() and
+dpm_unlock() calls as explained below.  (The semaphore should only be accessed
+this way to simplify future development).
+
+The _dpm_lock must be held (by a call to a dpm_lock function) by any caller of
+the interfaces that set the operating point, change the policy, or enable or
+disable DPM.  Note that the corresponding call to dpm_unlock() may be
+explicitly required, or implicit (see dpm_set_opt_async() below).
+
+For simplicity, the calls that create operating points and policies also use
+dpm_lock() and dpm_unlock() to protect access to the non-active policies as
+well. Since these are normally initialization calls, this should not interfere
+with the operation of the system once initialized.
+
+Three interfaces are provided for obtaining the _dpm_lock:
+
+void dpm_lock();
+int dpm_lock_interruptible();
+int dpm_trylock();
+
+dpm_lock_interruptible() returns -ERESTARTSYS if the wait for the _dpm_lock was
+interrupted, and dpm_trylock() returns -EBUSY if the semaphore is currently
+held.
+
+Once the _dpm_lock is held, two interfaces are provided for setting the
+operating point:
+
+int dpm_set_opt_async()
+int dpm_set_opt_sync();
+
+Neither of these interfaces takes parameters since under DPM the operating
+point to select is always implied by the current policy and operating state.
+If the system is already at the correct operating point then no change is
+required or made.  To avoid deadlock, the caller must not be holding the
+dpm_policy_lock when either of these calls is made.
+
+dpm_set_opt_async() launches a change in the operating point that will
+potentially terminate asynchronously.  This interface never blocks the caller,
+thus there is no guarantee that the system is actually running at the implied
+operating point when control returns to the caller. This call is used by
+dpm_set_os() during an operating state change.  Note that since this call terminates
+asynchronously, the call to dpm_unlock() is implicitly made when the operating
+point change is complete.  I.e., the caller obtains the _dpm_lock with
+dpm_lock(), calls dpm_set_opt_async(), then continues.
+
+dpm_set_opt_sync() launches a synchronous change in the operating point.  This
+call will block the caller as necessary during the call, thus it can only be
+issued from a process context.  When control returns to the caller, the caller
+can be sure that the implied operating point was set, and that the system is
+currently running at the correct operating point for the given policy and
+operating state.  This call is used by dpm_set_policy() and the device
+constraint update code to guarantee that the change to a new policy, or changes
+to operating point classes as a result of device constraints are reflected in
+the operating point.
+
+Note that regardless of whether an operating point change is synchronous or
+asynchronous, it is still possible that the operating state may change during
+the call.  Setting the operating point is (currently) not preemptible,
+therefore at the time that the operating point change is complete, it may no
+longer be the correct operating point for the operating state.  This condition
+is always handled by the dpm_set_opt*() routines, which will launch a tasklet
+to re-synchronize the operating point to the operating state.
+
+It is possible, due to poorly designed policies and asynchronous termination
+of operating point changes, that the operating point will always lag behind
+the operating state.  This is only a performance issue, not a
+correctness issue.  Since a valid policy has a valid operating point for every
+operating state, and changes to the policy and changes in devices constraints
+always use dpm_set_opt_sync(), there will never be a case where the current
+operating point does not support device constraints.
+
+****************************************************************************/
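+
+/*
+ * A minimal usage sketch of the rules above (illustrative only):
+ *
+ *	Process context, synchronous:
+ *
+ *		dpm_lock();
+ *		dpm_set_opt_sync();	(may block)
+ *		dpm_unlock();
+ *
+ *	Any context, asynchronous (dpm_unlock() is implied by the call):
+ *
+ *		if (! dpm_trylock())
+ *			dpm_set_opt_async();
+ *
+ * The second form is exactly what dpm_resync() below does.
+ */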
+
+/* currently installed policies and operating points */
+LIST_HEAD(dpm_policies);
+LIST_HEAD(dpm_classes);
+LIST_HEAD(dpm_opts);
+
+DECLARE_MUTEX(_dpm_lock);
+spinlock_t dpm_policy_lock = SPIN_LOCK_UNLOCKED;
+
+/* the currently active policy */
+struct dpm_policy *dpm_active_policy;
+
+/* the currently active operating state, class, and operating point */
+dpm_state_t dpm_active_state = DPM_NO_STATE;
+struct dpm_opt *dpm_active_opt;
+struct dpm_class *dpm_active_class;
+
+/* is DPM initialized and enabled? */
+int dpm_enabled;
+int dpm_initialized;
+
+#ifdef CONFIG_DPM_STATS
+#include <asm/div64.h>
+
+struct dpm_stats dpm_state_stats[DPM_STATES];
+
+/*
+ * Start counting DPM stats from the time DPM was enabled... in the case of
+ * operating states the stats are updated from the time userspace is started.
+ */
+
+void
+dpm_stats_reset(void)
+{
+	int i;
+
+	preempt_disable();
+	for (i = 0; i < DPM_STATES; i++) {
+		dpm_state_stats[i].total_time = 0;
+		dpm_state_stats[i].start_time = 0;
+		dpm_state_stats[i].count = 0;
+	}
+
+	if (dpm_active_state != DPM_NO_STATE) {
+		dpm_state_stats[dpm_active_state].start_time = dpm_time();
+		dpm_state_stats[dpm_active_state].count = 1;
+	}
+
+	preempt_enable();
+}
+
+
+unsigned long long
+dpm_update_stats(struct dpm_stats *new, struct dpm_stats *old)
+{
+	unsigned long long now = dpm_time();
+
+	if (old)
+		old->total_time += now - old->start_time;
+
+	if (new) {
+		new->start_time = now;
+		new->count += 1;
+	}
+
+	return now;
+}
+#else
+#define dpm_update_stats(a,b) do {} while (0)
+#define dpm_stats_reset() do {} while (0)
+#endif /* CONFIG_DPM_STATS */
+
+struct dpm_opt *
+dpm_choose_opt(struct dpm_policy *policy, int state)
+{
+	struct dpm_opt *opt = NULL;
+
+	if (policy->classopt[state].opt) {
+		opt = policy->classopt[state].opt;
+
+		if (opt->flags & DPM_OP_FORCE)
+			dpm_force_off_constrainers(opt);
+		else if (! dpm_check_constraints(opt))
+			opt = NULL;
+
+		dpm_active_class = NULL;
+	}
+	else {
+		int i;
+
+		for (i = 0; i < policy->classopt[state].class->nops; i++) {
+			if (dpm_check_constraints(
+				    policy->classopt[state].class->ops[i])) {
+				opt = policy->classopt[state].class->ops[i];
+				break;
+			}
+		}
+
+		dpm_active_class = policy->classopt[state].class;
+	}
+
+	return opt;
+}
+
+
+
+/*****************************************************************************
+ * dpm_next_opt() returns the operating point that needs to be activated next,
+ * or NULL if the operating point is up-to-date or the DPM system is disabled.
+ * Since this call looks at the value of the current operating point, it can
+ * only be made when the _dpm_lock is held.
+ *****************************************************************************/
+
+static inline struct dpm_opt *
+dpm_next_opt(void)
+{
+	struct dpm_opt *opt = NULL;
+
+	if (! spin_trylock(&dpm_policy_lock))
+		return NULL;
+	if (dpm_enabled && dpm_active_state != DPM_NO_STATE) {
+		opt = dpm_choose_opt(dpm_active_policy,dpm_active_state);
+		if (opt == dpm_active_opt)
+			opt = NULL;
+	}
+	spin_unlock(&dpm_policy_lock);
+	return opt;
+}
+
+/*****************************************************************************
+ * Set the operating point implied by the current DPM policy. These calls can
+ * only be made while holding _dpm_lock, and the release of
+ * _dpm_lock is implied by the call (see below).
+ *****************************************************************************/
+
+static struct dpm_opt temp_opt = { .name = "[System Operating Point]" };
+
+int
+dpm_set_opt(struct dpm_opt *new, unsigned flags)
+{
+	int error;
+
+	if (new->flags & DPM_OP_NOP) {
+		if (flags & DPM_UNLOCK)
+			dpm_unlock();
+		return 0;
+	}
+
+	/* Support for setting the operating point when DPM is not running, and
+	   setting the first operating point. */
+
+	if (!dpm_enabled || !dpm_active_opt) {
+		if (dpm_md_get_opt(&temp_opt)) {
+			printk(KERN_ERR "dpm_default_set_opt: "
+			      "DPM disabled and system "
+			      "operating point is illegal!\n");
+
+			if (flags & DPM_UNLOCK)
+				dpm_unlock();
+			return -EINVAL;
+		}
+		dpm_active_opt = &temp_opt;
+		dpm_active_class = NULL;
+	}
+
+	/*
+	 * IRQs are not disabled here, since in some cases scheduling is
+	 * needed to set an operating point (sleep mode only); the spinlock
+	 * should suffice.  If the machine-dependent code for a platform
+	 * needs interrupts turned off during its operating point change,
+	 * it must disable them itself.
+	 */
+	error = dpm_md.set_opt(dpm_active_opt, new);
+
+	if (error == 0) {
+		dpm_update_stats(&new->stats, &dpm_active_opt->stats);
+		dpm_active_opt = new;
+		mb();
+	}
+
+	if (flags & DPM_UNLOCK)
+		dpm_unlock();
+
+	return error;
+}
+
+/*****************************************************************************
+ * Set operating point asynchronously.  The _dpm_lock will be cleared whenever
+ * the change in operating point is complete.
+ *****************************************************************************/
+
+int
+dpm_set_opt_async(void)
+{
+	struct dpm_opt *opt = dpm_next_opt();
+
+	if (opt) {
+		dpm_trace(DPM_TRACE_SET_OPT_ASYNC, opt);
+		return dpm_set_opt(opt, DPM_UNLOCK);
+	} else {
+		dpm_trace(DPM_TRACE_SET_OPT_ASYNC, NULL);
+		dpm_unlock();
+		return 0;
+	}
+}
+
+/*****************************************************************************
+ * Set operating point synchronously.  The caller must clear _dpm_lock after the
+ * call returns.
+ *****************************************************************************/
+
+int
+dpm_set_opt_sync(void)
+{
+	struct dpm_opt *opt = dpm_next_opt();
+
+	if (opt) {
+		dpm_trace(DPM_TRACE_SET_OPT_SYNC, opt);
+		return dpm_set_opt(opt, DPM_SYNC);
+	} else
+		dpm_trace(DPM_TRACE_SET_OPT_SYNC, NULL);
+	return 0;
+}
+
+/*****************************************************************************
+ * Resynchronize the operating state and the operating point without
+ * blocking. If we don't get the lock it doesn't matter, since whenever the
+ * lock holder releases the lock the resynchronization will be tried again.
+ *****************************************************************************/
+
+static inline void
+dpm_resync(void)
+{
+
+	dpm_trace(DPM_TRACE_RESYNC);
+	if (! dpm_trylock())
+		dpm_set_opt_async();
+}
+
+void
+dpm_resync_task(unsigned long ignore)
+{
+	dpm_resync();
+}
+
+/*****************************************************************************
+ * unlock the DPM
+ *
+ * If the operating point and operating state are not in sync when _dpm_lock is
+ * released, a tasklet is launched to resynchronize them. A tasklet is used
+ * rather than simply calling dpm_set_opt directly to avoid deep recursions.
+ * (I'm not sure this has worked, though).
+ *
+ * (The locking functions are inline in dpm_policy.h)
+ *
+ * This is not static since it needs to be called from dpm_policy.c
+ *****************************************************************************/
+
+DECLARE_TASKLET(dpm_resync_tasklet, dpm_resync_task, 0);
+
+void
+dpm_unlock(void)
+{
+	int retry;
+
+	retry = dpm_next_opt() != NULL;
+	dpm_trace(DPM_TRACE_UNLOCK, retry);
+	up(&_dpm_lock);
+	if (retry)
+		tasklet_schedule(&dpm_resync_tasklet);
+}
+
+/*****************************************************************************
+ * Enter a new operating state for statistical purposes.  Returns 1 if the new
+ * state may require a change in operating point and 0 otherwise.
+ *
+ * The normal case that occurs during task scheduling, where we go from task
+ * state to task state, is quickly ignored, as are transitions to
+ * DPM_NO_STATE and changes made while DPM is not running.  Otherwise,
+ * dpm_enter_state() has advertised that we are in a new state, and indicates
+ * whether an operating point change is required.
+ *
+ * Note the system invariant that the operating point always eventually
+ * catches up with changes to the operating state.  This is what makes it
+ * correct here to check for common operating points.  We know
+ * that if a common operating point is not the current operating point, it
+ * will be soon.
+ *
+ * The 'quick' variant (in dpm.h) is called out separately to reduce latency
+ * for critical operating state changes where the following are known: 1) The
+ * dpm_policy_lock is held and/or interrupts are properly disabled.  2) DPM is
+ * enabled.  3) The new state is neither DPM_NO_STATE nor the same as the
+ * active state.  4) Any operating point change is being handled elsewhere.
+ *****************************************************************************/
+
+static int
+dpm_enter_state(int new_state)
+{
+	int ret = 0;
+
+	if (! spin_trylock(&dpm_policy_lock)) {
+		dpm_quick_enter_state(new_state);
+		return 0;
+	}
+
+	if ((new_state == dpm_active_state) ||
+	    (new_state == DPM_NO_STATE) ||
+	    !dpm_enabled) {
+		spin_unlock(&dpm_policy_lock);
+		return ret;
+	}
+
+	if ((dpm_active_policy->classopt[new_state].class !=
+	     dpm_active_policy->classopt[dpm_active_state].class) ||
+	    (dpm_active_policy->classopt[new_state].opt !=
+	     dpm_active_policy->classopt[dpm_active_state].opt))
+		ret = 1;
+
+	dpm_quick_enter_state(new_state);
+	spin_unlock(&dpm_policy_lock);
+	return ret;
+}
+
+
+/*****************************************************************************
+ * set operating state
+ *
+ * This is used by the kernel to inform the DPM that the operating state has
+ * changed and that a new operating point should (possibly) be set as a
+ * result.
+ *
+ * If an operating point change is required it is attempted. If we can't get
+ * the lock here, then the operating point change will be activated when the
+ * current lock holder releases the lock.
+ *****************************************************************************/
+
+void
+dpm_set_os(dpm_state_t new_state)
+{
+	dpm_trace(DPM_TRACE_SET_OS, new_state);
+	if (dpm_enter_state(new_state))
+		dpm_resync();
+}
+
+EXPORT_SYMBOL(dpm_set_os);
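+
+/*
+ * Illustrative call site (hypothetical; the real hooks live in the
+ * scheduler and platform code): on a context switch the kernel can
+ * advertise the per-task operating state of the incoming task and let DPM
+ * resynchronize the operating point as needed:
+ *
+ *	if (next->dpm_state != DPM_NO_STATE)
+ *		dpm_set_os(next->dpm_state);
+ */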
+
+/*****************************************************************************
+ * initialize the DPM
+ *****************************************************************************/
+int
+dynamicpower_init(void)
+{
+	trace("in dynamicpower_init\n");
+
+	if (dpm_initialized) {
+		trace("DPM already initialized");
+		return -EALREADY;
+	}
+
+	/* mutex-style semaphore for access to policies and opts */
+	init_MUTEX(&_dpm_lock);
+
+	dpm_active_policy = 0;	/* this leaves the DPM temporarily
+				   disabled until a policy is
+				   activated */
+	dpm_enabled = 0;
+	dpm_initialized = 1;
+	dpm_active_state = DPM_TASK_STATE;
+
+
+	trace("DPM is now initialized\n");
+
+	return 0;
+}
+
+/*****************************************************************************
+ * (temporarily) disable the DPM
+ *****************************************************************************/
+int
+dynamicpower_disable(void)
+{
+
+	trace("in dynamicpower_disable\n");
+
+	if (! dpm_enabled) {
+		trace("DPM already disabled");
+		return -EALREADY;
+	}
+
+	dpm_lock();
+
+	dpm_enabled = 0;
+	dpm_md_cleanup();
+	dpm_active_opt = NULL;
+	dpm_active_class = NULL;
+
+	dpm_unlock();
+
+	trace("DPM is now disabled\n");
+
+	return 0;
+}
+
+/*****************************************************************************
+ * re-enable the DPM
+ * dpm_enabled = 1 implies that DPM is initialized and there is an active
+ * policy. The 'enable' call is really designed to be used after a temporary
+ * 'disable'.  All that's required to start DPM is to initialize it and set a
+ * policy.
+ *****************************************************************************/
+
+/* Need to think through enable/disable */
+
+int
+dynamicpower_enable(void)
+{
+
+	trace("in dynamicpower_enable\n");
+
+	if (dpm_enabled) {
+		trace("DPM already enabled");
+		return -EALREADY;
+	}
+
+	dpm_lock();
+
+	if (dpm_active_policy) {
+		dpm_enabled = 1;
+		mb();
+		dpm_md_startup();
+		dpm_stats_reset();
+		dpm_set_opt_sync();
+		trace("DPM is now enabled\n");
+	} else {
+		trace("No active policy, dpm_enable is ignored\n");
+	}
+
+	dpm_unlock();
+	return 0;
+}
+
+/*****************************************************************************
+ * Suspend/Resume DPM
+ * The current operating point is saved and restored. This
+ * interface is designed to be used by system suspend/resume code, to safely
+ * save/restore the DPM operating point across a system power-down, where the
+ * firmware may resume the system at a random operating point.  This does not
+ * require DPM to be enabled. Note that DPM remains locked across the
+ * suspend/resume.
+ *****************************************************************************/
+
+static struct dpm_opt suspend_opt = { .name = "[Suspended Op. Point]" };
+struct dpm_opt *suspended_opt;
+
+int
+dynamicpm_suspend(void)
+{
+	int err;
+
+	trace("in dpm_suspend\n");
+
+	dpm_lock();
+
+	if (dpm_enabled && dpm_active_opt) {
+		suspended_opt = dpm_active_opt;
+	} else {
+		suspended_opt = &suspend_opt;
+		if ((err = dpm_md_get_opt(suspended_opt))) {
+			printk(KERN_CRIT
+			       "DPM cannot suspend the current op. point!\n");
+			suspended_opt = NULL;
+			dpm_unlock();
+			return err;
+		}
+	}
+	return 0;
+}
+
+void
+dynamicpm_resume(void)
+{
+	trace("in dpm_resume\n");
+
+	if (suspended_opt) {
+		dpm_active_opt = NULL;	/* Force reinitialization of DPM */
+		dpm_active_class = NULL;
+		dpm_set_opt(suspended_opt, DPM_SYNC);
+		suspended_opt = NULL;
+	}
+	dpm_unlock();
+}
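+
+/*
+ * Illustrative use from platform suspend code (the surrounding function is
+ * hypothetical):
+ *
+ *	err = dynamicpm_suspend();	(saves the op. point, keeps _dpm_lock)
+ *	if (!err) {
+ *		... power down and firmware resume ...
+ *		dynamicpm_resume();	(restores the op. point, drops _dpm_lock)
+ *	}
+ */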
+
+
+/*****************************************************************************
+ * Create a named operating point
+ * The alternate entry point can be used to create anonymous operating points
+ *****************************************************************************/
+
+int
+_dpm_create_opt(struct dpm_opt **p, const char *name,
+		const dpm_md_pp_t * md_pp, int npp)
+{
+	struct dpm_opt *opt;
+	int ret;
+
+	/* get memory for opt */
+	opt = kmalloc(sizeof(struct dpm_opt), GFP_KERNEL);
+	if (!opt)
+		return -ENOMEM;
+	trace("%s @ 0x%08lx\n", name, (unsigned long)opt);
+	memset(opt, 0, sizeof(struct dpm_opt));
+	if (!(opt->name = (char *) kmalloc(strlen(name) + 1, GFP_KERNEL))) {
+		kfree(opt);
+		return -ENOMEM;
+	}
+
+	/* initialize and validate the opt */
+	strcpy(opt->name, name);
+	memcpy(&opt->pp, md_pp, npp * sizeof(dpm_md_pp_t));
+	ret = dpm_md_init_opt(opt);
+	if (ret) {
+		kfree(opt->name);
+		kfree(opt);
+		return ret;
+	}
+	INIT_LIST_HEAD(&opt->list);
+	*p = opt;
+	dpm_sysfs_new_op(opt);
+	return 0;
+}
+
+int
+dpm_create_opt(const char *name, const dpm_md_pp_t * md_pp, int npp)
+{
+	int ret;
+	struct dpm_opt *opt;
+
+	trace("in dpm_create_opt for \"%s\"\n", name);
+
+	dpm_lock();
+
+	/* ensure name is unique */
+	list_find(opt, name, dpm_opts, struct dpm_opt);
+	if (opt) {
+		dpm_unlock();
+		return -EEXIST;
+	}
+
+	/* create the opt */
+	ret = _dpm_create_opt(&opt, name, md_pp, npp);
+
+	/* add opt to our list */
+	if (!ret)
+		list_add(&opt->list, &dpm_opts);
+
+	dpm_unlock();
+	return ret;
+}
+
+/*****************************************************************************
+ * destroy an operating point
+ * Assumes _dpm_lock is held and the opt is no longer needed *anywhere*
+ *****************************************************************************/
+void
+destroy_opt(struct dpm_opt *opt)
+{
+	dpm_sysfs_destroy_op(opt);
+	list_del(&opt->list);
+	kfree(opt->name);
+	kfree(opt);
+}
+
+/*****************************************************************************
+ * create a named class of operating points (to be used to map to an operating
+ * state)
+ *****************************************************************************/
+
+int
+dpm_create_class(const char *name, char **op_names, unsigned nops)
+{
+	int i;
+	struct dpm_class *cls;
+
+	trace("in dpm_create_class for \"%s\"\n", name);
+
+	dpm_lock();
+
+	/* ensure class is not empty */
+	if (nops == 0) {
+		dpm_unlock();
+		return -EINVAL;
+	}
+
+	/* ensure name is unique */
+	list_find(cls, name, dpm_classes, struct dpm_class);
+	if (cls) {
+		dpm_unlock();
+		return -EEXIST;
+	}
+
+	/* get memory for class */
+	cls = (struct dpm_class *) kmalloc(sizeof (struct dpm_class), GFP_KERNEL);
+	if (!cls) {
+		dpm_unlock();
+		return -ENOMEM;
+	}
+	trace("%s @ 0x%08lx\n", name, (unsigned long)cls);
+	memset(cls, 0, sizeof (struct dpm_class));
+	/* get memory for array of pointers to operating points */
+	cls->ops =
+	    (struct dpm_opt **) kmalloc(nops * sizeof (struct dpm_opt *),
+					GFP_KERNEL);
+	if (!cls->ops) {
+		kfree(cls);
+		dpm_unlock();
+		return -ENOMEM;
+	}
+
+	/* get memory for class name */
+	cls->name = (char *) kmalloc(strlen(name) + 1, GFP_KERNEL);
+	if (!cls->name) {
+		kfree(cls->ops);
+		kfree(cls);
+		dpm_unlock();
+		return -ENOMEM;
+	}
+
+	/* find named op points and put their pointers in the class */
+	for (i = 0; i < nops; ++i) {
+		struct dpm_opt *opt;
+		list_find(opt, op_names[i], dpm_opts, struct dpm_opt);
+		if (!opt) {
+			kfree(cls->name);
+			kfree(cls->ops);
+			kfree(cls);
+			dpm_unlock();
+			return -ENOENT;
+		}
+		cls->ops[i] = opt;
+	}
+	strcpy(cls->name, name);
+	cls->nops = nops;
+	/* add class to our list */
+	list_add(&cls->list, &dpm_classes);
+
+	dpm_unlock();
+	dpm_sysfs_new_class(cls);
+
+	return 0;
+}
+
+/*****************************************************************************
+ * destroy a class
+ * Assumes _dpm_lock is held and the class is no longer needed *anywhere*
+ *****************************************************************************/
+void
+destroy_class(struct dpm_class *cls)
+{
+	dpm_sysfs_destroy_class(cls);
+	list_del(&cls->list);
+	kfree(cls->ops);
+	kfree(cls->name);
+	kfree(cls);
+}
+
+int
+dpm_map_policy_state(struct dpm_policy *policy, int state, char *classopt)
+{
+	list_find(policy->classopt[state].opt, classopt, dpm_opts,
+		  struct dpm_opt);
+
+	if(!policy->classopt[state].opt) {
+		list_find(policy->classopt[state].class, classopt,
+			  dpm_classes, struct dpm_class);
+		if(!policy->classopt[state].class)
+			return -1;
+	}
+
+	return 0;
+}
+
+/*****************************************************************************
+ * create power policy
+ *****************************************************************************/
+int
+dpm_create_policy(const char *name, char **classopt_names, int nopts)
+{
+	int i;
+	struct dpm_policy *policy;
+
+	trace("in dpm_install_policy for \"%s\" policy\n", name);
+
+	dpm_lock();
+
+	/* ensure unique name */
+	list_find(policy, name, dpm_policies, struct dpm_policy);
+	if (policy) {
+		dpm_unlock();
+		return -EEXIST;
+	}
+
+	/* get memory for policy */
+	policy =
+	    (struct dpm_policy *) kmalloc(sizeof (struct dpm_policy),
+					  GFP_KERNEL);
+	if (!policy) {
+		dpm_unlock();
+		return -ENOMEM;
+	}
+	trace("%s @ 0x%08lx\n", name, (unsigned long)policy);
+	memset(policy, 0, sizeof (struct dpm_policy));
+	/* get memory for policy name */
+	policy->name = (char *) kmalloc(strlen(name) + 1, GFP_KERNEL);
+	if (!policy->name) {
+		kfree(policy);
+		dpm_unlock();
+		return -ENOMEM;
+	}
+
+	/* initialize the policy */
+	for (i = 0; i < DPM_STATES; ++i) {
+		if ((i >= nopts) || !classopt_names[i]) {
+			policy->classopt[i].opt	= &nop_op;
+		} else {
+			if (dpm_map_policy_state(policy, i, classopt_names[i])
+			    < 0) {
+				kfree(policy->name);
+				kfree(policy);
+				dpm_unlock();
+				return -ENOENT;
+			}
+		}
+	}
+	strcpy(policy->name, name);
+
+	/* add policy to our list */
+	list_add(&policy->list, &dpm_policies);
+	dpm_sysfs_new_policy(policy);
+	trace("installed \"%s\" policy\n", name);
+	dpm_unlock();
+	return 0;
+}
+
+/*****************************************************************************
+ * destroy a power policy
+ * Assumes _dpm_lock is held and the policy is no longer needed *anywhere*
+ *****************************************************************************/
+void
+destroy_policy(struct dpm_policy *policy)
+{
+	dpm_sysfs_destroy_policy(policy);
+	list_del(&policy->list);
+	kfree(policy->name);
+	kfree(policy);
+}
+
+/*****************************************************************************
+ * uninstall power policy
+ *****************************************************************************/
+int
+dpm_destroy_policy(const char *name)
+{
+	struct dpm_policy *policy;
+
+	trace("processing destroy request for \"%s\"\n", name);
+
+	dpm_lock();
+
+	/* find the named policy */
+	list_find(policy, name, dpm_policies, struct dpm_policy);
+	if (!policy) {
+		dpm_unlock();
+		return -ENOENT;
+	}
+
+	/* can't uninstall active policy */
+	if (policy == dpm_active_policy) {
+		dpm_unlock();
+		return -EBUSY;
+	}
+
+	/* remove the policy */
+	destroy_policy(policy);
+
+	dpm_unlock();
+	trace("destroyed \"%s\" policy\n", name);
+	return 0;
+}
+
+/*
+ * set active power policy
+ */
+int
+dpm_set_policy(const char *name)
+{
+	struct dpm_policy *new_p;
+
+	trace("in dpm_set_policy for \"%s\" policy\n", name);
+
+	dpm_lock();
+
+	list_find(new_p, name, dpm_policies, struct dpm_policy);
+	if (!new_p) {
+		dpm_trace(DPM_TRACE_SET_POLICY, name, -ENOENT);
+		dpm_unlock();
+		return -ENOENT;	/* invalid name */
+	}
+	if (new_p == dpm_active_policy) {
+		dpm_trace(DPM_TRACE_SET_POLICY, name, 0);
+		trace("\"%s\" policy already activated\n", name);
+		dpm_unlock();
+		return 0;
+	}
+
+	dpm_update_stats(&new_p->stats,
+			 dpm_active_policy ? &dpm_active_policy->stats
+			 : NULL);
+
+	dpm_active_policy = new_p;
+
+	if (! dpm_enabled) {
+		dpm_enabled = 1;
+		dpm_md_startup();
+		dpm_stats_reset();
+	}
+
+	/* Start the policy synchronously */
+
+	mb();
+	dpm_trace(DPM_TRACE_SET_POLICY, name, 0);
+	dpm_set_opt_sync();
+	dpm_unlock();
+
+	return 0;
+}
+
+/*****************************************************************************
+ * set a raw op state
+ *****************************************************************************/
+
+int
+dpm_set_op_state(const char *name)
+{
+	int op_state;
+
+	for (op_state = 0; op_state < DPM_STATES; op_state++)
+		if (strcmp(dpm_state_names[op_state], name) == 0) {
+			dpm_set_os(op_state);
+			return 0;
+		}
+
+	return -ENOENT;
+}
+
+/*****************************************************************************
+ * terminate the DPM
+ *****************************************************************************/
+int
+dynamicpower_terminate(void)
+{
+	trace("in dynamicpower_terminate\n");
+
+	if (!dpm_initialized)
+		return 0;
+
+	dpm_lock();
+
+	dpm_md_cleanup();
+
+	dpm_initialized = 0;
+	dpm_enabled = 0;
+	dpm_active_opt = NULL;
+	dpm_active_class = NULL;
+
+	/* destroy all entities */
+	while (!list_empty(&dpm_policies))
+		destroy_policy(list_entry
+			       (dpm_policies.next, struct dpm_policy, list));
+	while (!list_empty(&dpm_opts))
+		destroy_opt(list_entry(dpm_opts.next, struct dpm_opt, list));
+	while (!list_empty(&dpm_classes))
+		destroy_class(list_entry(dpm_classes.next, struct dpm_class,
+					 list));
+
+
+	mb();
+	dpm_unlock();
+
+	trace("DPM is now terminated\n");
+	printk("Dynamic Power Management is now terminated\n");
+
+	return 0;
+}
+
+EXPORT_SYMBOL(dynamicpower_init);
+EXPORT_SYMBOL(dynamicpower_terminate);
+EXPORT_SYMBOL(dynamicpower_disable);
+EXPORT_SYMBOL(dynamicpower_enable);
+EXPORT_SYMBOL(dpm_create_opt);
+EXPORT_SYMBOL(dpm_create_class);
+EXPORT_SYMBOL(dpm_create_policy);
+EXPORT_SYMBOL(dpm_destroy_policy);
+EXPORT_SYMBOL(dpm_set_policy);
+
+/****************************************************************************
+ * install dynamic power policy support
+ ****************************************************************************/
+static int __init
+dpm_init_module(void)
+{
+	int i;
+
+	/* Set the NOP operating point params to all -1. */
+
+	for (i = 0; i < DPM_PP_NBR; i++)
+		nop_op.pp[i] = -1;
+
+	trace("DPM is now installed\n");
+	return 0;
+}
+
+/****************************************************************************
+ * remove dynamic power policy support
+ ****************************************************************************/
+static void __exit
+dpm_exit_module(void)
+{
+	/* disable power management policy system */
+	dynamicpower_terminate();
+
+	trace("DPM module is now unloaded\n");
+}
+
+module_init(dpm_init_module);
+module_exit(dpm_exit_module);
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
Index: linux-2.6.16/drivers/dpm/proc.c
===================================================================
--- linux-2.6.16.orig/drivers/dpm/proc.c	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/drivers/dpm/proc.c	2006-04-11 06:34:11.000000000 +0000
@@ -0,0 +1,601 @@
+/*
+ * drivers/dpm/proc.c  Dynamic Power Management /proc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2002, International Business Machines Corporation
+ * All Rights Reserved
+ *
+ * Bishop Brock
+ * IBM Research, Austin Center for Low-Power Computing
+ * bcbrock@us.ibm.com
+ * September, 2002
+ *
+ */
+
+#include <linux/dpm.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <asm/semaphore.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#define DEBUG
+#ifdef DEBUG
+#define DPRINT(args...) printk(KERN_CRIT args)
+#else
+#define DPRINT(args...) do {} while (0)
+#endif
+
+/****************************************************************************
+ * /proc/driver/dpm interfaces
+ *
+ * NB: Some of these are borrowed from the 405LP, and may need to be made
+ * machine independent.
+ ****************************************************************************/
+
+/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ * /proc/driver/dpm/cmd (Write-Only)
+ *
+ * Writing a string to this file is equivalent to issuing a DPM command.
+ * Currently only one command per "write" is allowed, and there is a maximum on
+ * the number of tokens that will be accepted (PAGE_SIZE / sizeof(char *)).
+ * DPM can be initialized by a linewise copy of a configuration file to this
+ * /proc file.
+ *
+ * DPM Control
+ * -----------
+ *
+ * init          : dynamicpower_init()
+ * enable        : dynamicpower_enable()
+ * disable       : dynamicpower_disable()
+ * terminate     : dynamicpower_terminate()
+ *
+ * Policy Control
+ * --------------
+ *
+ * set_policy <policy>          : Set the policy by name
+ * set_state <state>            : Set the operating state by name
+ * set_task_state <pid> <state> : Set the task state for a given pid, 0 = self
+ *
+ * Policy Creation
+ * ---------------
+ *
+ * create_opt <name> <pp0> ... <ppn>
+ *     Create a named operating point from DPM_PP_NBR parameters.  All
+ *     parameters must be given.  Parameter order and meaning are machine
+ *     dependent.
+ *
+ * create_class <name> <opt0> [ ... <optn> ]
+ *     Create a named class from 1 or more named operating points.  All
+ *     operating points must be defined before the call.
+ *
+ * create_policy <name> <classopt0> [ ... <classoptn> ]
+ *     Create a named policy from DPM_STATES classes or operating
+ *     points.  All operating points must be defined before the call.
+ *     The order is machine dependent.
+ *
+ *+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
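+
+/*
+ * Illustrative configuration, copied line by line to /proc/driver/dpm/cmd.
+ * The operating point, class and policy names here are hypothetical, and
+ * the number and meaning of the <pp> parameters are machine dependent
+ * (DPM_PP_NBR of them are required):
+ *
+ *	init
+ *	create_opt slow <pp0> ... <ppn>
+ *	create_opt fast <pp0> ... <ppn>
+ *	create_class run fast slow
+ *	create_policy default run ... run
+ *	set_policy default
+ *
+ * Setting a policy also enables DPM if it was not already enabled.
+ */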
+
+static void
+pwarn(char *command, int ntoks, char *requirement, int require)
+{
+	printk(KERN_WARNING "/proc/driver/dpm/cmd: "
+	       "Command %s requires %s%d arguments - %d were given\n",
+	       command, requirement, require - 1, ntoks - 1);
+}
+
+/*****************************************************************************
+ * set a task state
+ *****************************************************************************/
+
+static int
+dpm_set_task_state(pid_t pid, dpm_state_t task_state)
+{
+	struct task_struct *p;
+
+	if (task_state == -(DPM_TASK_STATE_LIMIT + 1))
+		task_state = DPM_NO_STATE;
+	else if (abs(task_state) > DPM_TASK_STATE_LIMIT) {
+		dpm_trace(DPM_TRACE_SET_TASK_STATE, pid, task_state, -EINVAL);
+		return -EINVAL;
+	} else
+		task_state += DPM_TASK_STATE;
+
+	read_lock(&tasklist_lock);
+
+	if (pid == 0)
+		p = current;
+	else
+		p = find_task_by_pid(pid);
+
+	if (!p) {
+		read_unlock(&tasklist_lock);
+		dpm_trace(DPM_TRACE_SET_TASK_STATE, pid, task_state, -ENOENT);
+		return -ENOENT;
+	}
+
+	p->dpm_state = task_state;
+	read_unlock(&tasklist_lock);
+
+	dpm_trace(DPM_TRACE_SET_TASK_STATE, pid, task_state, 0);
+
+	if (pid == 0)
+		dpm_set_os(p->dpm_state);
+
+
+	return 0;
+}
+
+
+static int
+write_proc_dpm_cmd (struct file *file, const char *buffer,
+		    unsigned long count, void *data)
+{
+	char *buf, *tok, **tokptrs;
+	char *whitespace = " \t\r\n";
+	int ret = 0, ntoks;
+
+	if (current->uid != 0)
+		return -EACCES;
+	if (count == 0)
+		return 0;
+	if (!(buf = kmalloc(count + 1, GFP_KERNEL)))
+		return -ENOMEM;
+	if (copy_from_user(buf, buffer, count)) {
+		ret = -EFAULT;
+		goto out0;
+	}
+
+	buf[count] = '\0';
+
+	if (!(tokptrs = (char **)__get_free_page(GFP_KERNEL))) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	ret = -EINVAL;
+	ntoks = 0;
+	do {
+		buf = buf + strspn(buf, whitespace);
+		tok = strsep(&buf, whitespace);
+		if (*tok == '\0') {
+			if (ntoks == 0) {
+				ret = 0;
+				goto out1;
+			} else
+				break;
+		}
+		if (ntoks == (PAGE_SIZE / sizeof(char **)))
+			goto out1;
+		tokptrs[ntoks++] = tok;
+	} while(buf);
+
+	if (ntoks == 1) {
+		if (strcmp(tokptrs[0], "init") == 0) {
+			ret = dynamicpower_init();
+		} else if (strcmp(tokptrs[0], "enable") == 0) {
+			ret = dynamicpower_enable();
+		} else if (strcmp(tokptrs[0], "disable") == 0) {
+			ret = dynamicpower_disable();
+		} else if (strcmp(tokptrs[0], "terminate") == 0) {
+			ret = dynamicpower_terminate();
+		}
+	} else if (ntoks == 2) {
+		if (strcmp(tokptrs[0], "set_policy") == 0)
+			ret = dpm_set_policy(tokptrs[1]);
+		else if (strcmp(tokptrs[0], "set_state") == 0)
+			ret = dpm_set_op_state(tokptrs[1]);
+	} else {
+		if (strcmp(tokptrs[0], "set_task_state") == 0) {
+			if (ntoks != 3)
+				pwarn("set_task_state", ntoks, "", 3);
+			else
+				ret = dpm_set_task_state(simple_strtol(tokptrs[1],
+								       NULL, 0),
+							 simple_strtol(tokptrs[2],
+								       NULL, 0));
+		} else if (strcmp(tokptrs[0], "create_opt") == 0) {
+			if (ntoks != DPM_PP_NBR + 2)
+				pwarn("create_opt", ntoks,
+				      "", DPM_PP_NBR + 2);
+			else {
+				dpm_md_pp_t pp[DPM_PP_NBR];
+				int i;
+
+				for (i = 0; i < DPM_PP_NBR; i++)
+					pp[i] = simple_strtol(tokptrs[i + 2],
+							      NULL, 0);
+				ret = dpm_create_opt(tokptrs[1], pp, DPM_PP_NBR);
+			}
+
+		} else if (strcmp(tokptrs[0], "create_class") == 0) {
+			if (ntoks < 3)
+				pwarn("create_class", ntoks, ">= ", 3);
+			else
+				ret = dpm_create_class(tokptrs[1], &tokptrs[2],
+						       ntoks - 2);
+
+		} else if (strcmp(tokptrs[0], "create_policy") == 0) {
+			if (ntoks != (DPM_STATES + 2))
+				pwarn("create_policy", ntoks, "",
+				      DPM_STATES + 2);
+			else
+				ret = dpm_create_policy(tokptrs[1],
+							&tokptrs[2], ntoks-2);
+		}
+	}
+out1:
+	free_page((unsigned long)tokptrs);
+out0:
+	kfree(buf);
+	if (ret == 0)
+		return count;
+	else
+		return ret;
+}
+
+#ifdef CONFIG_DPM_STATS
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ * /proc/driver/dpm/stats (Read-Only)
+ *
+ * Reading this file produces the following line for each defined operating
+ * state:
+ *
+ * state_name total_time count opt_name
+ *
+ * Where:
+ *
+ * state_name = The operating state name.
+ * total_time = The 64-bit number of microseconds spent in this
+ *              operating state.
+ * count      = The 64-bit number of times this operating state was entered.
+ * opt_name   = The name of the operating point currently assigned to this
+ *              operating state.
+ *
+ *+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
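+
+/*
+ * Example of the resulting format (the state and operating point names
+ * shown are hypothetical and machine dependent):
+ *
+ *	<state_name> 0x<16 hex digits> 0x<16 hex digits> <opt_name>
+ *	        task 0x00000000004c4b40 0x000000000000002a fast
+ */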
+
+static int
+sprintf_u64(char *buf, int fill, char *s, u64 ul)
+{
+	int len = 0;
+	u32 u, l;
+
+	u = (u32)((ul >> 32) & 0xffffffffU);
+	l = (u32)(ul & 0xffffffffU);
+
+	len += sprintf(buf + len, "%s", s);
+	if (fill)
+		len += sprintf(buf + len, "0x%08x%08x", u, l);
+	else {
+		if (u)
+			len += sprintf(buf + len, "0x%x%x", u, l);
+		else
+			len += sprintf(buf + len, "0x%x", l);
+	}
+	return len;
+}
+
+/*****************************************************************************
+ * get statistics for all operating states
+ *****************************************************************************/
+
+int
+dpm_get_os_stats(struct dpm_stats *stats)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dpm_policy_lock, flags);
+	memcpy(stats, dpm_state_stats, DPM_STATES * sizeof (struct dpm_stats));
+	stats[dpm_active_state].total_time +=
+		dpm_time() - stats[dpm_active_state].start_time;
+	spin_unlock_irqrestore(&dpm_policy_lock, flags);
+	return 0;
+}
+
+static int
+read_proc_dpm_stats(char *page, char **start, off_t offset,
+		    int count, int *eof, void *data)
+{
+	int i, len = 0;
+	struct dpm_stats stats[DPM_STATES];
+
+	if (!dpm_enabled) {
+		len += sprintf(page + len, "DPM IS DISABLED\n");
+		*eof = 1;
+		return len;
+	}
+
+	dpm_get_os_stats(stats);
+
+	for (i = 0; i < DPM_STATES; i++) {
+		len += sprintf(page + len, "%20s", dpm_state_names[i]);
+                len += sprintf_u64(page + len, 1, " ",
+				   (u64)stats[i].total_time);
+		len += sprintf_u64(page + len, 1, " ", (u64)stats[i].count);
+		len += sprintf(page + len, " %s\n",
+			       dpm_classopt_name(dpm_active_policy,i));
+	}
+
+	*eof = 1;
+	return len;
+}
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ * /proc/driver/dpm/opt_stats (Read-Only)
+ *
+ * Reading this file produces the following line for each defined operating
+ * point:
+ *
+ * name total_time count
+ *
+ * Where:
+ *
+ * name       = The operating point name.
+ * total_time = The 64-bit number of microseconds spent in this
+ *              operating point.
+ * count      = The 64-bit number of times this operating point was entered.
+ *
+ *+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+
+static int
+read_proc_dpm_opt_stats(char *page, char **start, off_t offset,
+			int count, int *eof, void *data)
+{
+	int len = 0;
+	struct dpm_opt *opt;
+	struct list_head *p;
+	unsigned long long total_time;
+
+	if (dpm_lock_interruptible())
+		return -ERESTARTSYS;
+
+	if (!dpm_enabled) {
+		dpm_unlock();
+		len += sprintf(page + len, "DPM IS DISABLED\n");
+		*eof = 1;
+		return len;
+	}
+
+	for (p = dpm_opts.next; p != &dpm_opts; p = p->next) {
+		opt = list_entry(p, struct dpm_opt, list);
+		len += sprintf(page + len, "%s", opt->name);
+		total_time = opt->stats.total_time;
+		if (opt == dpm_active_opt)
+			total_time += dpm_time() - opt->stats.start_time;
+		len += sprintf_u64(page + len, 0, " ", opt->stats.total_time);
+		len += sprintf_u64(page + len, 0, " ", opt->stats.count);
+		len += sprintf(page + len, "\n");
+	}
+
+	dpm_unlock();
+	*eof = 1;
+	return len;
+}
+#endif /* CONFIG_DPM_STATS */
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ * /proc/driver/dpm/state (Read-Only)
+ *
+ * Reading this file produces the following:
+ *
+ * policy_name os os_name os_opt_name opt_name hz
+ *
+ * Where:
+ *
+ * policy_name = The name of the current policy
+ * os          = The current operating state index
+ * os_name     = The current operating state name
+ * os_opt_name = The name of the implied operating point for the policy and
+ *               state.
+ * opt_name    = The name of the actual operating point; may be different if
+ *               the operating state and operating point are out of sync.
+ * hz          = The frequency of the statistics timer
+ *
+ * If DPM is disabled the line will appear as:
+ *
+ * N/A -1 N/A N/A N/A <hz>
+ *
+ *+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+
+static int
+read_proc_dpm_state(char *page, char **start, off_t offset,
+		    int count, int *eof, void *data)
+{
+	unsigned long flags;
+
+	int len = 0;
+
+	if (dpm_lock_interruptible())
+		return -ERESTARTSYS;
+
+	if (!dpm_enabled) {
+		len += sprintf(page + len, "N/A -1 N/A N/A N/A N/A\n");
+	} else {
+
+		spin_lock_irqsave(&dpm_policy_lock, flags);
+		len += sprintf(page + len,"%s %d %s %s %s\n",
+			       dpm_active_policy->name,
+			       dpm_active_state,
+			       dpm_state_names[dpm_active_state],
+			       dpm_classopt_name(dpm_active_policy,
+						 dpm_active_state),
+			       dpm_active_opt ? dpm_active_opt->name : "none");
+		spin_unlock_irqrestore(&dpm_policy_lock, flags);
+	}
+
+	dpm_unlock();
+	*eof = 1;
+	return len;
+}
+
+
+/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ * /proc/driver/dpm/debug (Read-Only)
+ *
+ * Whatever it needs to be
+ *++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
+
+#ifdef DEBUG
+static int
+read_proc_dpm_debug(char *page, char **start, off_t offset,
+		    int count, int *eof, void *data)
+{
+	int len = 0;
+
+	len += sprintf(page + len, "No DEBUG info\n");
+	*eof = 1;
+	return len;
+}
+#endif /* DEBUG */
+
+static struct proc_dir_entry *proc_dpm;
+static struct proc_dir_entry *proc_dpm_cmd;
+static struct proc_dir_entry *proc_dpm_state;
+
+#ifdef CONFIG_DPM_STATS
+static struct proc_dir_entry *proc_dpm_stats;
+static struct proc_dir_entry *proc_dpm_opt_stats;
+#endif
+
+#ifdef DEBUG
+static struct proc_dir_entry *proc_dpm_debug;
+#endif
+
+#ifdef CONFIG_DPM_TRACE
+static struct proc_dir_entry *proc_dpm_trace;
+#endif
+
+static int __init
+dpm_proc_init(void)
+{
+	proc_dpm = proc_mkdir("driver/dpm", NULL);
+
+	if (proc_dpm) {
+
+		proc_dpm_cmd =
+			create_proc_entry("cmd",
+					  S_IWUSR,
+					  proc_dpm);
+		if (proc_dpm_cmd)
+			proc_dpm_cmd->write_proc = write_proc_dpm_cmd;
+
+		proc_dpm_state =
+			create_proc_read_entry("state",
+					       S_IRUGO,
+					       proc_dpm,
+					       read_proc_dpm_state,
+					       NULL);
+#ifdef CONFIG_DPM_STATS
+		proc_dpm_stats =
+			create_proc_read_entry("stats",
+					       S_IRUGO,
+					       proc_dpm,
+					       read_proc_dpm_stats,
+					       NULL);
+		proc_dpm_opt_stats =
+			create_proc_read_entry("opt_stats",
+					       S_IRUGO,
+					       proc_dpm,
+					       read_proc_dpm_opt_stats,
+					       NULL);
+
+#endif /* CONFIG_DPM_STATS */
+
+#ifdef DEBUG
+		proc_dpm_debug =
+			create_proc_read_entry("debug",
+					       S_IRUGO,
+					       proc_dpm,
+					       read_proc_dpm_debug,
+					       NULL);
+#endif
+
+#ifdef CONFIG_DPM_TRACE
+		proc_dpm_trace =
+			create_proc_read_entry("trace",
+					       S_IWUSR | S_IRUGO,
+					       proc_dpm,
+					       read_proc_dpm_trace,
+					       NULL);
+		if (proc_dpm_trace)
+			proc_dpm_trace->write_proc = write_proc_dpm_trace;
+#endif
+	} else {
+		printk(KERN_ERR "Attempt to create /proc/driver/dpm failed\n");
+	}
+	return 0;
+}
+
+static void __exit
+dpm_proc_exit(void)
+{
+	if (proc_dpm_cmd) {
+		remove_proc_entry("cmd", proc_dpm);
+		proc_dpm_cmd = NULL;
+	}
+
+	if (proc_dpm_state) {
+		remove_proc_entry("state", proc_dpm);
+		proc_dpm_state = NULL;
+	}
+
+#ifdef CONFIG_DPM_STATS
+	if (proc_dpm_stats) {
+		remove_proc_entry("stats", proc_dpm);
+		proc_dpm_stats = NULL;
+	}
+
+	if (proc_dpm_opt_stats) {
+		remove_proc_entry("opt_stats", proc_dpm);
+		proc_dpm_opt_stats = NULL;
+	}
+#endif /* CONFIG_DPM_STATS */
+
+#ifdef DEBUG
+	if (proc_dpm_debug) {
+		remove_proc_entry("debug", proc_dpm);
+		proc_dpm_debug = NULL;
+	}
+#endif
+
+#ifdef CONFIG_DPM_TRACE
+	if (proc_dpm_trace) {
+		remove_proc_entry("trace", proc_dpm);
+		proc_dpm_trace = NULL;
+	}
+#endif
+
+	remove_proc_entry("driver/dpm", NULL);
+}
+
+
+
+module_init(dpm_proc_init);
+module_exit(dpm_proc_exit);
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
+
Index: linux-2.6.16/fs/proc/base.c
===================================================================
--- linux-2.6.16.orig/fs/proc/base.c	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/fs/proc/base.c	2006-04-11 06:34:11.000000000 +0000
@@ -167,6 +167,10 @@
 	PROC_TID_OOM_SCORE,
 	PROC_TID_OOM_ADJUST,
 
+#ifdef CONFIG_DPM
+	PROC_TGID_DPM,
+#endif
+
 	/* Add new entries before this */
 	PROC_TID_FD_DIR = 0x8000,	/* 0x8000-0xffff */
 };
@@ -221,6 +225,9 @@
 #ifdef CONFIG_AUDITSYSCALL
 	E(PROC_TGID_LOGINUID, "loginuid", S_IFREG|S_IWUSR|S_IRUGO),
 #endif
+#ifdef CONFIG_DPM
+	E(PROC_TGID_DPM,   	"dpmstate",  S_IFREG|S_IRUGO|S_IWUSR),
+#endif
 	{0,0,NULL,0}
 };
 static struct pid_entry tid_base_stuff[] = {
@@ -432,10 +439,10 @@
 		goto out_mm;	/* Shh! No looking before we're done */
 
  	len = mm->arg_end - mm->arg_start;
- 
+
 	if (len > PAGE_SIZE)
 		len = PAGE_SIZE;
- 
+
 	res = access_process_vm(task, mm->arg_start, buffer, len, 0);
 
 	// If the nul at the end of args has been overwritten, then
@@ -783,18 +790,18 @@
 		goto out;
 
 	ret = 0;
- 
+
 	mm = get_task_mm(task);
 	if (!mm)
 		goto out_free;
 
 	ret = -EIO;
- 
+
 	if (file->private_data != (void*)((long)current->self_exec_id))
 		goto out_put;
 
 	ret = 0;
- 
+
 	while (count > 0) {
 		int this_len, retval;
 
@@ -810,7 +817,7 @@
 			ret = -EFAULT;
 			break;
 		}
- 
+
 		ret += retval;
 		src += retval;
 		buf += retval;
@@ -862,7 +869,7 @@
 		copied += retval;
 		buf += retval;
 		dst += retval;
-		count -= retval;			
+		count -= retval;
 	}
 	*ppos = dst;
 	free_page((unsigned long) page);
@@ -1096,7 +1103,7 @@
 
 	if (!tmp)
 		return -ENOMEM;
-		
+
 	inode = dentry->d_inode;
 	path = d_path(dentry, mnt, tmp, PAGE_SIZE);
 	len = PTR_ERR(path);
@@ -1304,7 +1311,7 @@
 	struct proc_inode *ei;
 
 	/* We need a new inode */
-	
+
 	inode = new_inode(sb);
 	if (!inode)
 		goto out;
@@ -1462,6 +1469,56 @@
 	return ~0U;
 }
 
+#ifdef CONFIG_DPM
+#include <linux/dpm.h>
+
+extern int dpm_set_task_state_by_name(struct task_struct *, char *, ssize_t);
+
+static ssize_t proc_dpm_read(struct file * file, char __user * buf,
+			     size_t count, loff_t *ppos)
+{
+	struct task_struct *task = proc_task(file->f_dentry->d_inode);
+	int len;
+	char lbuf[80];
+
+	if (*ppos != 0)
+		return 0;
+
+	len = sprintf(lbuf,"%s\n", task->dpm_state == DPM_NO_STATE ?
+		      "none" : dpm_state_names[task->dpm_state]);
+
+	if (copy_to_user(buf, lbuf, len))
+		return -EFAULT;
+
+	*ppos += len;
+	return len;
+}
+
+static ssize_t proc_dpm_write(struct file * file, const char __user * buf,
+			      size_t count, loff_t *ppos)
+{
+	struct task_struct *task = proc_task(file->f_dentry->d_inode);
+	char lbuf[80];
+	int error;
+	ssize_t len;
+
+	len = (count < 80) ? count : 79;
+
+	if (copy_from_user(lbuf, buf, len))
+		return -EFAULT;
+
+	lbuf[len] = 0;
+	error = dpm_set_task_state_by_name(task, lbuf, len);
+	*ppos += count;
+	return error ? error : count;
+}
+
+static struct file_operations proc_dpm_operations = {
+	.read		= proc_dpm_read,
+	.write		= proc_dpm_write,
+};
+#endif
+
 /* SMP-safe */
 static struct dentry *proc_lookupfd(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
 {
@@ -1551,8 +1608,8 @@
 	if (!(page = __get_free_page(GFP_KERNEL)))
 		return -ENOMEM;
 
-	length = security_getprocattr(task, 
-				      (char*)file->f_dentry->d_name.name, 
+	length = security_getprocattr(task,
+				      (char*)file->f_dentry->d_name.name,
 				      (void*)page, count);
 	if (length >= 0)
 		length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
@@ -1562,32 +1619,32 @@
 
 static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
 				   size_t count, loff_t *ppos)
-{ 
+{
 	struct inode * inode = file->f_dentry->d_inode;
-	char *page; 
-	ssize_t length; 
-	struct task_struct *task = proc_task(inode); 
+	char *page;
+	ssize_t length;
+	struct task_struct *task = proc_task(inode);
 
-	if (count > PAGE_SIZE) 
-		count = PAGE_SIZE; 
+	if (count > PAGE_SIZE)
+		count = PAGE_SIZE;
 	if (*ppos != 0) {
 		/* No partial writes. */
 		return -EINVAL;
 	}
-	page = (char*)__get_free_page(GFP_USER); 
-	if (!page) 
+	page = (char*)__get_free_page(GFP_USER);
+	if (!page)
 		return -ENOMEM;
-	length = -EFAULT; 
-	if (copy_from_user(page, buf, count)) 
+	length = -EFAULT;
+	if (copy_from_user(page, buf, count))
 		goto out;
 
-	length = security_setprocattr(task, 
-				      (char*)file->f_dentry->d_name.name, 
+	length = security_setprocattr(task,
+				      (char*)file->f_dentry->d_name.name,
 				      (void*)page, count);
 out:
 	free_page((unsigned long) page);
 	return length;
-} 
+}
 
 static struct file_operations proc_pid_attr_operations = {
 	.read		= proc_pid_attr_read,
@@ -1603,7 +1660,7 @@
 static int get_tid_list(int index, unsigned int *tids, struct inode *dir);
 
 /* SMP-safe */
-static struct dentry *proc_pident_lookup(struct inode *dir, 
+static struct dentry *proc_pident_lookup(struct inode *dir,
 					 struct dentry *dentry,
 					 struct pid_entry *ents)
 {
@@ -1787,6 +1844,12 @@
 			inode->i_fop = &proc_loginuid_operations;
 			break;
 #endif
+#ifdef CONFIG_DPM
+		case PROC_TGID_DPM:
+			inode->i_op = &proc_fd_inode_operations;
+			inode->i_fop = &proc_dpm_operations;
+			break;
+#endif
 		default:
 			printk("procfs: impossible type (%d)",p->type);
 			iput(inode);
@@ -1888,7 +1951,7 @@
 	char tmp[30];
 	sprintf(tmp, "%d", current->tgid);
 	return ERR_PTR(vfs_follow_link(nd,tmp));
-}	
+}
 
 static struct inode_operations proc_self_inode_operations = {
 	.readlink	= proc_self_readlink,
@@ -1939,7 +2002,7 @@
  *
  * Shrink the /proc directory that was used by the just killed thread.
  */
-	
+
 void proc_pid_flush(struct dentry *proc_dentry)
 {
 	might_sleep();
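
The hunks above add a per-task /proc/<pid>/dpmstate file backed by proc_dpm_read() and
proc_dpm_write().  The userspace sketch below is not part of the patch; it only
illustrates how that file might be exercised, and the state name "task-1" is simply one
of the DPM_STATE_NAMES defined later in this patch.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64], buf[80];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/dpmstate", getpid());

	/* Read this task's current DPM operating state ("none" if unset). */
	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("dpm state: %s", buf);
	fclose(f);

	/* Request a task state by name; dpm_set_task_state_by_name() parses it. */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("task-1", f);
	fclose(f);
	return 0;
}
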
Index: linux-2.6.16/include/asm-arm/dpm.h
===================================================================
--- linux-2.6.16.orig/include/asm-arm/dpm.h	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/include/asm-arm/dpm.h	2006-04-11 06:34:11.000000000 +0000
@@ -0,0 +1,30 @@
+/*
+ * include/asm-arm/dpm.h       Arch-dependent DPM defines for ARM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2002, MontaVista Software <source@mvista.com>
+ *
+ * Based on include/asm-ppc/dpm.h by Robert Paulsen.  Copyright (C)
+ * 2002, International Business Machines Corporation, All Rights
+ * Reserved.
+ */
+
+#ifndef __ASM_DPM_H__
+#define __ASM_DPM_H__
+
+#include <asm/arch/dpm.h>
+
+#endif /* __ASM_DPM_H__ */
Index: linux-2.6.16/include/asm-arm/system.h
===================================================================
--- linux-2.6.16.orig/include/asm-arm/system.h	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/include/asm-arm/system.h	2006-04-11 06:34:11.000000000 +0000
@@ -169,6 +169,7 @@
 #define switch_to(prev,next,last)					\
 do {									\
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
+	dpm_set_os(current->dpm_state);					\
 } while (0)
 
 /*
Index: linux-2.6.16/include/asm-i386/dpm-centrino.h
===================================================================
--- linux-2.6.16.orig/include/asm-i386/dpm-centrino.h	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/include/asm-i386/dpm-centrino.h	2006-04-11 06:34:11.000000000 +0000
@@ -0,0 +1,30 @@
+/*
+ * include/asm-i386/dpm-centrino.h    DPM defines for Intel Centrino
+ *
+ * 2003 (c) MontaVista Software, Inc.  This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifndef __ASM_DPM_CENTRINO_H__
+#define __ASM_DPM_CENTRINO_H__
+
+/* MD operating point parameters */
+#define DPM_MD_CPU_FREQ		0  /* CPU freq */
+#define DPM_MD_V		1  /* core voltage */
+
+#define DPM_PP_NBR 2
+
+#define DPM_PARAM_NAMES \
+{ "cpu", "v" }
+
+/* Instances of this structure define valid Centrino operating points for DPM.
+   Voltages are represented in mV, and frequencies are represented in kHz. */
+
+struct dpm_md_opt {
+	unsigned int v;		/* Target voltage in mV */
+	unsigned int cpu;	/* CPU frequency in kHz */
+};
+
+#endif /* __ASM_DPM_CENTRINO_H__ */
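
For illustration only: the sketch below shows how the two Centrino parameters line up in
the pp[] array consumed by dpm_create_opt().  The 600 MHz / 956 mV pair is an invented
value, not one taken from this patch.

#include <linux/dpm.h>

/* Hypothetical operating-point parameters: slot 0 is the CPU clock in
   kHz, slot 1 the core voltage in mV, per dpm-centrino.h above. */
static dpm_md_pp_t example_centrino_pp[DPM_PP_NBR] = {
	[DPM_MD_CPU_FREQ]	= 600000,	/* 600 MHz */
	[DPM_MD_V]		= 956,		/* 956 mV */
};
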
Index: linux-2.6.16/include/asm-i386/dpm.h
===================================================================
--- linux-2.6.16.orig/include/asm-i386/dpm.h	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/include/asm-i386/dpm.h	2006-04-11 06:35:40.000000000 +0000
@@ -0,0 +1,86 @@
+/*
+ * include/asm-i386/dpm.h        Platform-dependent DPM defines for x86
+ *
+ * 2003 (c) MontaVista Software, Inc.  This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifndef __ASM_DPM_H__
+#define __ASM_DPM_H__
+
+/*
+ * machine dependent operating state
+ *
+ * An operating state is a cpu execution state that has implications for power
+ * management. The DPM will select operating points based largely on the
+ * current operating state.
+ *
+ * DPM_STATES is the number of supported operating states. Valid operating
+ * states are from 0 to DPM_STATES-1 but when setting an operating state the
+ * kernel should only specify a state from the set of "base states" and should
+ * do so by name.  During the context switch the new operating state is simply
+ * extracted from current->dpm_state.
+ *
+ * task states:
+ *
+ * APIs that reference task states use the range -(DPM_TASK_STATE_LIMIT + 1)
+ * through +DPM_TASK_STATE_LIMIT.  This value is added to DPM_TASK_STATE to
+ * obtain the downward or upward adjusted task state value. The
+ * -(DPM_TASK_STATE_LIMIT + 1) value is interpreted specially, and equates to
+ * DPM_NO_STATE.
+ *
+ * Tasks inherit their task operating states across calls to
+ * fork(). DPM_TASK_STATE is the default operating state for all tasks, and is
+ * inherited from init.  Tasks can change (or have changed) their task states
+ * using the DPM_SET_TASK_STATE variant of the sys_dpm() system call.  */
+
+#define DPM_IDLE_TASK_STATE  0
+#define DPM_IDLE_STATE       1
+#define DPM_BASE_STATES      2
+
+#define DPM_TASK_STATE_LIMIT 4
+#define DPM_TASK_STATE       (DPM_BASE_STATES + DPM_TASK_STATE_LIMIT)
+#define DPM_STATES           (DPM_TASK_STATE + DPM_TASK_STATE_LIMIT + 1)
+#define DPM_TASK_STATES      (DPM_STATES - DPM_BASE_STATES)
+
+#define DPM_STATE_NAMES                  \
+{ "idle-task", "idle",\
+  "task-4", "task-3", "task-2", "task-1",\
+  "task",                                \
+  "task+1", "task+2", "task+3", "task+4" \
+}
+
+#ifdef CONFIG_DPM_CENTRINO
+#include <asm/dpm-centrino.h>
+#endif
+
+#ifndef __ASSEMBLER__
+
+#include <linux/types.h>
+#include <asm/timex.h>
+
+#define dpm_time() get_cycles()
+
+#define dpm_time_to_usec(ticks) ({ \
+	unsigned long long quot = (unsigned long long) ticks * 10; \
+	do_div(quot, (unsigned long) (cpu_khz / 100)); \
+	quot; })
+
+/* Board-dependent routines. */
+
+struct dpm_opt;
+
+struct dpm_bd {
+	int (*startup)(void);				/* startup */
+	void (*cleanup)(void);				/* terminate */
+	int (*init_opt)(struct dpm_opt *opt);		/* init an opt */
+	int (*get_opt)(struct dpm_opt *opt);		/* get current opt */
+	int (*set_opt)(struct dpm_md_opt *md_opt);	/* set opt */
+};
+
+extern struct dpm_bd dpm_bd;
+
+#endif /* __ASSEMBLER__ */
+#endif /* __ASM_DPM_H__ */
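
With the values above, DPM_TASK_STATE works out to 6 and DPM_STATES to 11, matching the
eleven entries in DPM_STATE_NAMES.  The helper below is a sketch, not part of the patch,
showing how a relative task state in the documented range maps onto an absolute index
into dpm_state_names[]; range checking is omitted for brevity.

#include <linux/dpm.h>

static int task_state_to_index(int rel)
{
	/* -(DPM_TASK_STATE_LIMIT + 1), i.e. -5, is the special "no state" value. */
	if (rel == -(DPM_TASK_STATE_LIMIT + 1))
		return DPM_NO_STATE;

	/* "task-4" .. "task+4" land on indices 2 .. 10. */
	return DPM_TASK_STATE + rel;
}
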
Index: linux-2.6.16/include/linux/device.h
===================================================================
--- linux-2.6.16.orig/include/linux/device.h	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/include/linux/device.h	2006-04-11 06:34:11.000000000 +0000
@@ -336,6 +336,8 @@
 
 	struct list_head	dma_pools;	/* dma pools (if dma'ble) */
 
+	struct constraints	*constraints;
+
 	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
 					     override */
 
Index: linux-2.6.16/include/linux/dpm-trace.h
===================================================================
--- linux-2.6.16.orig/include/linux/dpm-trace.h	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/include/linux/dpm-trace.h	2006-04-11 06:34:11.000000000 +0000
@@ -0,0 +1,65 @@
+/*
+ * include/linux/dpm-trace.h  DPM event tracing
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2002, International Business Machines Corporation
+ * All Rights Reserved
+ *
+ * Robert Paulsen
+ * IBM Linux Technology Center
+ * rpaulsen@us.ibm.com
+ * August, 2002
+ *
+ */
+
+#ifndef __DPM_TRACE_H_
+#define __DPM_TRACE_H_
+
+#include <linux/config.h>
+
+#ifdef CONFIG_DPM_TRACE
+
+#define DPM_TRACE_SET_OPT_ASYNC  0x00000001
+#define DPM_TRACE_SET_OPT_SYNC   0x00000002
+#define DPM_TRACE_RESYNC         0x00000004
+#define DPM_TRACE_UNLOCK         0x00000008
+#define DPM_TRACE_SET_OS         0x00000010
+#define DPM_TRACE_SET_POLICY     0x00000020
+#define DPM_TRACE_START          0x00000040
+#define DPM_TRACE_STOP           0x00000080
+#define DPM_TRACE_SET_TASK_STATE 0x00000100
+
+#define DPM_TRACE_ALL            0x000001ff
+
+void dpm_trace(unsigned event, ...);
+void dpm_trace_start(unsigned events);
+void dpm_trace_stop(void);
+void dpm_trace_reset(void);
+
+int
+read_proc_dpm_trace(char *page, char **start, off_t offset,
+		    int count, int *eof, void *data);
+int
+write_proc_dpm_trace(struct file *file, const char *buffer,
+		     unsigned long count, void *data);
+
+#else
+
+#define dpm_trace(args...) do {} while (0)
+
+#endif /* CONFIG_DPM_TRACE */
+
+#endif /*__DPM_TRACE_H_*/
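
A sketch of how the trace hooks declared above could be driven from kernel code when
CONFIG_DPM_TRACE is enabled; the event mask chosen here is arbitrary and the function
name is hypothetical.

#include <linux/dpm-trace.h>

static void example_trace_window(void)
{
	/* Record operating-point and operating-state changes only. */
	dpm_trace_start(DPM_TRACE_SET_OPT_SYNC |
			DPM_TRACE_SET_OPT_ASYNC |
			DPM_TRACE_SET_OS);

	/* ... workload of interest runs here ... */

	dpm_trace_stop();
	dpm_trace_reset();
}
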
Index: linux-2.6.16/include/linux/dpm.h
===================================================================
--- linux-2.6.16.orig/include/linux/dpm.h	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16/include/linux/dpm.h	2006-04-11 06:35:40.000000000 +0000
@@ -0,0 +1,409 @@
+/*
+ * include/linux/dpm.h  DPM policy management
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2002, International Business Machines Corporation
+ * All Rights Reserved
+ *
+ * Robert Paulsen
+ * IBM Linux Technology Center
+ * rpaulsen@us.ibm.com
+ * August, 2002
+ *
+ */
+
+#ifndef __DPM_H__
+#define __DPM_H__
+
+#include <linux/config.h>
+#include <linux/device.h>
+
+#define DPM_NO_STATE   -1
+
+#ifndef CONFIG_DPM
+
+/* The above and following constants must always be defined for the
+   benefit of the init task and system tasks, although they are
+   otherwise ignored if DPM is not configured. */
+
+#define DPM_TASK_STATE 0
+#define dpm_set_os(task_state) do {} while (0)
+
+#else /* CONFIG_DPM */
+
+#include <asm/dpm.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+#include <linux/notifier.h>
+
+/* max size of DPM names */
+enum {DPM_NAME_SIZE=256};
+
+#include <linux/dpm-trace.h>
+#include <linux/list.h>
+#include <asm/semaphore.h>
+#include <asm/atomic.h>
+
+/* statistics */
+struct dpm_stats {
+        unsigned long count;
+        unsigned long long total_time;
+        unsigned long long start_time;
+};
+
+extern struct dpm_stats dpm_state_stats[DPM_STATES];
+
+/* update statistics structures */
+extern unsigned long long dpm_update_stats(struct dpm_stats *new,
+					   struct dpm_stats *old);
+
+typedef int dpm_state_t;
+typedef int dpm_md_pp_t;
+
+/* A table of processor-dependent routines, must be initialized by
+   platform-dependent boot code.  None of the entries (that will actually be
+   called) are allowed to be NULL if DPM is enabled. */
+
+struct dpm_opt;
+
+struct dpm_md {
+	int	(*init_opt)(struct dpm_opt *opt);
+	int	(*set_opt)(struct dpm_opt *cur, struct dpm_opt *new);
+	int	(*get_opt)(struct dpm_opt *opt);
+	int	(*check_constraint)(struct constraint_param *param,
+				    struct dpm_opt *opt);
+	void	(*idle)(void);
+	void	(*startup)(void);
+	void	(*cleanup)(void);
+};
+
+
+/*****************************************************************************
+ * Search a list looking for a named entity.
+ * A pointer to the found element is put in the variable named by the
+ * "answer" argument (or it is set to zero if not found).
+ * The structure's type name is given by the "element_type" argument.
+ * The name being looked for is given by the "find_me" argument.
+ * The name of the stand-alone list_head is given by the "list_name" argument.
+ * Assumes the proper semaphore is held.
+ * Assumes the structure's list_head is named "list".
+ * Assumes the structure's name is in a field called "name"
+ *****************************************************************************/
+#define list_find(answer,find_me,list_name,element_type)        \
+        do {                                                    \
+                element_type            *elm;                   \
+                struct list_head        *scan;                  \
+                (answer)=0;                                     \
+                for(scan=list_name.next;scan!=&list_name;       \
+                                scan=scan->next) {              \
+                        elm=list_entry(scan,element_type,list); \
+                        if (strncmp((find_me),elm->name,        \
+                                        DPM_NAME_SIZE)==0) {    \
+                                (answer)=elm;                   \
+                                break;                          \
+                        }                                       \
+                }                                               \
+        } while(0)
+
+/* internal representation of an operating point */
+
+#define DPM_OP_FORCE	0x0001
+#define DPM_OP_NOP	0x0002
+
+struct dpm_opt {
+	char			*name;          /* name */
+	struct list_head	list;		/* all installed op points */
+	dpm_md_pp_t             pp[DPM_PP_NBR]; /* initialization params */
+	struct dpm_md_opt	md_opt;         /* machine dependent part */
+	int			constrained;	/* is this opt constrained? */
+	struct kobject		kobj;		/* kobject */
+	struct dpm_stats        stats;          /* statistics */
+	int			flags;
+};
+
+/* internal representation of a class of op points (to be mapped to an
+ * operating state */
+struct dpm_class {
+	char			*name;          /* name */
+	struct list_head	list;		/* all installed classes */
+	unsigned		nops;		/* nbr ops in this class */
+	struct dpm_opt		**ops;		/* the ops in this class */
+	struct kobject		kobj;		/* kobject */
+	struct dpm_stats        stats;          /* statistics */
+};
+
+/*
+ * temporary support for policies to map operating points to either
+ * operating pts or classes.  Only one field allowed to be set.
+ */
+
+struct dpm_classopt {
+	struct dpm_opt		*opt;
+	struct dpm_class	*class;
+};
+
+/* internal representation of an installed power policy */
+struct dpm_policy {
+	char			*name;          /* name */
+	struct list_head	list;		/* all installed policies */
+	struct dpm_classopt     classopt[DPM_STATES]; /* classes/op pts */
+	struct kobject		kobj;		/* kobject */
+	struct dpm_stats        stats;          /* statistics */
+};
+
+/*
+ * utility functions for internal use by DPM
+ */
+
+/* DPM semaphore locking. To simplify future expansion, don't 'down' _dpm_lock
+   directly.  Also, _dpm_lock must be 'up'ed only by dpm_unlock(). */
+
+extern struct semaphore _dpm_lock;
+
+static inline void
+dpm_lock(void)
+{
+        down(&_dpm_lock);
+}
+
+static inline int
+dpm_lock_interruptible(void)
+{
+        if (down_interruptible(&_dpm_lock))
+                return -ERESTARTSYS;
+        return 0;
+}
+
+static inline int
+dpm_trylock(void)
+{
+        if (down_trylock(&_dpm_lock))
+                return -EBUSY;
+        return 0;
+}
+
+void dpm_unlock(void);
+void dpm_idle(void);
+
+/* set operating state */
+void dpm_set_os(dpm_state_t state);
+
+/*
+ * names of DPM stuff for userspace interfaces
+ */
+
+extern char *dpm_state_names[DPM_STATES];
+extern char *dpm_param_names[DPM_PP_NBR];
+
+/* initialize/terminate the DPM */
+int dynamicpower_init(void);
+int dynamicpower_terminate(void);
+
+/* (temporarily) disable the DPM */
+int dynamicpower_disable(void);
+
+/* re-enable the DPM */
+int dynamicpower_enable(void);
+
+/* suspend/resume DPM across a system shutdown */
+int dynamicpm_suspend(void);
+void dynamicpm_resume(void);
+
+/* create operating point */
+int dpm_create_opt(const char *name, const dpm_md_pp_t *pp, int npp);
+
+/* create class of operating points */
+int dpm_create_class(const char *name, char **op_names, unsigned nops);
+
+/* create policy */
+int dpm_create_policy(const char *name, char **opt_names, int nopts);
+int dpm_map_policy_state(struct dpm_policy *policy, int state, char *classopt);
+
+/* destroy policy */
+int dpm_destroy_policy(const char *name);
+
+/* activate a power policy */
+int dpm_set_policy(const char *name);
+
+/* get name of active power policy */
+int dpm_get_policy(char *name);
+
+/* set a raw operating state */
+int dpm_set_op_state(const char *name);
+int dpm_set_opt(struct dpm_opt *opt, unsigned flags);
+
+/* choose unconstrained operating point from policy */
+extern struct dpm_opt *dpm_choose_opt(struct dpm_policy *policy, int state);
+
+
+/* constraints */
+int dpm_check_constraints(struct dpm_opt *opt);
+int dpm_default_check_constraint(struct constraint_param *param,
+				 struct dpm_opt *opt);
+int dpm_show_opconstraints(struct dpm_opt *opt, char * buf);
+
+/* driver scale callbacks */
+void dpm_driver_scale(int level, struct dpm_opt *newop);
+void dpm_register_scale(struct notifier_block *nb, int level);
+void dpm_unregister_scale(struct notifier_block *nb, int level);
+
+/* utils */
+extern void dpm_udelay(unsigned uS);
+extern void dpm_udelay_from(u64 start, unsigned uS);
+extern unsigned long dpm_compute_lpj(unsigned long ref, u_int div, u_int mult);
+
+/*
+ * sysfs interface
+ */
+
+extern void dpm_sysfs_new_policy(struct dpm_policy *policy);
+extern void dpm_sysfs_destroy_policy(struct dpm_policy *policy);
+extern void dpm_sysfs_new_class(struct dpm_class *class);
+extern void dpm_sysfs_destroy_class(struct dpm_class *class);
+extern void dpm_sysfs_new_op(struct dpm_opt *opt);
+extern void dpm_sysfs_destroy_op(struct dpm_opt *opt);
+
+extern int proc_pid_dpm_read(struct task_struct*,char*);
+
+
+/*
+ * global data for power management system
+ */
+
+/* currently installed policies, classes and operating points */
+extern struct list_head		dpm_policies;
+extern struct list_head		dpm_classes;
+extern struct list_head		dpm_opts;
+extern struct semaphore		dpm_policy_sem;
+extern spinlock_t		dpm_policy_lock;
+
+/* the currently active policy, class, state, point */
+extern struct dpm_policy	*dpm_active_policy;
+extern struct dpm_class		*dpm_active_class;
+extern dpm_state_t		dpm_active_state;
+extern struct dpm_opt		*dpm_active_opt;
+
+/* is DPM initialized and enabled? */
+extern int			dpm_initialized;
+extern int			dpm_enabled;
+
+static inline void
+dpm_quick_enter_state(int new_state)
+{
+#ifdef CONFIG_DPM_STATS
+	dpm_update_stats(new_state != DPM_NO_STATE ?
+			 &dpm_state_stats[new_state] : NULL,
+			 dpm_active_state != DPM_NO_STATE ?
+			 &dpm_state_stats[dpm_active_state] : NULL);
+#endif
+
+        dpm_active_state = new_state;
+}
+
+/* Flags for dpm_set_opt().  By default, dpm_set_opt() is guaranteed not
+   to block the caller, and will arrange to complete asynchronously if
+   necessary.
+
+   DPM_SYNC    The operating point is guaranteed to be set when the call
+               returns. The call may block.
+
+   DPM_UNLOCK  The caller requires dpm_md_set_opt() to unlock the DPM system
+               once the operating point is set.
+*/
+
+#define DPM_SYNC      0x01
+#define DPM_UNLOCK    0x02
+
+/*
+ * Common machine-dependent and board-dependent function wrappers.
+ */
+
+extern struct dpm_md dpm_md;
+
+static inline void
+dpm_md_startup(void)
+{
+        if (dpm_md.startup)
+                dpm_md.startup();
+}
+
+
+static inline void
+dpm_md_cleanup(void)
+{
+        if (dpm_md.cleanup)
+                dpm_md.cleanup();
+}
+
+
+static inline void
+dpm_md_idle(void)
+{
+        if (dpm_md.idle)
+                dpm_md.idle();
+}
+
+
+/* Machine-dependent operating point creating/query/setting */
+
+
+static inline int
+dpm_md_init_opt(struct dpm_opt *opt)
+{
+        if (dpm_md.init_opt)
+                return dpm_md.init_opt(opt);
+        return 0;
+}
+
+static inline int
+dpm_md_set_opt(struct dpm_opt *cur, struct dpm_opt *new)
+{
+        if (dpm_md.set_opt) {
+                return dpm_md.set_opt(cur, new);
+	}
+        return 0;
+}
+
+static inline int
+dpm_md_get_opt(struct dpm_opt *opt)
+{
+        if (dpm_md.get_opt)
+                return dpm_md.get_opt(opt);
+        return 0;
+}
+
+static inline int
+dpm_md_check_constraint(struct constraint_param *param, struct dpm_opt *opt)
+{
+        return dpm_md.check_constraint ?
+		dpm_md.check_constraint(param, opt) : 1;
+}
+
+/*
+ * Helper functions
+ */
+
+static inline char *
+dpm_classopt_name(struct dpm_policy *policy, int state)
+{
+	return policy->classopt[state].opt ?
+		policy->classopt[state].opt->name :
+		policy->classopt[state].class->name;
+}
+
+#endif /* CONFIG_DPM */
+#endif /*__DPM_H__*/
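
The sketch below, which is not part of the patch, ties a few of the helpers declared
above together: it looks up an installed operating point by name with list_find() and
forces it synchronously, letting dpm_set_opt() release _dpm_lock via DPM_UNLOCK.  It
assumes a context that may sleep; the function name is illustrative.

#include <linux/dpm.h>

static int example_force_opt(const char *name)
{
	struct dpm_opt *opt;

	dpm_lock();
	list_find(opt, name, dpm_opts, struct dpm_opt);
	if (!opt) {
		dpm_unlock();
		return -ENOENT;
	}

	/* Blocks until the operating point is in effect, then unlocks DPM. */
	return dpm_set_opt(opt, DPM_SYNC | DPM_UNLOCK);
}
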
Index: linux-2.6.16/include/linux/init_task.h
===================================================================
--- linux-2.6.16.orig/include/linux/init_task.h	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/include/linux/init_task.h	2006-04-11 06:34:11.000000000 +0000
@@ -1,6 +1,7 @@
 #ifndef _LINUX__INIT_TASK_H
 #define _LINUX__INIT_TASK_H
 
+#include <linux/dpm.h>
 #include <linux/file.h>
 #include <linux/rcupdate.h>
 
@@ -116,6 +117,7 @@
 		.list = LIST_HEAD_INIT(tsk.pending.list),		\
 		.signal = {{0}}},					\
 	.blocked	= {{0}},					\
+	.dpm_state	= DPM_TASK_STATE,				\
 	.alloc_lock	= SPIN_LOCK_UNLOCKED,				\
 	.proc_lock	= SPIN_LOCK_UNLOCKED,				\
 	.journal_info	= NULL,						\
Index: linux-2.6.16/include/linux/pm.h
===================================================================
--- linux-2.6.16.orig/include/linux/pm.h	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/include/linux/pm.h	2006-04-11 06:35:28.000000000 +0000
@@ -131,6 +131,34 @@
 extern struct pm_ops *pm_ops;
 extern int pm_suspend(suspend_state_t state);
 
+struct device;
+
+struct constraint_param {
+	int id;
+	int min;
+	int max;
+};
+
+#define DPM_CONSTRAINT_PARAMS_MAX 20
+
+struct constraints {
+	int asserted;
+	int count;
+	int violations;
+	struct constraint_param param[DPM_CONSTRAINT_PARAMS_MAX];
+	struct list_head entry;
+};
+
+enum {
+	SCALE_PRECHANGE,
+	SCALE_POSTCHANGE,
+	SCALE_MAX
+};
+
+extern void assert_constraints(struct constraints *);
+extern void deassert_constraints(struct constraints *);
+extern void power_event(char *eventstr);
+extern void device_power_event(struct device * dev, char *eventstr);
 
 /*
  * Device power management
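
As a sketch only, a driver might describe its requirements with the structures above
roughly as follows.  The parameter id reuses DPM_MD_CPU_FREQ from the Centrino header
purely as an example; real ids and ranges are machine-dependent, and how the block is
published (for instance through the new dev->constraints pointer) is left to platform
code.

#include <linux/pm.h>
#include <linux/dpm.h>

static struct constraints example_constraints = {
	.count	= 1,
	.param	= {
		/* Hypothetical: keep the CPU between 800 MHz and 1.4 GHz. */
		{ .id = DPM_MD_CPU_FREQ, .min = 800000, .max = 1400000 },
	},
};

static void example_device_busy_section(void)
{
	assert_constraints(&example_constraints);

	/* ... latency-sensitive work ... */

	deassert_constraints(&example_constraints);
}
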
Index: linux-2.6.16/include/linux/sched.h
===================================================================
--- linux-2.6.16.orig/include/linux/sched.h	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/include/linux/sched.h	2006-04-11 06:34:11.000000000 +0000
@@ -860,6 +860,7 @@
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
 	clock_t acct_stimexpd;	/* clock_t-converted stime since last update */
 #endif
+	int     dpm_state; /* DPM operating state to use for this task */
 #ifdef CONFIG_NUMA
   	struct mempolicy *mempolicy;
 	short il_next;
Index: linux-2.6.16/kernel/sched.c
===================================================================
--- linux-2.6.16.orig/kernel/sched.c	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/kernel/sched.c	2006-04-11 06:34:11.000000000 +0000
@@ -49,6 +49,8 @@
 #include <linux/syscalls.h>
 #include <linux/times.h>
 #include <linux/acct.h>
+#include <linux/dpm.h>
+
 #include <asm/tlb.h>
 
 #include <asm/unistd.h>
Index: linux-2.6.16/kernel/softirq.c
===================================================================
--- linux-2.6.16.orig/kernel/softirq.c	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/kernel/softirq.c	2006-04-11 06:34:11.000000000 +0000
@@ -45,6 +45,8 @@
 
 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
+#include <linux/dpm.h>
+
 /*
  * we cannot loop indefinitely here to avoid userspace starvation,
  * but we also don't want to introduce a worst case 1/HZ latency
@@ -352,6 +354,11 @@
 	set_user_nice(current, 19);
 	current->flags |= PF_NOFREEZE;
 
+#ifdef CONFIG_DPM
+	/* Identify as a system task for DPM purposes */
+	current->dpm_state = DPM_NO_STATE;
+#endif
+
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	while (!kthread_should_stop()) {
Index: linux-2.6.16/kernel/workqueue.c
===================================================================
--- linux-2.6.16.orig/kernel/workqueue.c	2006-03-20 05:53:29.000000000 +0000
+++ linux-2.6.16/kernel/workqueue.c	2006-04-11 06:34:11.000000000 +0000
@@ -23,6 +23,7 @@
 #include <linux/signal.h>
 #include <linux/completion.h>
 #include <linux/workqueue.h>
+#include <linux/dpm.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
@@ -195,6 +196,11 @@
 
 	set_user_nice(current, -5);
 
+#ifdef CONFIG_DPM
+	/* Identify as a system task for DPM purposes */
+	current->dpm_state = DPM_NO_STATE;
+#endif
+
 	/* Block and flush all signals */
 	sigfillset(&blocked);
 	sigprocmask(SIG_BLOCK, &blocked, NULL);