
[PATCH] Bump to 3.0-rt6



This is just the result of gen-patch for 3.0-rt6, plus the necessary
modifications to use it and to remove the now-obsolete patch.

---
 changelog                                          |    3 +
 .../{patch-3.0-rt2.patch => patch-3.0-rt6.patch}   | 1004 ++++++++++++++++++--
 patches/series/2-extra                             |    1 +
 patches/series/base-extra                          |    2 -
 4 files changed, 919 insertions(+), 91 deletions(-)
 rename patches/features/all/rt/{patch-3.0-rt2.patch => patch-3.0-rt6.patch} (95%)
 create mode 100644 patches/series/2-extra
 delete mode 100644 patches/series/base-extra

diff --git a/changelog b/changelog
index 9199463..8751ac3 100644
--- a/changelog
+++ b/changelog
@@ -3,6 +3,9 @@ linux-2.6 (3.0.0-2) UNRELEASED; urgency=low
   [ Aurelien Jarno ]
   * Add configuration files for s390x architecture.
 
+  [ Uwe Kleine-König ]
+  * update rt featureset to 3.0-rt6
+
  -- Ben Hutchings <ben@decadent.org.uk>  Wed, 27 Jul 2011 23:58:10 +0200
 
 linux-2.6 (3.0.0-1) unstable; urgency=low
diff --git a/patches/features/all/rt/patch-3.0-rt2.patch b/patches/features/all/rt/patch-3.0-rt6.patch
similarity index 95%
rename from patches/features/all/rt/patch-3.0-rt2.patch
rename to patches/features/all/rt/patch-3.0-rt6.patch
index 8bb2f97..073d77d 100644
--- a/patches/features/all/rt/patch-3.0-rt2.patch
+++ b/patches/features/all/rt/patch-3.0-rt6.patch
@@ -558,7 +558,7 @@ Index: linux-2.6/kernel/sched.c
 +
 +static inline void sched_submit_work(struct task_struct *tsk)
 +{
-+	if (!tsk->state || tsk->pi_blocked_on)
++	if (!tsk->state || tsk_is_pi_blocked(tsk))
 +		return;
 +
 +	/*
@@ -578,7 +578,7 @@ Index: linux-2.6/kernel/sched.c
 +
 +static inline void sched_update_worker(struct task_struct *tsk)
 +{
-+	if (tsk->pi_blocked_on)
++	if (tsk_is_pi_blocked(tsk))
 +		return;
 +
 +	if (tsk->flags & PF_WQ_WORKER)
@@ -628,11 +628,10 @@ Index: linux-2.6/kernel/sched.c
  		local_irq_disable();
  		sub_preempt_count(PREEMPT_ACTIVE);
  
-@@ -4827,10 +4855,8 @@ long __sched sleep_on_timeout(wait_queue
- }
+@@ -4828,9 +4856,8 @@ long __sched sleep_on_timeout(wait_queue
  EXPORT_SYMBOL(sleep_on_timeout);
  
--#ifdef CONFIG_RT_MUTEXES
+ #ifdef CONFIG_RT_MUTEXES
 -
  /*
 - * rt_mutex_setprio - set the current priority of a task
@@ -640,7 +639,7 @@ Index: linux-2.6/kernel/sched.c
   * @p: task
   * @prio: prio value (kernel-internal form)
   *
-@@ -4839,7 +4865,7 @@ EXPORT_SYMBOL(sleep_on_timeout);
+@@ -4839,7 +4866,7 @@ EXPORT_SYMBOL(sleep_on_timeout);
   *
   * Used by the rt_mutex code to implement priority inheritance logic.
   */
@@ -649,7 +648,7 @@ Index: linux-2.6/kernel/sched.c
  {
  	int oldprio, on_rq, running;
  	struct rq *rq;
-@@ -4849,6 +4875,24 @@ void rt_mutex_setprio(struct task_struct
+@@ -4849,6 +4876,24 @@ void rt_mutex_setprio(struct task_struct
  
  	rq = __task_rq_lock(p);
  
@@ -674,20 +673,18 @@ Index: linux-2.6/kernel/sched.c
  	trace_sched_pi_setprio(p, prio);
  	oldprio = p->prio;
  	prev_class = p->sched_class;
-@@ -4872,11 +4916,10 @@ void rt_mutex_setprio(struct task_struct
+@@ -4872,9 +4917,9 @@ void rt_mutex_setprio(struct task_struct
  		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
  
  	check_class_changed(rq, p, prev_class, oldprio);
 +out_unlock:
  	__task_rq_unlock(rq);
  }
- 
--#endif
 -
+ #endif
+ 
  void set_user_nice(struct task_struct *p, long nice)
- {
- 	int old_prio, delta, on_rq;
-@@ -5543,7 +5586,7 @@ SYSCALL_DEFINE0(sched_yield)
+@@ -5543,7 +5588,7 @@ SYSCALL_DEFINE0(sched_yield)
  	__release(rq->lock);
  	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  	do_raw_spin_unlock(&rq->lock);
@@ -696,7 +693,7 @@ Index: linux-2.6/kernel/sched.c
  
  	schedule();
  
-@@ -5557,9 +5600,17 @@ static inline int should_resched(void)
+@@ -5557,9 +5602,17 @@ static inline int should_resched(void)
  
  static void __cond_resched(void)
  {
@@ -717,7 +714,7 @@ Index: linux-2.6/kernel/sched.c
  }
  
  int __sched _cond_resched(void)
-@@ -5600,6 +5651,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -5600,6 +5653,7 @@ int __cond_resched_lock(spinlock_t *lock
  }
  EXPORT_SYMBOL(__cond_resched_lock);
  
@@ -725,7 +722,7 @@ Index: linux-2.6/kernel/sched.c
  int __sched __cond_resched_softirq(void)
  {
  	BUG_ON(!in_softirq());
-@@ -5613,6 +5665,7 @@ int __sched __cond_resched_softirq(void)
+@@ -5613,6 +5667,7 @@ int __sched __cond_resched_softirq(void)
  	return 0;
  }
  EXPORT_SYMBOL(__cond_resched_softirq);
@@ -733,7 +730,7 @@ Index: linux-2.6/kernel/sched.c
  
  /**
   * yield - yield the current processor to other threads.
-@@ -5859,7 +5912,7 @@ void show_state_filter(unsigned long sta
+@@ -5859,7 +5914,7 @@ void show_state_filter(unsigned long sta
  	printk(KERN_INFO
  		"  task                        PC stack   pid father\n");
  #endif
@@ -742,7 +739,7 @@ Index: linux-2.6/kernel/sched.c
  	do_each_thread(g, p) {
  		/*
  		 * reset the NMI-timeout, listing all files on a slow
-@@ -5875,7 +5928,7 @@ void show_state_filter(unsigned long sta
+@@ -5875,7 +5930,7 @@ void show_state_filter(unsigned long sta
  #ifdef CONFIG_SCHED_DEBUG
  	sysrq_sched_debug_show();
  #endif
@@ -751,7 +748,7 @@ Index: linux-2.6/kernel/sched.c
  	/*
  	 * Only show locks if all tasks are dumped:
  	 */
-@@ -5997,12 +6050,12 @@ static inline void sched_init_granularit
+@@ -5997,12 +6052,12 @@ static inline void sched_init_granularit
  #ifdef CONFIG_SMP
  void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  {
@@ -768,7 +765,7 @@ Index: linux-2.6/kernel/sched.c
  }
  
  /*
-@@ -6053,7 +6106,7 @@ int set_cpus_allowed_ptr(struct task_str
+@@ -6053,7 +6108,7 @@ int set_cpus_allowed_ptr(struct task_str
  	do_set_cpus_allowed(p, new_mask);
  
  	/* Can the task run on the task's current CPU? If so, we're done */
@@ -777,7 +774,7 @@ Index: linux-2.6/kernel/sched.c
  		goto out;
  
  	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -6072,6 +6125,83 @@ out:
+@@ -6072,6 +6127,83 @@ out:
  }
  EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
  
@@ -861,7 +858,7 @@ Index: linux-2.6/kernel/sched.c
  /*
   * Move (not current) task off this cpu, onto dest cpu. We're doing
   * this because either it can't run here any more (set_cpus_allowed()
-@@ -6100,7 +6230,7 @@ static int __migrate_task(struct task_st
+@@ -6100,7 +6232,7 @@ static int __migrate_task(struct task_st
  	if (task_cpu(p) != src_cpu)
  		goto done;
  	/* Affinity changed (again). */
@@ -870,7 +867,7 @@ Index: linux-2.6/kernel/sched.c
  		goto fail;
  
  	/*
-@@ -6142,6 +6272,8 @@ static int migration_cpu_stop(void *data
+@@ -6142,6 +6274,8 @@ static int migration_cpu_stop(void *data
  
  #ifdef CONFIG_HOTPLUG_CPU
  
@@ -879,7 +876,7 @@ Index: linux-2.6/kernel/sched.c
  /*
   * Ensures that the idle task is using init_mm right before its cpu goes
   * offline.
-@@ -6154,7 +6286,12 @@ void idle_task_exit(void)
+@@ -6154,7 +6288,12 @@ void idle_task_exit(void)
  
  	if (mm != &init_mm)
  		switch_mm(mm, &init_mm, current);
@@ -893,7 +890,7 @@ Index: linux-2.6/kernel/sched.c
  }
  
  /*
-@@ -6472,6 +6609,12 @@ migration_call(struct notifier_block *nf
+@@ -6472,6 +6611,12 @@ migration_call(struct notifier_block *nf
  		migrate_nr_uninterruptible(rq);
  		calc_global_load_remove(rq);
  		break;
@@ -906,7 +903,7 @@ Index: linux-2.6/kernel/sched.c
  #endif
  	}
  
-@@ -8188,7 +8331,8 @@ void __init sched_init(void)
+@@ -8188,7 +8333,8 @@ void __init sched_init(void)
  #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
  static inline int preempt_count_equals(int preempt_offset)
  {
@@ -1178,6 +1175,327 @@ Index: linux-2.6/kernel/workqueue_sched.h
 -				       unsigned int cpu);
 +void wq_worker_running(struct task_struct *task);
 +void wq_worker_sleeping(struct task_struct *task);
+Index: linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c
+===================================================================
+--- linux-2.6.orig/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -151,28 +151,17 @@ union _cpuid4_leaf_ecx {
+ 	u32 full;
+ };
+ 
+-struct amd_l3_cache {
+-	struct	 amd_northbridge *nb;
+-	unsigned indices;
+-	u8	 subcaches[4];
+-};
+-
+-struct _cpuid4_info {
++struct _cpuid4_info_regs {
+ 	union _cpuid4_leaf_eax eax;
+ 	union _cpuid4_leaf_ebx ebx;
+ 	union _cpuid4_leaf_ecx ecx;
+ 	unsigned long size;
+-	struct amd_l3_cache *l3;
+-	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
++	struct amd_northbridge *nb;
+ };
+ 
+-/* subset of above _cpuid4_info w/o shared_cpu_map */
+-struct _cpuid4_info_regs {
+-	union _cpuid4_leaf_eax eax;
+-	union _cpuid4_leaf_ebx ebx;
+-	union _cpuid4_leaf_ecx ecx;
+-	unsigned long size;
+-	struct amd_l3_cache *l3;
++struct _cpuid4_info {
++	struct _cpuid4_info_regs base;
++	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+ };
+ 
+ unsigned short			num_cache_leaves;
+@@ -314,12 +303,13 @@ struct _cache_attr {
+ /*
+  * L3 cache descriptors
+  */
+-static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
++static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
+ {
++	struct amd_l3_cache *l3 = &nb->l3_cache;
+ 	unsigned int sc0, sc1, sc2, sc3;
+ 	u32 val = 0;
+ 
+-	pci_read_config_dword(l3->nb->misc, 0x1C4, &val);
++	pci_read_config_dword(nb->misc, 0x1C4, &val);
+ 
+ 	/* calculate subcache sizes */
+ 	l3->subcaches[0] = sc0 = !(val & BIT(0));
+@@ -333,33 +323,16 @@ static void __cpuinit amd_calc_l3_indice
+ static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
+ 					int index)
+ {
+-	static struct amd_l3_cache *__cpuinitdata l3_caches;
+ 	int node;
+ 
+ 	/* only for L3, and not in virtualized environments */
+-	if (index < 3 || amd_nb_num() == 0)
++	if (index < 3)
+ 		return;
+ 
+-	/*
+-	 * Strictly speaking, the amount in @size below is leaked since it is
+-	 * never freed but this is done only on shutdown so it doesn't matter.
+-	 */
+-	if (!l3_caches) {
+-		int size = amd_nb_num() * sizeof(struct amd_l3_cache);
+-
+-		l3_caches = kzalloc(size, GFP_ATOMIC);
+-		if (!l3_caches)
+-			return;
+-	}
+-
+ 	node = amd_get_nb_id(smp_processor_id());
+-
+-	if (!l3_caches[node].nb) {
+-		l3_caches[node].nb = node_to_amd_nb(node);
+-		amd_calc_l3_indices(&l3_caches[node]);
+-	}
+-
+-	this_leaf->l3 = &l3_caches[node];
++	this_leaf->nb = node_to_amd_nb(node);
++	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
++		amd_calc_l3_indices(this_leaf->nb);
+ }
+ 
+ /*
+@@ -369,11 +342,11 @@ static void __cpuinit amd_init_l3_cache(
+  *
+  * @returns: the disabled index if used or negative value if slot free.
+  */
+-int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
++int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
+ {
+ 	unsigned int reg = 0;
+ 
+-	pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, &reg);
++	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
+ 
+ 	/* check whether this slot is activated already */
+ 	if (reg & (3UL << 30))
+@@ -387,11 +360,10 @@ static ssize_t show_cache_disable(struct
+ {
+ 	int index;
+ 
+-	if (!this_leaf->l3 ||
+-	    !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
++	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+ 		return -EINVAL;
+ 
+-	index = amd_get_l3_disable_slot(this_leaf->l3, slot);
++	index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
+ 	if (index >= 0)
+ 		return sprintf(buf, "%d\n", index);
+ 
+@@ -408,7 +380,7 @@ show_cache_disable_##slot(struct _cpuid4
+ SHOW_CACHE_DISABLE(0)
+ SHOW_CACHE_DISABLE(1)
+ 
+-static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
++static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
+ 				 unsigned slot, unsigned long idx)
+ {
+ 	int i;
+@@ -421,10 +393,10 @@ static void amd_l3_disable_index(struct 
+ 	for (i = 0; i < 4; i++) {
+ 		u32 reg = idx | (i << 20);
+ 
+-		if (!l3->subcaches[i])
++		if (!nb->l3_cache.subcaches[i])
+ 			continue;
+ 
+-		pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
++		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
+ 
+ 		/*
+ 		 * We need to WBINVD on a core on the node containing the L3
+@@ -434,7 +406,7 @@ static void amd_l3_disable_index(struct 
+ 		wbinvd_on_cpu(cpu);
+ 
+ 		reg |= BIT(31);
+-		pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
++		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
+ 	}
+ }
+ 
+@@ -448,24 +420,24 @@ static void amd_l3_disable_index(struct 
+  *
+  * @return: 0 on success, error status on failure
+  */
+-int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
++int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
+ 			    unsigned long index)
+ {
+ 	int ret = 0;
+ 
+ 	/*  check if @slot is already used or the index is already disabled */
+-	ret = amd_get_l3_disable_slot(l3, slot);
++	ret = amd_get_l3_disable_slot(nb, slot);
+ 	if (ret >= 0)
+ 		return -EINVAL;
+ 
+-	if (index > l3->indices)
++	if (index > nb->l3_cache.indices)
+ 		return -EINVAL;
+ 
+ 	/* check whether the other slot has disabled the same index already */
+-	if (index == amd_get_l3_disable_slot(l3, !slot))
++	if (index == amd_get_l3_disable_slot(nb, !slot))
+ 		return -EINVAL;
+ 
+-	amd_l3_disable_index(l3, cpu, slot, index);
++	amd_l3_disable_index(nb, cpu, slot, index);
+ 
+ 	return 0;
+ }
+@@ -480,8 +452,7 @@ static ssize_t store_cache_disable(struc
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+-	if (!this_leaf->l3 ||
+-	    !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
++	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+ 		return -EINVAL;
+ 
+ 	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+@@ -489,7 +460,7 @@ static ssize_t store_cache_disable(struc
+ 	if (strict_strtoul(buf, 10, &val) < 0)
+ 		return -EINVAL;
+ 
+-	err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
++	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
+ 	if (err) {
+ 		if (err == -EEXIST)
+ 			printk(KERN_WARNING "L3 disable slot %d in use!\n",
+@@ -518,7 +489,7 @@ static struct _cache_attr cache_disable_
+ static ssize_t
+ show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
+ {
+-	if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
++	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ 		return -EINVAL;
+ 
+ 	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
+@@ -533,7 +504,7 @@ store_subcaches(struct _cpuid4_info *thi
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+-	if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
++	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ 		return -EINVAL;
+ 
+ 	if (strict_strtoul(buf, 16, &val) < 0)
+@@ -769,7 +740,7 @@ static void __cpuinit cache_shared_cpu_m
+ 		return;
+ 	}
+ 	this_leaf = CPUID4_INFO_IDX(cpu, index);
+-	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
++	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
+ 
+ 	if (num_threads_sharing == 1)
+ 		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
+@@ -820,29 +791,19 @@ static void __cpuinit free_cache_attribu
+ 	for (i = 0; i < num_cache_leaves; i++)
+ 		cache_remove_shared_cpu_map(cpu, i);
+ 
+-	kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
+ 	kfree(per_cpu(ici_cpuid4_info, cpu));
+ 	per_cpu(ici_cpuid4_info, cpu) = NULL;
+ }
+ 
+-static int
+-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+-{
+-	struct _cpuid4_info_regs *leaf_regs =
+-		(struct _cpuid4_info_regs *)this_leaf;
+-
+-	return cpuid4_cache_lookup_regs(index, leaf_regs);
+-}
+-
+ static void __cpuinit get_cpu_leaves(void *_retval)
+ {
+ 	int j, *retval = _retval, cpu = smp_processor_id();
+ 
+ 	/* Do cpuid and store the results */
+ 	for (j = 0; j < num_cache_leaves; j++) {
+-		struct _cpuid4_info *this_leaf;
+-		this_leaf = CPUID4_INFO_IDX(cpu, j);
+-		*retval = cpuid4_cache_lookup(j, this_leaf);
++		struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
++
++		*retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
+ 		if (unlikely(*retval < 0)) {
+ 			int i;
+ 
+@@ -900,16 +861,16 @@ static ssize_t show_##file_name(struct _
+ 	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
+ }
+ 
+-show_one_plus(level, eax.split.level, 0);
+-show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
+-show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
+-show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
+-show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
++show_one_plus(level, base.eax.split.level, 0);
++show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
++show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
++show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
++show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
+ 
+ static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
+ 			 unsigned int cpu)
+ {
+-	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
++	return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
+ }
+ 
+ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
+@@ -946,7 +907,7 @@ static inline ssize_t show_shared_cpu_li
+ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
+ 			 unsigned int cpu)
+ {
+-	switch (this_leaf->eax.split.type) {
++	switch (this_leaf->base.eax.split.type) {
+ 	case CACHE_TYPE_DATA:
+ 		return sprintf(buf, "Data\n");
+ 	case CACHE_TYPE_INST:
+@@ -1135,7 +1096,7 @@ static int __cpuinit cache_add_dev(struc
+ 
+ 		ktype_cache.default_attrs = default_attrs;
+ #ifdef CONFIG_AMD_NB
+-		if (this_leaf->l3)
++		if (this_leaf->base.nb)
+ 			ktype_cache.default_attrs = amd_l3_attrs();
+ #endif
+ 		retval = kobject_init_and_add(&(this_object->kobj),
+Index: linux-2.6/arch/x86/include/asm/amd_nb.h
+===================================================================
+--- linux-2.6.orig/arch/x86/include/asm/amd_nb.h
++++ linux-2.6/arch/x86/include/asm/amd_nb.h
+@@ -19,9 +19,15 @@ extern int amd_numa_init(void);
+ extern int amd_get_subcaches(int);
+ extern int amd_set_subcaches(int, int);
+ 
++struct amd_l3_cache {
++	unsigned indices;
++	u8	 subcaches[4];
++};
++
+ struct amd_northbridge {
+ 	struct pci_dev *misc;
+ 	struct pci_dev *link;
++	struct amd_l3_cache l3_cache;
+ };
+ 
+ struct amd_northbridge_info {
 Index: linux-2.6/arch/mips/sibyte/sb1250/irq.c
 ===================================================================
 --- linux-2.6.orig/arch/mips/sibyte/sb1250/irq.c
@@ -1562,6 +1880,110 @@ Index: linux-2.6/arch/mips/kernel/signal.c
  	if (test_thread_flag(TIF_RESTORE_SIGMASK))
  		oldset = &current->saved_sigmask;
  	else
+Index: linux-2.6/arch/arm/kernel/signal.c
+===================================================================
+--- linux-2.6.orig/arch/arm/kernel/signal.c
++++ linux-2.6/arch/arm/kernel/signal.c
+@@ -673,6 +673,9 @@ static void do_signal(struct pt_regs *re
+ 	if (!user_mode(regs))
+ 		return;
+ 
++	local_irq_enable();
++	preempt_check_resched();
++
+ 	/*
+ 	 * If we were from a system call, check for system call restarting...
+ 	 */
+Index: linux-2.6/kernel/time/clocksource.c
+===================================================================
+--- linux-2.6.orig/kernel/time/clocksource.c
++++ linux-2.6/kernel/time/clocksource.c
+@@ -186,6 +186,7 @@ static struct timer_list watchdog_timer;
+ static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
+ static DEFINE_SPINLOCK(watchdog_lock);
+ static int watchdog_running;
++static atomic_t watchdog_reset_pending;
+ 
+ static int clocksource_watchdog_kthread(void *data);
+ static void __clocksource_change_rating(struct clocksource *cs, int rating);
+@@ -247,12 +248,14 @@ static void clocksource_watchdog(unsigne
+ 	struct clocksource *cs;
+ 	cycle_t csnow, wdnow;
+ 	int64_t wd_nsec, cs_nsec;
+-	int next_cpu;
++	int next_cpu, reset_pending;
+ 
+ 	spin_lock(&watchdog_lock);
+ 	if (!watchdog_running)
+ 		goto out;
+ 
++	reset_pending = atomic_read(&watchdog_reset_pending);
++
+ 	list_for_each_entry(cs, &watchdog_list, wd_list) {
+ 
+ 		/* Clocksource already marked unstable? */
+@@ -268,7 +271,8 @@ static void clocksource_watchdog(unsigne
+ 		local_irq_enable();
+ 
+ 		/* Clocksource initialized ? */
+-		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
++		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
++		    atomic_read(&watchdog_reset_pending)) {
+ 			cs->flags |= CLOCK_SOURCE_WATCHDOG;
+ 			cs->wd_last = wdnow;
+ 			cs->cs_last = csnow;
+@@ -283,8 +287,11 @@ static void clocksource_watchdog(unsigne
+ 		cs->cs_last = csnow;
+ 		cs->wd_last = wdnow;
+ 
++		if (atomic_read(&watchdog_reset_pending))
++			continue;
++
+ 		/* Check the deviation from the watchdog clocksource. */
+-		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
++		if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
+ 			clocksource_unstable(cs, cs_nsec - wd_nsec);
+ 			continue;
+ 		}
+@@ -303,6 +310,13 @@ static void clocksource_watchdog(unsigne
+ 	}
+ 
+ 	/*
++	 * We only clear the watchdog_reset_pending, when we did a
++	 * full cycle through all clocksources.
++	 */
++	if (reset_pending)
++		atomic_dec(&watchdog_reset_pending);
++
++	/*
+ 	 * Cycle through CPUs to check if the CPUs stay synchronized
+ 	 * to each other.
+ 	 */
+@@ -344,23 +358,7 @@ static inline void clocksource_reset_wat
+ 
+ static void clocksource_resume_watchdog(void)
+ {
+-	unsigned long flags;
+-
+-	/*
+-	 * We use trylock here to avoid a potential dead lock when
+-	 * kgdb calls this code after the kernel has been stopped with
+-	 * watchdog_lock held. When watchdog_lock is held we just
+-	 * return and accept, that the watchdog might trigger and mark
+-	 * the monitored clock source (usually TSC) unstable.
+-	 *
+-	 * This does not affect the other caller clocksource_resume()
+-	 * because at this point the kernel is UP, interrupts are
+-	 * disabled and nothing can hold watchdog_lock.
+-	 */
+-	if (!spin_trylock_irqsave(&watchdog_lock, flags))
+-		return;
+-	clocksource_reset_watchdog();
+-	spin_unlock_irqrestore(&watchdog_lock, flags);
++	atomic_inc(&watchdog_reset_pending);
+ }
+ 
+ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 Index: linux-2.6/kernel/watchdog.c
 ===================================================================
 --- linux-2.6.orig/kernel/watchdog.c
@@ -1977,13 +2399,11 @@ Index: linux-2.6/include/linux/sched.h
  #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
  #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
  #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
-@@ -2021,9 +2046,14 @@ static inline void sched_autogroup_fork(
- static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+@@ -2022,15 +2047,27 @@ static inline void sched_autogroup_exit(
  #endif
  
-+extern void task_setprio(struct task_struct *p, int prio);
-+
  #ifdef CONFIG_RT_MUTEXES
++extern void task_setprio(struct task_struct *p, int prio);
  extern int rt_mutex_getprio(struct task_struct *p);
 -extern void rt_mutex_setprio(struct task_struct *p, int prio);
 +static inline void rt_mutex_setprio(struct task_struct *p, int prio)
@@ -1991,9 +2411,24 @@ Index: linux-2.6/include/linux/sched.h
 +	task_setprio(p, prio);
 +}
  extern void rt_mutex_adjust_pi(struct task_struct *p);
++static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
++{
++	return tsk->pi_blocked_on != NULL;
++}
  #else
  static inline int rt_mutex_getprio(struct task_struct *p)
-@@ -2110,6 +2140,7 @@ extern void xtime_update(unsigned long t
+ {
+ 	return p->normal_prio;
+ }
+ # define rt_mutex_adjust_pi(p)		do { } while (0)
++static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
++{
++	return false;
++}
+ #endif
+ 
+ extern bool yield_to(struct task_struct *p, bool preempt);
+@@ -2110,6 +2147,7 @@ extern void xtime_update(unsigned long t
  
  extern int wake_up_state(struct task_struct *tsk, unsigned int state);
  extern int wake_up_process(struct task_struct *tsk);
@@ -2001,7 +2436,7 @@ Index: linux-2.6/include/linux/sched.h
  extern void wake_up_new_task(struct task_struct *tsk);
  #ifdef CONFIG_SMP
   extern void kick_process(struct task_struct *tsk);
-@@ -2199,12 +2230,24 @@ extern struct mm_struct * mm_alloc(void)
+@@ -2199,12 +2237,24 @@ extern struct mm_struct * mm_alloc(void)
  
  /* mmdrop drops the mm and the page tables */
  extern void __mmdrop(struct mm_struct *);
@@ -2026,7 +2461,7 @@ Index: linux-2.6/include/linux/sched.h
  /* mmput gets rid of the mappings and all user-space */
  extern void mmput(struct mm_struct *);
  /* Grab a reference to a task's mm, if it is not already going away */
-@@ -2510,7 +2553,7 @@ extern int _cond_resched(void);
+@@ -2510,7 +2560,7 @@ extern int _cond_resched(void);
  
  extern int __cond_resched_lock(spinlock_t *lock);
  
@@ -2035,7 +2470,7 @@ Index: linux-2.6/include/linux/sched.h
  #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
  #else
  #define PREEMPT_LOCK_OFFSET	0
-@@ -2521,12 +2564,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -2521,12 +2571,16 @@ extern int __cond_resched_lock(spinlock_
  	__cond_resched_lock(lock);				\
  })
  
@@ -2052,7 +2487,7 @@ Index: linux-2.6/include/linux/sched.h
  
  /*
   * Does a critical section need to be broken due to another
-@@ -2550,7 +2597,7 @@ void thread_group_cputimer(struct task_s
+@@ -2550,7 +2604,7 @@ void thread_group_cputimer(struct task_s
  
  static inline void thread_group_cputime_init(struct signal_struct *sig)
  {
@@ -2061,7 +2496,7 @@ Index: linux-2.6/include/linux/sched.h
  }
  
  /*
-@@ -2589,6 +2636,15 @@ static inline void set_task_cpu(struct t
+@@ -2589,6 +2643,15 @@ static inline void set_task_cpu(struct t
  
  #endif /* CONFIG_SMP */
  
@@ -2589,7 +3024,7 @@ Index: linux-2.6/kernel/softirq.c
 +	}
 +
 +	if (warnpending) {
-+		printk(KERN_ERR "NOHZ: local_softirq_pending %02lx\n",
++		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
 +		       pending);
 +		rate_limit++;
 +	}
@@ -2603,7 +3038,7 @@ Index: linux-2.6/kernel/softirq.c
 +	static int rate_limit;
 +
 +	if (rate_limit < 10) {
-+		printk(KERN_ERR "NOHZ: local_softirq_pending %02lx\n",
++		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
 +		       local_softirq_pending());
 +		rate_limit++;
 +	}
@@ -3668,7 +4103,14 @@ Index: linux-2.6/kernel/trace/trace_irqsoff.c
 ===================================================================
 --- linux-2.6.orig/kernel/trace/trace_irqsoff.c
 +++ linux-2.6/kernel/trace/trace_irqsoff.c
-@@ -23,7 +23,7 @@ static int				tracer_enabled __read_most
+@@ -17,13 +17,14 @@
+ #include <linux/fs.h>
+ 
+ #include "trace.h"
++#include <trace/events/hist.h>
+ 
+ static struct trace_array		*irqsoff_trace __read_mostly;
+ static int				tracer_enabled __read_mostly;
  
  static DEFINE_PER_CPU(int, tracing_cpu);
  
@@ -3677,7 +4119,7 @@ Index: linux-2.6/kernel/trace/trace_irqsoff.c
  
  enum {
  	TRACER_IRQS_OFF		= (1 << 1),
-@@ -319,7 +319,7 @@ check_critical_timing(struct trace_array
+@@ -319,7 +320,7 @@ check_critical_timing(struct trace_array
  	if (!report_latency(delta))
  		goto out;
  
@@ -3686,7 +4128,7 @@ Index: linux-2.6/kernel/trace/trace_irqsoff.c
  
  	/* check if we are still the max latency */
  	if (!report_latency(delta))
-@@ -342,7 +342,7 @@ check_critical_timing(struct trace_array
+@@ -342,7 +343,7 @@ check_critical_timing(struct trace_array
  	max_sequence++;
  
  out_unlock:
@@ -3695,6 +4137,81 @@ Index: linux-2.6/kernel/trace/trace_irqsoff.c
  
  out:
  	data->critical_sequence = max_sequence;
+@@ -424,11 +425,13 @@ void start_critical_timings(void)
+ {
+ 	if (preempt_trace() || irq_trace())
+ 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++	trace_preemptirqsoff_hist(TRACE_START, 1);
+ }
+ EXPORT_SYMBOL_GPL(start_critical_timings);
+ 
+ void stop_critical_timings(void)
+ {
++	trace_preemptirqsoff_hist(TRACE_STOP, 0);
+ 	if (preempt_trace() || irq_trace())
+ 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
+ #ifdef CONFIG_PROVE_LOCKING
+ void time_hardirqs_on(unsigned long a0, unsigned long a1)
+ {
++	trace_preemptirqsoff_hist(IRQS_ON, 0);
+ 	if (!preempt_trace() && irq_trace())
+ 		stop_critical_timing(a0, a1);
+ }
+@@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0,
+ {
+ 	if (!preempt_trace() && irq_trace())
+ 		start_critical_timing(a0, a1);
++	trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ 
+ #else /* !CONFIG_PROVE_LOCKING */
+@@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct
+  */
+ void trace_hardirqs_on(void)
+ {
++	trace_preemptirqsoff_hist(IRQS_ON, 0);
+ 	if (!preempt_trace() && irq_trace())
+ 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -480,11 +486,13 @@ void trace_hardirqs_off(void)
+ {
+ 	if (!preempt_trace() && irq_trace())
+ 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++	trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off);
+ 
+ void trace_hardirqs_on_caller(unsigned long caller_addr)
+ {
++	trace_preemptirqsoff_hist(IRQS_ON, 0);
+ 	if (!preempt_trace() && irq_trace())
+ 		stop_critical_timing(CALLER_ADDR0, caller_addr);
+ }
+@@ -494,6 +502,7 @@ void trace_hardirqs_off_caller(unsigned 
+ {
+ 	if (!preempt_trace() && irq_trace())
+ 		start_critical_timing(CALLER_ADDR0, caller_addr);
++	trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off_caller);
+ 
+@@ -503,12 +512,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
+ #ifdef CONFIG_PREEMPT_TRACER
+ void trace_preempt_on(unsigned long a0, unsigned long a1)
+ {
++	trace_preemptirqsoff_hist(PREEMPT_ON, 0);
+ 	if (preempt_trace())
+ 		stop_critical_timing(a0, a1);
+ }
+ 
+ void trace_preempt_off(unsigned long a0, unsigned long a1)
+ {
++	trace_preemptirqsoff_hist(PREEMPT_OFF, 1);
+ 	if (preempt_trace())
+ 		start_critical_timing(a0, a1);
+ }
 Index: linux-2.6/include/linux/ratelimit.h
 ===================================================================
 --- linux-2.6.orig/include/linux/ratelimit.h
@@ -5944,6 +6461,100 @@ Index: linux-2.6/drivers/dca/dca-core.c
  
  	dca_sysfs_remove_provider(dca);
  }
+Index: linux-2.6/arch/arm/common/gic.c
+===================================================================
+--- linux-2.6.orig/arch/arm/common/gic.c
++++ linux-2.6/arch/arm/common/gic.c
+@@ -33,7 +33,7 @@
+ #include <asm/mach/irq.h>
+ #include <asm/hardware/gic.h>
+ 
+-static DEFINE_SPINLOCK(irq_controller_lock);
++static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+ 
+ /* Address of GIC 0 CPU interface */
+ void __iomem *gic_cpu_base_addr __read_mostly;
+@@ -88,30 +88,30 @@ static void gic_mask_irq(struct irq_data
+ {
+ 	u32 mask = 1 << (d->irq % 32);
+ 
+-	spin_lock(&irq_controller_lock);
++	raw_spin_lock(&irq_controller_lock);
+ 	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
+ 	if (gic_arch_extn.irq_mask)
+ 		gic_arch_extn.irq_mask(d);
+-	spin_unlock(&irq_controller_lock);
++	raw_spin_unlock(&irq_controller_lock);
+ }
+ 
+ static void gic_unmask_irq(struct irq_data *d)
+ {
+ 	u32 mask = 1 << (d->irq % 32);
+ 
+-	spin_lock(&irq_controller_lock);
++	raw_spin_lock(&irq_controller_lock);
+ 	if (gic_arch_extn.irq_unmask)
+ 		gic_arch_extn.irq_unmask(d);
+ 	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
+-	spin_unlock(&irq_controller_lock);
++	raw_spin_unlock(&irq_controller_lock);
+ }
+ 
+ static void gic_eoi_irq(struct irq_data *d)
+ {
+ 	if (gic_arch_extn.irq_eoi) {
+-		spin_lock(&irq_controller_lock);
++		raw_spin_lock(&irq_controller_lock);
+ 		gic_arch_extn.irq_eoi(d);
+-		spin_unlock(&irq_controller_lock);
++		raw_spin_unlock(&irq_controller_lock);
+ 	}
+ 
+ 	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+@@ -135,7 +135,7 @@ static int gic_set_type(struct irq_data 
+ 	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ 		return -EINVAL;
+ 
+-	spin_lock(&irq_controller_lock);
++	raw_spin_lock(&irq_controller_lock);
+ 
+ 	if (gic_arch_extn.irq_set_type)
+ 		gic_arch_extn.irq_set_type(d, type);
+@@ -160,7 +160,7 @@ static int gic_set_type(struct irq_data 
+ 	if (enabled)
+ 		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+ 
+-	spin_unlock(&irq_controller_lock);
++	raw_spin_unlock(&irq_controller_lock);
+ 
+ 	return 0;
+ }
+@@ -188,11 +188,11 @@ static int gic_set_affinity(struct irq_d
+ 	mask = 0xff << shift;
+ 	bit = 1 << (cpu + shift);
+ 
+-	spin_lock(&irq_controller_lock);
++	raw_spin_lock(&irq_controller_lock);
+ 	d->node = cpu;
+ 	val = readl_relaxed(reg) & ~mask;
+ 	writel_relaxed(val | bit, reg);
+-	spin_unlock(&irq_controller_lock);
++	raw_spin_unlock(&irq_controller_lock);
+ 
+ 	return 0;
+ }
+@@ -222,9 +222,9 @@ static void gic_handle_cascade_irq(unsig
+ 
+ 	chained_irq_enter(chip, desc);
+ 
+-	spin_lock(&irq_controller_lock);
++	raw_spin_lock(&irq_controller_lock);
+ 	status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
+-	spin_unlock(&irq_controller_lock);
++	raw_spin_unlock(&irq_controller_lock);
+ 
+ 	gic_irq = (status & 0x3ff);
+ 	if (gic_irq == 1023)
 Index: linux-2.6/arch/arm/include/asm/dma.h
 ===================================================================
 --- linux-2.6.orig/arch/arm/include/asm/dma.h
@@ -5970,6 +6581,28 @@ Index: linux-2.6/arch/arm/include/asm/dma.h
  }
  
  /* Clear the 'DMA Pointer Flip Flop'.
+Index: linux-2.6/arch/arm/include/asm/mmu.h
+===================================================================
+--- linux-2.6.orig/arch/arm/include/asm/mmu.h
++++ linux-2.6/arch/arm/include/asm/mmu.h
+@@ -6,7 +6,7 @@
+ typedef struct {
+ #ifdef CONFIG_CPU_HAS_ASID
+ 	unsigned int id;
+-	spinlock_t id_lock;
++	raw_spinlock_t id_lock;
+ #endif
+ 	unsigned int kvm_seq;
+ } mm_context_t;
+@@ -16,7 +16,7 @@ typedef struct {
+ 
+ /* init_mm.context.id_lock should be initialized. */
+ #define INIT_MM_CONTEXT(name)                                                 \
+-	.context.id_lock    = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
++	.context.id_lock    = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+ #else
+ #define ASID(mm)	(0)
+ #endif
 Index: linux-2.6/arch/arm/kernel/dma.c
 ===================================================================
 --- linux-2.6.orig/arch/arm/kernel/dma.c
@@ -6571,6 +7204,33 @@ Index: linux-2.6/arch/arm/mm/context.c
  unsigned int cpu_last_asid = ASID_FIRST_VERSION;
  #ifdef CONFIG_SMP
  DEFINE_PER_CPU(struct mm_struct *, current_mm);
+@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, curre
+ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ {
+ 	mm->context.id = 0;
+-	spin_lock_init(&mm->context.id_lock);
++	raw_spin_lock_init(&mm->context.id_lock);
+ }
+ 
+ static void flush_context(void)
+@@ -58,7 +58,7 @@ static void set_mm_context(struct mm_str
+ 	 * the broadcast. This function is also called via IPI so the
+ 	 * mm->context.id_lock has to be IRQ-safe.
+ 	 */
+-	spin_lock_irqsave(&mm->context.id_lock, flags);
++	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
+ 	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+ 		/*
+ 		 * Old version of ASID found. Set the new one and
+@@ -67,7 +67,7 @@ static void set_mm_context(struct mm_str
+ 		mm->context.id = asid;
+ 		cpumask_clear(mm_cpumask(mm));
+ 	}
+-	spin_unlock_irqrestore(&mm->context.id_lock, flags);
++	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
+ 
+ 	/*
+ 	 * Set the mm_cpumask(mm) bit for the current CPU.
 @@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm)
  {
  	unsigned int asid;
@@ -11033,20 +11693,6 @@ Index: linux-2.6/net/core/dev.c
  		sd->completion_queue = NULL;
  		INIT_LIST_HEAD(&sd->poll_list);
  		sd->output_queue = NULL;
-Index: linux-2.6/arch/arm/kernel/signal.c
-===================================================================
---- linux-2.6.orig/arch/arm/kernel/signal.c
-+++ linux-2.6/arch/arm/kernel/signal.c
-@@ -673,6 +673,9 @@ static void do_signal(struct pt_regs *re
- 	if (!user_mode(regs))
- 		return;
- 
-+	local_irq_enable();
-+	preempt_check_resched();
-+
- 	/*
- 	 * If we were from a system call, check for system call restarting...
- 	 */
 Index: linux-2.6/arch/x86/kernel/apic/io_apic.c
 ===================================================================
 --- linux-2.6.orig/arch/x86/kernel/apic/io_apic.c
@@ -11250,6 +11896,19 @@ Index: linux-2.6/kernel/rcutree_plugin.h
  }
  
  #endif /* #ifdef CONFIG_HOTPLUG_CPU */
+Index: linux-2.6/drivers/usb/gadget/ci13xxx_udc.c
+===================================================================
+--- linux-2.6.orig/drivers/usb/gadget/ci13xxx_udc.c
++++ linux-2.6/drivers/usb/gadget/ci13xxx_udc.c
+@@ -816,7 +816,7 @@ static struct {
+ } dbg_data = {
+ 	.idx = 0,
+ 	.tty = 0,
+-	.lck = __RW_LOCK_UNLOCKED(lck)
++	.lck = __RW_LOCK_UNLOCKED(dbg_data.lck)
+ };
+ 
+ /**
 Index: linux-2.6/fs/file.c
 ===================================================================
 --- linux-2.6.orig/fs/file.c
@@ -13646,15 +14305,13 @@ Index: linux-2.6/kernel/hrtimer.c
  	timerqueue_init(&timer->node);
  
  #ifdef CONFIG_TIMER_STATS
-@@ -1232,6 +1294,118 @@ static void __run_hrtimer(struct hrtimer
+@@ -1232,6 +1294,116 @@ static void __run_hrtimer(struct hrtimer
  	timer->state &= ~HRTIMER_STATE_CALLBACK;
  }
  
-+
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+
 +static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
 +
++#ifdef CONFIG_PREEMPT_RT_BASE
 +static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
 +				 struct hrtimer_clock_base *base)
 +{
@@ -13765,7 +14422,7 @@ Index: linux-2.6/kernel/hrtimer.c
  #ifdef CONFIG_HIGH_RES_TIMERS
  
  /*
-@@ -1242,7 +1416,7 @@ void hrtimer_interrupt(struct clock_even
+@@ -1242,7 +1414,7 @@ void hrtimer_interrupt(struct clock_even
  {
  	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
  	ktime_t expires_next, now, entry_time, delta;
@@ -13774,7 +14431,7 @@ Index: linux-2.6/kernel/hrtimer.c
  
  	BUG_ON(!cpu_base->hres_active);
  	cpu_base->nr_events++;
-@@ -1278,6 +1452,14 @@ retry:
+@@ -1278,6 +1450,14 @@ retry:
  
  			timer = container_of(node, struct hrtimer, node);
  
@@ -13789,7 +14446,7 @@ Index: linux-2.6/kernel/hrtimer.c
  			/*
  			 * The immediate goal for using the softexpires is
  			 * minimizing wakeups, not running timers at the
-@@ -1301,7 +1483,10 @@ retry:
+@@ -1301,7 +1481,10 @@ retry:
  				break;
  			}
  
@@ -13801,7 +14458,7 @@ Index: linux-2.6/kernel/hrtimer.c
  		}
  	}
  
-@@ -1316,6 +1501,10 @@ retry:
+@@ -1316,6 +1499,10 @@ retry:
  	if (expires_next.tv64 == KTIME_MAX ||
  	    !tick_program_event(expires_next, 0)) {
  		cpu_base->hang_detected = 0;
@@ -13812,7 +14469,7 @@ Index: linux-2.6/kernel/hrtimer.c
  		return;
  	}
  
-@@ -1391,17 +1580,17 @@ void hrtimer_peek_ahead_timers(void)
+@@ -1391,17 +1578,17 @@ void hrtimer_peek_ahead_timers(void)
  	local_irq_restore(flags);
  }
  
@@ -13835,7 +14492,7 @@ Index: linux-2.6/kernel/hrtimer.c
  /*
   * Called from timer softirq every jiffy, expire hrtimers:
   *
-@@ -1434,7 +1623,7 @@ void hrtimer_run_queues(void)
+@@ -1434,7 +1621,7 @@ void hrtimer_run_queues(void)
  	struct timerqueue_node *node;
  	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
  	struct hrtimer_clock_base *base;
@@ -13844,7 +14501,7 @@ Index: linux-2.6/kernel/hrtimer.c
  
  	if (hrtimer_hres_active())
  		return;
-@@ -1459,10 +1648,16 @@ void hrtimer_run_queues(void)
+@@ -1459,10 +1646,16 @@ void hrtimer_run_queues(void)
  					hrtimer_get_expires_tv64(timer))
  				break;
  
@@ -13862,7 +14519,7 @@ Index: linux-2.6/kernel/hrtimer.c
  }
  
  /*
-@@ -1484,6 +1679,7 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1484,6 +1677,7 @@ static enum hrtimer_restart hrtimer_wake
  void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
  {
  	sl->timer.function = hrtimer_wakeup;
@@ -13870,7 +14527,7 @@ Index: linux-2.6/kernel/hrtimer.c
  	sl->task = task;
  }
  EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1622,9 +1818,13 @@ static void __cpuinit init_hrtimers_cpu(
+@@ -1622,9 +1816,13 @@ static void __cpuinit init_hrtimers_cpu(
  	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
  		cpu_base->clock_base[i].cpu_base = cpu_base;
  		timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -13884,7 +14541,7 @@ Index: linux-2.6/kernel/hrtimer.c
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
-@@ -1737,9 +1937,7 @@ void __init hrtimers_init(void)
+@@ -1737,9 +1935,7 @@ void __init hrtimers_init(void)
  	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
  			  (void *)(long)smp_processor_id());
  	register_cpu_notifier(&hrtimers_nb);
@@ -14042,7 +14699,7 @@ Index: linux-2.6/kernel/trace/latency_hist.c
 ===================================================================
 --- /dev/null
 +++ linux-2.6/kernel/trace/latency_hist.c
-@@ -0,0 +1,1166 @@
+@@ -0,0 +1,1170 @@
 +/*
 + * kernel/trace/latency_hist.c
 + *
@@ -14129,7 +14786,7 @@ Index: linux-2.6/kernel/trace/latency_hist.c
 +#endif
 +
 +#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
-+static notrace void probe_preemptirqsoff_hist(int reason, int start);
++static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
 +static struct enable_data preemptirqsoff_enabled_data = {
 +	.latency_type = PREEMPTIRQSOFF_LATENCY,
 +	.enabled = 0,
@@ -14403,6 +15060,8 @@ Index: linux-2.6/kernel/trace/latency_hist.c
 +	.release = seq_release,
 +};
 +
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
 +static void clear_maxlatprocdata(struct maxlatproc_data *mp)
 +{
 +	mp->comm[0] = mp->current_comm[0] = '\0';
@@ -14410,6 +15069,7 @@ Index: linux-2.6/kernel/trace/latency_hist.c
 +	    mp->latency = mp->timeroffset = -1;
 +	mp->timestamp = 0;
 +}
++#endif
 +
 +static void hist_reset(struct hist_data *hist)
 +{
@@ -14780,7 +15440,8 @@ Index: linux-2.6/kernel/trace/latency_hist.c
 +#endif
 +
 +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
-+static notrace void probe_preemptirqsoff_hist(int reason, int starthist)
++static notrace void probe_preemptirqsoff_hist(void *v, int reason,
++    int starthist)
 +{
 +	int cpu = raw_smp_processor_id();
 +	int time_set = 0;
@@ -16878,7 +17539,7 @@ Index: linux-2.6/arch/x86/kernel/early_printk.c
 ===================================================================
 --- linux-2.6.orig/arch/x86/kernel/early_printk.c
 +++ linux-2.6/arch/x86/kernel/early_printk.c
-@@ -169,22 +169,6 @@ static struct console early_serial_conso
+@@ -169,25 +169,9 @@ static struct console early_serial_conso
  	.index =	-1,
  };
  
@@ -16900,7 +17561,11 @@ Index: linux-2.6/arch/x86/kernel/early_printk.c
 -
  static inline void early_console_register(struct console *con, int keep_early)
  {
- 	if (early_console->index != -1) {
+-	if (early_console->index != -1) {
++	if (con->index != -1) {
+ 		printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
+ 		       con->name);
+ 		return;
 @@ -207,9 +191,8 @@ static int __init setup_early_printk(cha
  	if (!buf)
  		return 0;
@@ -19628,7 +20293,7 @@ Index: linux-2.6/kernel/cpu.c
 ===================================================================
 --- linux-2.6.orig/kernel/cpu.c
 +++ linux-2.6/kernel/cpu.c
-@@ -57,6 +57,102 @@ static struct {
+@@ -57,6 +57,104 @@ static struct {
  	.refcount = 0,
  };
  
@@ -19650,9 +20315,11 @@ Index: linux-2.6/kernel/cpu.c
 + */
 +void pin_current_cpu(void)
 +{
-+	struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
++	struct hotplug_pcp *hp;
 +
 +retry:
++	hp = &__get_cpu_var(hotplug_pcp);
++
 +	if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
 +	    hp->unplug == current || (current->flags & PF_STOMPER)) {
 +		hp->refcount++;
@@ -19731,7 +20398,7 @@ Index: linux-2.6/kernel/cpu.c
  void get_online_cpus(void)
  {
  	might_sleep();
-@@ -210,13 +306,14 @@ static int __ref take_cpu_down(void *_pa
+@@ -210,13 +308,14 @@ static int __ref take_cpu_down(void *_pa
  /* Requires cpu_add_remove_lock to be held */
  static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
  {
@@ -19747,7 +20414,7 @@ Index: linux-2.6/kernel/cpu.c
  
  	if (num_online_cpus() == 1)
  		return -EBUSY;
-@@ -224,7 +321,19 @@ static int __ref _cpu_down(unsigned int 
+@@ -224,7 +323,19 @@ static int __ref _cpu_down(unsigned int 
  	if (!cpu_online(cpu))
  		return -EINVAL;
  
@@ -19768,7 +20435,7 @@ Index: linux-2.6/kernel/cpu.c
  
  	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
  	if (err) {
-@@ -232,7 +341,16 @@ static int __ref _cpu_down(unsigned int 
+@@ -232,7 +343,16 @@ static int __ref _cpu_down(unsigned int 
  		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
  		printk("%s: attempt to take down CPU %u failed\n",
  				__func__, cpu);
@@ -19786,7 +20453,7 @@ Index: linux-2.6/kernel/cpu.c
  	}
  
  	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
-@@ -263,6 +381,9 @@ static int __ref _cpu_down(unsigned int 
+@@ -263,6 +383,9 @@ static int __ref _cpu_down(unsigned int 
  	check_for_tasks(cpu);
  
  out_release:
@@ -19897,7 +20564,7 @@ Index: linux-2.6/include/linux/hardirq.h
 +# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
 +# define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
 +#else
-+# define softirq_count()	(0)
++# define softirq_count()	(0U)
 +extern int in_serving_softirq(void);
 +#endif
 +
@@ -20817,6 +21484,15 @@ Index: linux-2.6/kernel/rtmutex.c
  	plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
  
  	debug_rt_mutex_init(lock, name);
+@@ -909,7 +1296,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ 				struct task_struct *proxy_owner)
+ {
+-	__rt_mutex_init(lock, NULL);
++	rt_mutex_init(lock);
+ 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
+ 	rt_mutex_set_owner(lock, proxy_owner);
+ 	rt_mutex_deadlock_account_lock(lock, proxy_owner);
 Index: linux-2.6/kernel/futex.c
 ===================================================================
 --- linux-2.6.orig/kernel/futex.c
@@ -22640,7 +23316,15 @@ Index: linux-2.6/drivers/tty/serial/8250.c
 ===================================================================
 --- linux-2.6.orig/drivers/tty/serial/8250.c
 +++ linux-2.6/drivers/tty/serial/8250.c
-@@ -1678,12 +1678,14 @@ static irqreturn_t serial8250_interrupt(
+@@ -38,6 +38,7 @@
+ #include <linux/nmi.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/kdb.h>
+ 
+ #include <asm/io.h>
+ #include <asm/irq.h>
+@@ -1678,12 +1679,14 @@ static irqreturn_t serial8250_interrupt(
  
  		l = l->next;
  
@@ -22655,26 +23339,29 @@ Index: linux-2.6/drivers/tty/serial/8250.c
  	} while (l != end);
  
  	spin_unlock(&i->lock);
-@@ -2892,14 +2894,10 @@ serial8250_console_write(struct console 
+@@ -2892,14 +2895,14 @@ serial8250_console_write(struct console 
  
  	touch_nmi_watchdog();
  
 -	local_irq_save(flags);
 -	if (up->port.sysrq) {
 -		/* serial8250_handle_port() already took the lock */
--		locked = 0;
++	if (unlikely(in_kdb_printk())) {
+ 		locked = 0;
 -	} else if (oops_in_progress) {
 -		locked = spin_trylock(&up->port.lock);
 -	} else
 -		spin_lock(&up->port.lock);
-+	if (up->port.sysrq || oops_in_progress)
-+		locked = spin_trylock_irqsave(&up->port.lock, flags);
-+	else
-+		spin_lock_irqsave(&up->port.lock, flags);
++	} else {
++		if (up->port.sysrq || oops_in_progress)
++			locked = spin_trylock_irqsave(&up->port.lock, flags);
++		else
++			spin_lock_irqsave(&up->port.lock, flags);
++	}
  
  	/*
  	 *	First save the IER then disable the interrupts
-@@ -2931,8 +2929,7 @@ serial8250_console_write(struct console 
+@@ -2931,8 +2934,7 @@ serial8250_console_write(struct console 
  		check_modem_status(up);
  
  	if (locked)
@@ -22703,6 +23390,36 @@ Index: linux-2.6/drivers/tty/tty_buffer.c
  }
  EXPORT_SYMBOL(tty_flip_buffer_push);
  
+Index: linux-2.6/drivers/tty/serial/omap-serial.c
+===================================================================
+--- linux-2.6.orig/drivers/tty/serial/omap-serial.c
++++ linux-2.6/drivers/tty/serial/omap-serial.c
+@@ -947,13 +947,12 @@ serial_omap_console_write(struct console
+ 	unsigned int ier;
+ 	int locked = 1;
+ 
+-	local_irq_save(flags);
+ 	if (up->port.sysrq)
+ 		locked = 0;
+ 	else if (oops_in_progress)
+-		locked = spin_trylock(&up->port.lock);
++		locked = spin_trylock_irqsave(&up->port.lock, flags);
+ 	else
+-		spin_lock(&up->port.lock);
++		spin_lock_irqsave(&up->port.lock, flags);
+ 
+ 	/*
+ 	 * First save the IER then disable the interrupts
+@@ -980,8 +979,7 @@ serial_omap_console_write(struct console
+ 		check_modem_status(up);
+ 
+ 	if (locked)
+-		spin_unlock(&up->port.lock);
+-	local_irq_restore(flags);
++		spin_unlock_irqrestore(&up->port.lock, flags);
+ }
+ 
+ static int __init
 Index: linux-2.6/fs/namespace.c
 ===================================================================
 --- linux-2.6.orig/fs/namespace.c
@@ -23459,6 +24176,115 @@ Index: linux-2.6/net/ipv4/sysctl_net_ipv4.c
  		.procname	= "icmp_ignore_bogus_error_responses",
  		.data		= &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
  		.maxlen		= sizeof(int),
+Index: linux-2.6/include/linux/kdb.h
+===================================================================
+--- linux-2.6.orig/include/linux/kdb.h
++++ linux-2.6/include/linux/kdb.h
+@@ -153,12 +153,14 @@ extern int kdb_register(char *, kdb_func
+ extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
+ 			       short, kdb_repeat_t);
+ extern int kdb_unregister(char *);
++#define in_kdb_printk() (kdb_trap_printk)
+ #else /* ! CONFIG_KGDB_KDB */
+ #define kdb_printf(...)
+ #define kdb_init(x)
+ #define kdb_register(...)
+ #define kdb_register_repeat(...)
+ #define kdb_uregister(x)
++#define in_kdb_printk() (0)
+ #endif	/* CONFIG_KGDB_KDB */
+ enum {
+ 	KDB_NOT_INITIALIZED,
+Index: linux-2.6/kernel/debug/kdb/kdb_io.c
+===================================================================
+--- linux-2.6.orig/kernel/debug/kdb/kdb_io.c
++++ linux-2.6/kernel/debug/kdb/kdb_io.c
+@@ -539,7 +539,6 @@ int vkdb_printf(const char *fmt, va_list
+ 	int diag;
+ 	int linecount;
+ 	int logging, saved_loglevel = 0;
+-	int saved_trap_printk;
+ 	int got_printf_lock = 0;
+ 	int retlen = 0;
+ 	int fnd, len;
+@@ -550,8 +549,6 @@ int vkdb_printf(const char *fmt, va_list
+ 	unsigned long uninitialized_var(flags);
+ 
+ 	preempt_disable();
+-	saved_trap_printk = kdb_trap_printk;
+-	kdb_trap_printk = 0;
+ 
+ 	/* Serialize kdb_printf if multiple cpus try to write at once.
+ 	 * But if any cpu goes recursive in kdb, just print the output,
+@@ -807,7 +804,6 @@ kdb_print_out:
+ 	} else {
+ 		__release(kdb_printf_lock);
+ 	}
+-	kdb_trap_printk = saved_trap_printk;
+ 	preempt_enable();
+ 	return retlen;
+ }
+@@ -817,9 +813,11 @@ int kdb_printf(const char *fmt, ...)
+ 	va_list ap;
+ 	int r;
+ 
++	kdb_trap_printk++;
+ 	va_start(ap, fmt);
+ 	r = vkdb_printf(fmt, ap);
+ 	va_end(ap);
++	kdb_trap_printk--;
+ 
+ 	return r;
+ }
+Index: linux-2.6/arch/Kconfig
+===================================================================
+--- linux-2.6.orig/arch/Kconfig
++++ linux-2.6/arch/Kconfig
+@@ -6,6 +6,7 @@ config OPROFILE
+ 	tristate "OProfile system profiling"
+ 	depends on PROFILING
+ 	depends on HAVE_OPROFILE
++	depends on !PREEMPT_RT_FULL
+ 	select RING_BUFFER
+ 	select RING_BUFFER_ALLOW_SWAP
+ 	help
+Index: linux-2.6/drivers/net/Kconfig
+===================================================================
+--- linux-2.6.orig/drivers/net/Kconfig
++++ linux-2.6/drivers/net/Kconfig
+@@ -3410,6 +3410,7 @@ config NET_FC
+ 
+ config NETCONSOLE
+ 	tristate "Network console logging support"
++	depends on !PREEMPT_RT_FULL
+ 	---help---
+ 	If you want to log kernel messages over the network, enable this.
+ 	See <file:Documentation/networking/netconsole.txt> for details.
+Index: linux-2.6/kernel/time/Kconfig
+===================================================================
+--- linux-2.6.orig/kernel/time/Kconfig
++++ linux-2.6/kernel/time/Kconfig
+@@ -7,6 +7,7 @@ config TICK_ONESHOT
+ config NO_HZ
+ 	bool "Tickless System (Dynamic Ticks)"
+ 	depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
++	depends on !PREEMPT_RT_FULL
+ 	select TICK_ONESHOT
+ 	help
+ 	  This option enables a tickless system: timer interrupts will
+Index: linux-2.6/mm/Kconfig
+===================================================================
+--- linux-2.6.orig/mm/Kconfig
++++ linux-2.6/mm/Kconfig
+@@ -304,7 +304,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+ 
+ config TRANSPARENT_HUGEPAGE
+ 	bool "Transparent Hugepage Support"
+-	depends on X86 && MMU
++	depends on X86 && MMU && !PREEMPT_RT_FULL
+ 	select COMPACTION
+ 	help
+ 	  Transparent Hugepages allows the kernel to use huge pages and
 Index: linux-2.6/init/Makefile
 ===================================================================
 --- linux-2.6.orig/init/Makefile
diff --git a/patches/series/2-extra b/patches/series/2-extra
new file mode 100644
index 0000000..06279d1
--- /dev/null
+++ b/patches/series/2-extra
@@ -0,0 +1 @@
++ features/all/rt/patch-3.0-rt6.patch featureset=rt
diff --git a/patches/series/base-extra b/patches/series/base-extra
deleted file mode 100644
index 86b34ee..0000000
--- a/patches/series/base-extra
+++ /dev/null
@@ -1,2 +0,0 @@
-+ features/all/rt/patch-3.0-rt2.patch featureset=rt
-
-- 
1.7.5.4

