[LITMUS^RT] [PATCH 3/5] litmus: get rid of RT_F_SLEEP and RT_F_RUNNING
Manohar Vanga
mvanga at mpi-sws.org
Thu Oct 4 12:31:06 CEST 2012
This patch removes the flags RT_F_SLEEP and RT_F_RUNNING, as their
names are misleading, and replaces them with a dedicated 'completed'
bit field in struct rt_param.
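In place of the old flags-word comparisons, callers now set or clear
the bit directly, or query it through the new is_completed() helper.
As a minimal sketch of the resulting API, abbreviated to just the
fields and helpers this patch touches:

	/* include/litmus/rt_param.h (abbreviated) */
	struct rt_param {
		/* is the task present? (true if it can be scheduled) */
		unsigned int present:1;
		/* has the current job completed? */
		unsigned int completed:1;
		/* ... remaining fields unchanged ... */
	};

	/* include/litmus/litmus.h */
	static inline int is_completed(struct task_struct* t)
	{
		return t && tsk_rt(t)->completed;
	}

	/* Call sites are converted mechanically:
	 *   set_rt_flags(t, RT_F_SLEEP)      ->  tsk_rt(t)->completed = 1;
	 *   set_rt_flags(t, RT_F_RUNNING)    ->  tsk_rt(t)->completed = 0;
	 *   get_rt_flags(t) == RT_F_SLEEP    ->  is_completed(t)
	 */

RT_F_EXIT_SEM is unaffected and remains a flag.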
Signed-off-by: Manohar Vanga <mvanga at mpi-sws.org>
---
include/litmus/litmus.h | 5 +++++
include/litmus/rt_param.h | 5 +++--
litmus/jobs.c | 4 ++--
litmus/sched_cedf.c | 12 ++++++------
litmus/sched_gsn_edf.c | 14 +++++++-------
litmus/sched_pfair.c | 8 ++++----
litmus/sched_pfp.c | 8 ++++----
litmus/sched_psn_edf.c | 8 ++++----
8 files changed, 35 insertions(+), 29 deletions(-)
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 807b788..ead6ab4 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -232,6 +232,11 @@ static inline int is_present(struct task_struct* t)
return t && tsk_rt(t)->present;
}
+static inline int is_completed(struct task_struct* t)
+{
+ return t && tsk_rt(t)->completed;
+}
+
/* make the unit explicit */
typedef unsigned long quanta_t;
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index fac939d..ce7b4b0 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -142,6 +142,9 @@ struct rt_param {
/* is the task present? (true if it can be scheduled) */
unsigned int present:1;
+ /* has the task completed? */
+ unsigned int completed:1;
+
#ifdef CONFIG_LITMUS_LOCKING
/* Is the task being priority-boosted by a locking protocol? */
unsigned int priority_boosted:1;
@@ -228,8 +231,6 @@ struct rt_param {
};
/* Possible RT flags */
-#define RT_F_RUNNING 0x00000000
-#define RT_F_SLEEP 0x00000001
#define RT_F_EXIT_SEM 0x00000008
#endif
diff --git a/litmus/jobs.c b/litmus/jobs.c
index fb093c0..13a4ed4 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -38,7 +38,7 @@ void release_at(struct task_struct *t, lt_t start)
{
BUG_ON(!t);
setup_release(t, start);
- set_rt_flags(t, RT_F_RUNNING);
+ tsk_rt(t)->completed = 0;
}
@@ -48,7 +48,7 @@ void release_at(struct task_struct *t, lt_t start)
long complete_job(void)
{
/* Mark that we do not execute anymore */
- set_rt_flags(current, RT_F_SLEEP);
+ tsk_rt(current)->completed = 1;
/* call schedule, this will return when a new job arrives
* it also takes care of preparing for the next release
*/
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index b0c16e3..62d28d2 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -171,7 +171,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
/* Link new task to CPU. */
if (linked) {
- set_rt_flags(linked, RT_F_RUNNING);
+ tsk_rt(linked)->completed = 0;
/* handle task is already scheduled somewhere! */
on_cpu = linked->rt_param.scheduled_on;
if (on_cpu != NO_CPU) {
@@ -350,7 +350,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
TRACE_TASK(t, "job_completion().\n");
/* set flags */
- set_rt_flags(t, RT_F_SLEEP);
+ tsk_rt(t)->completed = 1;
/* prepare for next period */
prepare_for_next_period(t);
if (is_released(t, litmus_clock()))
@@ -404,7 +404,7 @@ static void cedf_tick(struct task_struct* t)
*
* - !is_running(scheduled) // the job blocks
* - scheduled->timeslice == 0 // the job completed (forcefully)
- * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall)
+ * - is_completed() // the job completed (by syscall)
* - linked != scheduled // we need to reschedule (for any reason)
* - is_np(scheduled) // rescheduling must be delayed,
* sys_exit_np must be requested
@@ -443,7 +443,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
budget_enforced(entry->scheduled) &&
budget_exhausted(entry->scheduled);
np = exists && is_np(entry->scheduled);
- sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
+ sleep = exists && is_completed(entry->scheduled);
preempt = entry->scheduled != entry->linked;
#ifdef WANT_ALL_SCHED_EVENTS
@@ -600,7 +600,7 @@ static void cedf_task_wake_up(struct task_struct *task)
* a semaphore, it should never be treated as a new job release.
*/
if (get_rt_flags(task) == RT_F_EXIT_SEM) {
- set_rt_flags(task, RT_F_RUNNING);
+ tsk_rt(task)->completed = 0;
} else {
now = litmus_clock();
if (is_tardy(task, now)) {
@@ -612,7 +612,7 @@ static void cedf_task_wake_up(struct task_struct *task)
if (task->rt.time_slice) {
/* came back in time before deadline
*/
- set_rt_flags(task, RT_F_RUNNING);
+ tsk_rt(task)->completed = 0;
}
}
}
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index c3344b9..990e4e1 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -44,7 +44,7 @@
* (thereby removing its association with this
* CPU). However, it will not requeue the
* previously linked task (if any). It will set
- * T's state to RT_F_RUNNING and check whether
+ * T's state to 'not completed' and check whether
* it is already running somewhere else. If T
* is scheduled somewhere else it will link
* it to that CPU instead (and pull the linked
@@ -173,7 +173,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
/* Link new task to CPU. */
if (linked) {
- set_rt_flags(linked, RT_F_RUNNING);
+ tsk_rt(linked)->completed = 0;
/* handle task is already scheduled somewhere! */
on_cpu = linked->rt_param.scheduled_on;
if (on_cpu != NO_CPU) {
@@ -341,7 +341,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
TRACE_TASK(t, "job_completion().\n");
/* set flags */
- set_rt_flags(t, RT_F_SLEEP);
+ tsk_rt(t)->completed = 1;
/* prepare for next period */
prepare_for_next_period(t);
if (is_released(t, litmus_clock()))
@@ -394,7 +394,7 @@ static void gsnedf_tick(struct task_struct* t)
*
* - !is_running(scheduled) // the job blocks
* - scheduled->timeslice == 0 // the job completed (forcefully)
- * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall)
+ * - is_completed() // the job completed (by syscall)
* - linked != scheduled // we need to reschedule (for any reason)
* - is_np(scheduled) // rescheduling must be delayed,
* sys_exit_np must be requested
@@ -430,7 +430,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
out_of_time = exists && budget_enforced(entry->scheduled)
&& budget_exhausted(entry->scheduled);
np = exists && is_np(entry->scheduled);
- sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
+ sleep = exists && is_completed(entry->scheduled);
preempt = entry->scheduled != entry->linked;
#ifdef WANT_ALL_SCHED_EVENTS
@@ -582,7 +582,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
* a semaphore, it should never be treated as a new job release.
*/
if (get_rt_flags(task) == RT_F_EXIT_SEM) {
- set_rt_flags(task, RT_F_RUNNING);
+ tsk_rt(task)->completed = 0;
} else {
now = litmus_clock();
if (is_tardy(task, now)) {
@@ -594,7 +594,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
if (task->rt.time_slice) {
/* came back in time before deadline
*/
- set_rt_flags(task, RT_F_RUNNING);
+ tsk_rt(task)->completed = 0;
}
}
}
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 72c06a4..58f7fc9 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -299,7 +299,7 @@ static void pfair_prepare_next_period(struct task_struct* t)
struct pfair_param* p = tsk_pfair(t);
prepare_for_next_period(t);
- get_rt_flags(t) = RT_F_RUNNING;
+ tsk_rt(t)->completed = 0;
p->release += p->period;
}
@@ -598,7 +598,7 @@ static int safe_to_schedule(struct task_struct* t, int cpu)
"scheduled already on %d.\n", cpu, where);
return 0;
} else
- return tsk_rt(t)->present && get_rt_flags(t) == RT_F_RUNNING;
+ return tsk_rt(t)->present && !is_completed(t);
}
static struct task_struct* pfair_schedule(struct task_struct * prev)
@@ -621,7 +621,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
raw_spin_lock(cpu_lock(state));
blocks = is_realtime(prev) && !is_running(prev);
- completion = is_realtime(prev) && get_rt_flags(prev) == RT_F_SLEEP;
+ completion = is_realtime(prev) && is_completed(prev);
out_of_time = is_realtime(prev) && time_after(cur_release(prev),
state->local_tick);
@@ -720,7 +720,7 @@ static void pfair_task_wake_up(struct task_struct *t)
/* only add to ready queue if the task isn't still linked somewhere */
if (requeue) {
TRACE_TASK(t, "requeueing required\n");
- tsk_rt(t)->flags = RT_F_RUNNING;
+ tsk_rt(t)->completed = 0;
__add_ready(&cluster->pfair, t);
}
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index b16406e..3e5b40d 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -115,7 +115,7 @@ static void requeue(struct task_struct* t, pfp_domain_t *pfp)
if (t->state != TASK_RUNNING)
TRACE_TASK(t, "requeue: !TASK_RUNNING\n");
- set_rt_flags(t, RT_F_RUNNING);
+ tsk_rt(t)->completed = 0;
if (is_released(t, litmus_clock()))
fp_prio_add(&pfp->ready_queue, t, priority_index(t));
else
@@ -127,7 +127,7 @@ static void job_completion(struct task_struct* t, int forced)
sched_trace_task_completion(t,forced);
TRACE_TASK(t, "job_completion().\n");
- set_rt_flags(t, RT_F_SLEEP);
+ tsk_rt(t)->completed = 1;
prepare_for_next_period(t);
}
@@ -179,7 +179,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
budget_enforced(pfp->scheduled) &&
budget_exhausted(pfp->scheduled);
np = exists && is_np(pfp->scheduled);
- sleep = exists && get_rt_flags(pfp->scheduled) == RT_F_SLEEP;
+ sleep = exists && is_completed(pfp->scheduled);
migrate = exists && get_partition(pfp->scheduled) != pfp->cpu;
preempt = migrate || fp_preemption_needed(&pfp->ready_queue, prev);
@@ -232,7 +232,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
if (next) {
TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
- set_rt_flags(next, RT_F_RUNNING);
+ tsk_rt(next)->completed = 0;
} else {
TRACE("becoming idle at %llu\n", litmus_clock());
}
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 8933e15..0e1675d 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -60,7 +60,7 @@ static void requeue(struct task_struct* t, rt_domain_t *edf)
if (t->state != TASK_RUNNING)
TRACE_TASK(t, "requeue: !TASK_RUNNING\n");
- set_rt_flags(t, RT_F_RUNNING);
+ tsk_rt(t)->completed = 0;
if (is_released(t, litmus_clock()))
__add_ready(edf, t);
else
@@ -160,7 +160,7 @@ static void job_completion(struct task_struct* t, int forced)
sched_trace_task_completion(t,forced);
TRACE_TASK(t, "job_completion().\n");
- set_rt_flags(t, RT_F_SLEEP);
+ tsk_rt(t)->completed = 1;
prepare_for_next_period(t);
}
@@ -214,7 +214,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
budget_enforced(pedf->scheduled) &&
budget_exhausted(pedf->scheduled);
np = exists && is_np(pedf->scheduled);
- sleep = exists && get_rt_flags(pedf->scheduled) == RT_F_SLEEP;
+ sleep = exists && is_completed(pedf->scheduled);
preempt = edf_preemption_needed(edf, prev);
/* If we need to preempt do so.
@@ -266,7 +266,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
if (next) {
TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
- set_rt_flags(next, RT_F_RUNNING);
+ tsk_rt(next)->completed = 0;
} else {
TRACE("becoming idle at %llu\n", litmus_clock());
}
--
1.7.2.5