[LITMUS^RT] locking improvements

Björn Brandenburg bbb at mpi-sws.org
Fri Feb 15 17:00:54 CET 2013


Hi everyone,

I've pushed two patches that make it easier to use the locking API correctly.

	LITMUS^RT: https://github.com/LITMUS-RT/litmus-rt/commits/prop/prevent-lock-nesting

	liblitmus: https://github.com/LITMUS-RT/liblitmus/commits/prop/prevent-lock-nesting

The main change is that the kernel will now return -EBUSY when userspace attempts to nest critical sections. This check is enforced on a protocol-by-protocol basis and should not interfere with future RNLP work (or other protocols that support nesting). Several test cases have been added to liblitmus to make sure this feature works as expected.

Further, a compilation error related to tracepoints was identified and corrected by Felipe; I've already merged the corresponding patch into staging.

The main patch is included below. Please let me know if you have any comments.

Thanks,
Björn


commit 5b8782ef8948c7aad808971f359401f1dc837c25
Author: Bjoern Brandenburg <bbb at mpi-sws.org>
Date:   Fri Feb 15 16:44:42 2013 +0100

    Disallow nesting of LITMUS^RT locks
    
    Nesting of locks was never supported in LITMUS^RT, since
    the required analysis does not exist. That is, as
    defined in the literature, the protocols implemented
    in LITMUS^RT have not been studied in conjunction with
    nested critical sections.
    
    In LITMUS^RT, attempting to nest locks could lead to
    silent or not-so-silent bugs. This patch makes this
    restriction explicit and returns EBUSY when a process
    attempts to nest resources.
    
    This is enforced on a protocol-by-protocol basis,
    which means that adding protocols with support for
    nesting in future versions is not affected by this
    change.
    
    Exception: PCP and SRP resources may be nested,
    but not within global critical sections.

diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 4cd06dd..5487bdb 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -168,6 +168,11 @@ struct rt_param {
 	unsigned int		priority_boosted:1;
 	/* If so, when did this start? */
 	lt_t			boost_start_time;
+
+	/* How many LITMUS^RT locks does the task currently hold/wait for? */
+	unsigned int		num_locks_held;
+	/* How many PCP/SRP locks does the task currently hold/wait for? */
+	unsigned int		num_local_locks_held;
 #endif
 
 	/* user controlled parameters */
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index b8548b8..8fdc8f6 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -773,6 +773,10 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition --- not supported by FMLP */
+	if (tsk_rt(t)->num_locks_held)
+		return -EBUSY;
+
 	spin_lock_irqsave(&sem->wait.lock, flags);
 
 	if (sem->owner) {
@@ -817,6 +821,8 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -834,6 +840,8 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_locks_held--;
+
 	/* check if there are jobs waiting for this resource */
 	next = __waitqueue_remove_first(&sem->wait);
 	if (next) {
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index e3c3bd7..fc9a509 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -555,6 +555,11 @@ int pfp_fmlp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition --- not supported by FMLP */
+	if (tsk_rt(t)->num_locks_held ||
+	    tsk_rt(t)->num_local_locks_held)
+		return -EBUSY;
+
 	spin_lock_irqsave(&sem->wait.lock, flags);
 
 	/* tie-break by this point in time */
@@ -599,6 +604,8 @@ int pfp_fmlp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -616,6 +623,8 @@ int pfp_fmlp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_locks_held--;
+
 	/* we lose the benefit of priority boosting */
 
 	unboost_priority(t);
@@ -790,6 +799,11 @@ int pfp_mpcp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition */
+	if (tsk_rt(t)->num_locks_held ||
+	    tsk_rt(t)->num_local_locks_held)
+		return -EBUSY;
+
 	preempt_disable();
 
 	if (sem->vspin)
@@ -840,6 +854,8 @@ int pfp_mpcp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -857,6 +873,9 @@ int pfp_mpcp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+
+	tsk_rt(t)->num_locks_held--;
+
 	/* we lose the benefit of priority boosting */
 
 	unboost_priority(t);
@@ -1249,12 +1268,18 @@ int pfp_pcp_lock(struct litmus_lock* l)
 	if (!is_realtime(t) || from != to)
 		return -EPERM;
 
+	/* prevent nested lock acquisition in global critical section */
+	if (tsk_rt(t)->num_locks_held)
+		return -EBUSY;
+
 	preempt_disable();
 
 	pcp_raise_ceiling(sem, eprio);
 
 	preempt_enable();
 
+	tsk_rt(t)->num_local_locks_held++;
+
 	return 0;
 }
 
@@ -1272,6 +1297,8 @@ int pfp_pcp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_local_locks_held--;
+
 	/* give it back */
 	pcp_lower_ceiling(sem);
 
@@ -1428,6 +1455,11 @@ int pfp_dpcp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition */
+	if (tsk_rt(t)->num_locks_held ||
+	    tsk_rt(t)->num_local_locks_held)
+		return -EBUSY;
+
 	preempt_disable();
 
 	/* Priority-boost ourself *before* we suspend so that
@@ -1444,6 +1476,8 @@ int pfp_dpcp_lock(struct litmus_lock* l)
 
 	preempt_enable();
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -1461,6 +1495,8 @@ int pfp_dpcp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_locks_held--;
+
 	home = sem->owner_cpu;
 
 	/* give it back */
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 0e1675d..c158f35 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -419,6 +419,11 @@ int psnedf_fmlp_lock(struct litmus_lock* l)
 	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent nested lock acquisition --- not supported by FMLP */
+	if (tsk_rt(t)->num_locks_held ||
+	    tsk_rt(t)->num_local_locks_held)
+		return -EBUSY;
+
 	spin_lock_irqsave(&sem->wait.lock, flags);
 
 	if (sem->owner) {
@@ -459,6 +464,8 @@ int psnedf_fmlp_lock(struct litmus_lock* l)
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	tsk_rt(t)->num_locks_held++;
+
 	return 0;
 }
 
@@ -476,6 +483,8 @@ int psnedf_fmlp_unlock(struct litmus_lock* l)
 		goto out;
 	}
 
+	tsk_rt(t)->num_locks_held--;
+
 	/* we lose the benefit of priority boosting */
 
 	unboost_priority(t);
diff --git a/litmus/srp.c b/litmus/srp.c
index 2ed4ec1..c88dbf2 100644
--- a/litmus/srp.c
+++ b/litmus/srp.c
@@ -98,11 +98,16 @@ static void srp_add_prio(struct srp* srp, struct srp_priority* prio)
 
 static int lock_srp_semaphore(struct litmus_lock* l)
 {
+	struct task_struct* t = current;
 	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
 
-	if (!is_realtime(current))
+	if (!is_realtime(t))
 		return -EPERM;
 
+	/* prevent acquisition of local locks in global critical sections */
+	if (tsk_rt(t)->num_locks_held)
+		return -EBUSY;
+
 	preempt_disable();
 
 	/* Update ceiling. */
@@ -111,9 +116,11 @@ static int lock_srp_semaphore(struct litmus_lock* l)
 	/* SRP invariant: all resources available */
 	BUG_ON(sem->owner != NULL);
 
-	sem->owner = current;
+	sem->owner = t;
 	TRACE_CUR("acquired srp 0x%p\n", sem);
 
+	tsk_rt(t)->num_local_locks_held++;
+
 	preempt_enable();
 
 	return 0;
@@ -121,12 +128,13 @@ static int lock_srp_semaphore(struct litmus_lock* l)
 
 static int unlock_srp_semaphore(struct litmus_lock* l)
 {
+	struct task_struct* t = current;
 	struct srp_semaphore* sem = container_of(l, struct srp_semaphore, litmus_lock);
 	int err = 0;
 
 	preempt_disable();
 
-	if (sem->owner != current) {
+	if (sem->owner != t) {
 		err = -EINVAL;
 	} else {
 		/* Determine new system priority ceiling for this CPU. */
@@ -138,6 +146,8 @@ static int unlock_srp_semaphore(struct litmus_lock* l)
 		/* Wake tasks on this CPU, if they exceed current ceiling. */
 		TRACE_CUR("released srp 0x%p\n", sem);
 		wake_up_all(&__get_cpu_var(srp).ceiling_blocked);
+
+		tsk_rt(t)->num_local_locks_held--;
 	}
 
 	preempt_enable();





More information about the litmus-dev mailing list