@@ -2,5 +2,7 @@
#define _LINUX_SCHED_SIGNAL_H
#include <linux/sched.h>
+void task_join_group_stop(struct task_struct *task);
+
#endif /* _LINUX_SCHED_SIGNAL_H */
@@ -79,6 +79,7 @@
#include <linux/aio.h>
#include <linux/hmm.h>
#include <linux/compiler.h>
+#include <linux/sched/signal.h>
#ifndef __GENKSYMS__
#include <linux/user_namespace.h>
#endif
@@ -1703,18 +1704,20 @@ static struct task_struct *copy_process(unsigned long clone_flags,
	 */
	copy_seccomp(p);
-	/*
-	 * Process group and session signals need to be delivered to just the
-	 * parent before the fork or both the parent and the child after the
-	 * fork. Restart if a signal comes in before we add the new process to
-	 * it's process group.
-	 * A fatal signal pending means that current will exit, so the new
-	 * thread can't slip out of an OOM kill (or normal SIGKILL).
-	 */
-	recalc_sigpending();
-	if (signal_pending(current)) {
-		retval = -ERESTARTNOINTR;
-		goto bad_fork_cancel_cgroup;
+	if (!(clone_flags & CLONE_THREAD)) {
+		/*
+		 * Process group and session signals need to be delivered to just the
+		 * parent before the fork or both the parent and the child after the
+		 * fork. Restart if a signal comes in before we add the new process to
+		 * its process group.
+		 * A fatal signal pending means that current will exit, so the new
+		 * thread can't slip out of an OOM kill (or normal SIGKILL).
+		 */
+		recalc_sigpending();
+		if (signal_pending(current)) {
+			retval = -ERESTARTNOINTR;
+			goto bad_fork_cancel_cgroup;
+		}
	}
	if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
		retval = -ENOMEM;
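
(Aside, not part of the patch.) The CLONE_THREAD split above means thread creation is no longer restarted with -ERESTARTNOINTR merely because a group-wide stop signal is pending; instead the new thread is made to join any stop already in progress via task_join_group_stop(), added in the kernel/signal.c hunk below. A rough userspace sketch of the race being closed, with arbitrary iteration counts and delays, might look like:

/*
 * Illustrative reproducer sketch: one process spawns threads in a
 * tight loop while its parent cycles SIGSTOP/SIGCONT at it.  Before
 * this change, a thread created at the wrong moment could keep
 * running while its siblings were stopped.
 */
#include <pthread.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void *nop(void *arg)
{
	return arg;
}

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		for (;;) {			/* spawn threads as fast as possible */
			pthread_t t;

			if (pthread_create(&t, NULL, nop, NULL) == 0)
				pthread_join(t, NULL);
		}
	}
	for (int i = 0; i < 1000; i++) {	/* cycle the group stop */
		kill(child, SIGSTOP);
		usleep(100);
		kill(child, SIGCONT);
	}
	kill(child, SIGKILL);
	waitpid(child, NULL, 0);
	return 0;
}

Whether the stop actually holds can be checked externally via the State: lines in /proc/<pid>/task/*/status while the child is stopped.
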
@@ -1752,6 +1755,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
		current->signal->nr_threads++;
		atomic_inc(&current->signal->live);
		atomic_inc(&current->signal->sigcnt);
+		task_join_group_stop(p);
		list_add_tail_rcu(&p->thread_group,
				  &p->group_leader->thread_group);
		list_add_tail_rcu(&p->thread_node,
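
(Aside, not part of the patch.) The helper added in the next hunk runs under the sighand lock, right after the new thread is accounted into the group: it copies the number of the signal that initiated a pending group stop out of current->jobctl, sets the stop-pending/consume bits on the new thread, and bumps group_stop_count so the stop only completes once the new thread has stopped too. For reference, the jobctl encoding it relies on looks roughly like the following (paraphrased from include/linux/sched/jobctl.h; verify against the tree this is applied to):

/* Low 16 bits: number of the signal that initiated the group stop.
 * Higher bits: per-task stop/trap state flags. */
#define JOBCTL_STOP_SIGMASK	0xffff		/* signr of the last group stop */
#define JOBCTL_STOP_PENDING	(1UL << 17)	/* task should stop for group stop */
#define JOBCTL_STOP_CONSUME	(1UL << 18)	/* consume group stop count */

Note that task_set_jobctl_pending() returns false for a task that is already dying (fatal signal pending or PF_EXITING), so group_stop_count is only incremented when the new thread will actually participate in the stop.
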
@@ -374,6 +374,20 @@ static bool task_participate_group_stop(struct task_struct *task)
	return false;
}
+void task_join_group_stop(struct task_struct *task)
+{
+	/* Have the new thread join an on-going signal group stop */
+	unsigned long jobctl = current->jobctl;
+	if (jobctl & JOBCTL_STOP_PENDING) {
+		struct signal_struct *sig = current->signal;
+		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
+		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
+		if (task_set_jobctl_pending(task, signr | gstop)) {
+			sig->group_stop_count++;
+		}
+	}
+}
+
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an