beyond_笑谈 发表于 2025-1-7 21:54

《Linux内核深度解析》 进程调度和多线程管理

<div class='showpostmsg'><p>在Linux内核中,3个系统调用可以用来创建新的进程。</p>

<ol>
        <li >fork(分叉):子进程是父进程的一个副本,采用了写时复制的技术</li>
        <li >vfork:用于创建子进程,之后子进程立即调用execve以装载新程序的情况。为了避免复制物理页,父进程会睡眠等待子进程装载新程序。现在fork采用了写时复制的技术,vfork失去了速度优势,已经被废弃。</li>
        <li >clone(克隆):可以精确地控制子进程和父进程共享哪些资源。这个系统调用的主要用处是可供pthread库用来创建线程。其中clone是功能最齐全的函数,参数多,使用复杂,fork是clone的简化函数。</li>
        <li >函数_do_fork,代码如下</li>
</ol>

<pre>
<code>long _do_fork(unsigned long clone_flags,        //克隆标志
              unsigned long stack_start,        //用作指定新线程的用户栈的起始地址
              unsigned long stack_size,                //指定新线程的用户栈的长度
              int __user *parent_tidptr,                //父进程用户空间中存放新线程进程标识符的位置(对应CLONE_PARENT_SETTID)
              int __user *child_tidptr,                //子进程用户空间中存放新线程进程标识符的位置(对应CLONE_CHILD_SETTID/CLEARTID)
              unsigned long tls)                        //指定新线程本地存储的位置
</code></pre>

<p>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;5. 函数copy_process</p>

<p >&nbsp;&nbsp;&nbsp;&nbsp;创建新进程的主要工作由函数copy_process完成,代码如下</p>

<pre>
<code>static __latent_entropy struct task_struct *copy_process(
                                        unsigned long clone_flags,
                                        unsigned long stack_start,
                                        unsigned long stack_size,
                                        int __user *child_tidptr,
                                        struct pid *pid,
                                        int trace,
                                        unsigned long tls,
                                        int node)
{
        int retval;
        struct task_struct *p;

        if ((clone_flags &amp; (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
                return ERR_PTR(-EINVAL);

        if ((clone_flags &amp; (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
                return ERR_PTR(-EINVAL);

        /*
       * Thread groups must share signals as well, and detached threads
       * can only be started up within the thread group.
       */
        if ((clone_flags &amp; CLONE_THREAD) &amp;&amp; !(clone_flags &amp; CLONE_SIGHAND))
                return ERR_PTR(-EINVAL);

        /*
       * Shared signal handlers imply shared VM. By way of the above,
       * thread groups also imply shared VM. Blocking this case allows
       * for various simplifications in other code.
       */
        if ((clone_flags &amp; CLONE_SIGHAND) &amp;&amp; !(clone_flags &amp; CLONE_VM))
                return ERR_PTR(-EINVAL);

        /*
       * Siblings of global init remain as zombies on exit since they are
       * not reaped by their parent (swapper). To solve this and to avoid
       * multi-rooted process trees, prevent global and container-inits
       * from creating siblings.
       */
        if ((clone_flags &amp; CLONE_PARENT) &amp;&amp;
                                current-&gt;signal-&gt;flags &amp; SIGNAL_UNKILLABLE)
                return ERR_PTR(-EINVAL);

        /*
       * If the new process will be in a different pid or user namespace
       * do not allow it to share a thread group with the forking task.
       */
        if (clone_flags &amp; CLONE_THREAD) {
                if ((clone_flags &amp; (CLONE_NEWUSER | CLONE_NEWPID)) ||
                  (task_active_pid_ns(current) !=
                                current-&gt;nsproxy-&gt;pid_ns_for_children))
                        return ERR_PTR(-EINVAL);
        }

        retval = security_task_create(clone_flags);
        if (retval)
                goto fork_out;

        retval = -ENOMEM;
        p = dup_task_struct(current, node);
        if (!p)
                goto fork_out;

        /*
       * This _must_ happen before we call free_task(), i.e. before we jump
       * to any of the bad_fork_* labels. This is to avoid freeing
       * p-&gt;set_child_tid which is (ab)used as a kthread's data pointer for
       * kernel threads (PF_KTHREAD).
       */
        p-&gt;set_child_tid = (clone_flags &amp; CLONE_CHILD_SETTID) ? child_tidptr : NULL;
        /*
       * Clear TID on mm_release()?
       */
        p-&gt;clear_child_tid = (clone_flags &amp; CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;

        ftrace_graph_init_task(p);

        rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
        DEBUG_LOCKS_WARN_ON(!p-&gt;hardirqs_enabled);
        DEBUG_LOCKS_WARN_ON(!p-&gt;softirqs_enabled);
#endif
        retval = -EAGAIN;
        if (atomic_read(&amp;p-&gt;real_cred-&gt;user-&gt;processes) &gt;=
                        task_rlimit(p, RLIMIT_NPROC)) {
                if (p-&gt;real_cred-&gt;user != INIT_USER &amp;&amp;
                  !capable(CAP_SYS_RESOURCE) &amp;&amp; !capable(CAP_SYS_ADMIN))
                        goto bad_fork_free;
        }
        current-&gt;flags &amp;= ~PF_NPROC_EXCEEDED;

        retval = copy_creds(p, clone_flags);
        if (retval &lt; 0)
                goto bad_fork_free;

        /*
       * If multiple threads are within copy_process(), then this check
       * triggers too late. This doesn't hurt, the check is only there
       * to stop root fork bombs.
       */
        retval = -EAGAIN;
        if (nr_threads &gt;= max_threads)
                goto bad_fork_cleanup_count;

        delayacct_tsk_init(p);        /* Must remain after dup_task_struct() */
        p-&gt;flags &amp;= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
        p-&gt;flags |= PF_FORKNOEXEC;
        INIT_LIST_HEAD(&amp;p-&gt;children);
        INIT_LIST_HEAD(&amp;p-&gt;sibling);
        rcu_copy_process(p);
        p-&gt;vfork_done = NULL;
        spin_lock_init(&amp;p-&gt;alloc_lock);

        init_sigpending(&amp;p-&gt;pending);

        p-&gt;utime = p-&gt;stime = p-&gt;gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
        p-&gt;utimescaled = p-&gt;stimescaled = 0;
#endif
        prev_cputime_init(&amp;p-&gt;prev_cputime);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
        seqcount_init(&amp;p-&gt;vtime_seqcount);
        p-&gt;vtime_snap = 0;
        p-&gt;vtime_snap_whence = VTIME_INACTIVE;
#endif

#if defined(SPLIT_RSS_COUNTING)
        memset(&amp;p-&gt;rss_stat, 0, sizeof(p-&gt;rss_stat));
#endif

        p-&gt;default_timer_slack_ns = current-&gt;timer_slack_ns;

        task_io_accounting_init(&amp;p-&gt;ioac);
        acct_clear_integrals(p);

        posix_cpu_timers_init(p);

        p-&gt;start_time = ktime_get_ns();
        p-&gt;real_start_time = ktime_get_boot_ns();
        p-&gt;io_context = NULL;
        p-&gt;audit_context = NULL;
        cgroup_fork(p);
#ifdef CONFIG_NUMA
        p-&gt;mempolicy = mpol_dup(p-&gt;mempolicy);
        if (IS_ERR(p-&gt;mempolicy)) {
                retval = PTR_ERR(p-&gt;mempolicy);
                p-&gt;mempolicy = NULL;
                goto bad_fork_cleanup_threadgroup_lock;
        }
#endif
#ifdef CONFIG_CPUSETS
        p-&gt;cpuset_mem_spread_rotor = NUMA_NO_NODE;
        p-&gt;cpuset_slab_spread_rotor = NUMA_NO_NODE;
        seqcount_init(&amp;p-&gt;mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        p-&gt;irq_events = 0;
        p-&gt;hardirqs_enabled = 0;
        p-&gt;hardirq_enable_ip = 0;
        p-&gt;hardirq_enable_event = 0;
        p-&gt;hardirq_disable_ip = _THIS_IP_;
        p-&gt;hardirq_disable_event = 0;
        p-&gt;softirqs_enabled = 1;
        p-&gt;softirq_enable_ip = _THIS_IP_;
        p-&gt;softirq_enable_event = 0;
        p-&gt;softirq_disable_ip = 0;
        p-&gt;softirq_disable_event = 0;
        p-&gt;hardirq_context = 0;
        p-&gt;softirq_context = 0;
#endif

        p-&gt;pagefault_disabled = 0;

#ifdef CONFIG_LOCKDEP
        p-&gt;lockdep_depth = 0; /* no locks held yet */
        p-&gt;curr_chain_key = 0;
        p-&gt;lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
        p-&gt;blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
        p-&gt;sequential_io        = 0;
        p-&gt;sequential_io_avg        = 0;
#endif

        /* Perform scheduler related setup. Assign this task to a CPU. */
        retval = sched_fork(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_policy;

        retval = perf_event_init_task(p);
        if (retval)
                goto bad_fork_cleanup_policy;
        retval = audit_alloc(p);
        if (retval)
                goto bad_fork_cleanup_perf;
        /* copy all the process information */
        shm_init_task(p);
        retval = security_task_alloc(p, clone_flags);
        if (retval)
                goto bad_fork_cleanup_audit;
        retval = copy_semundo(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_security;
        retval = copy_files(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_semundo;
        retval = copy_fs(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_files;
        retval = copy_sighand(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_fs;
        retval = copy_signal(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_sighand;
        retval = copy_mm(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_signal;
        retval = copy_namespaces(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_mm;
        retval = copy_io(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_namespaces;
        retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
        if (retval)
                goto bad_fork_cleanup_io;

        if (pid != &amp;init_struct_pid) {
                pid = alloc_pid(p-&gt;nsproxy-&gt;pid_ns_for_children);
                if (IS_ERR(pid)) {
                        retval = PTR_ERR(pid);
                        goto bad_fork_cleanup_thread;
                }
        }

#ifdef CONFIG_BLOCK
        p-&gt;plug = NULL;
#endif
#ifdef CONFIG_FUTEX
        p-&gt;robust_list = NULL;
#ifdef CONFIG_COMPAT
        p-&gt;compat_robust_list = NULL;
#endif
        INIT_LIST_HEAD(&amp;p-&gt;pi_state_list);
        p-&gt;pi_state_cache = NULL;
#endif
        /*
       * sigaltstack should be cleared when sharing the same VM
       */
        if ((clone_flags &amp; (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
                sas_ss_reset(p);

        /*
       * Syscall tracing and stepping should be turned off in the
       * child regardless of CLONE_PTRACE.
       */
        user_disable_single_step(p);
        clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
        clear_all_latency_tracing(p);

        /* ok, now we should be set up.. */
        p-&gt;pid = pid_nr(pid);
        if (clone_flags &amp; CLONE_THREAD) {
                p-&gt;exit_signal = -1;
                p-&gt;group_leader = current-&gt;group_leader;
                p-&gt;tgid = current-&gt;tgid;
        } else {
                if (clone_flags &amp; CLONE_PARENT)
                        p-&gt;exit_signal = current-&gt;group_leader-&gt;exit_signal;
                else
                        p-&gt;exit_signal = (clone_flags &amp; CSIGNAL);
                p-&gt;group_leader = p;
                p-&gt;tgid = p-&gt;pid;
        }

        p-&gt;nr_dirtied = 0;
        p-&gt;nr_dirtied_pause = 128 &gt;&gt; (PAGE_SHIFT - 10);
        p-&gt;dirty_paused_when = 0;

        p-&gt;pdeath_signal = 0;
        INIT_LIST_HEAD(&amp;p-&gt;thread_group);
        p-&gt;task_works = NULL;

        cgroup_threadgroup_change_begin(current);
        /*
       * Ensure that the cgroup subsystem policies allow the new process to be
       * forked. It should be noted that the new process's css_set can be changed
       * between here and cgroup_post_fork() if an organisation operation is in
       * progress.
       */
        retval = cgroup_can_fork(p);
        if (retval)
                goto bad_fork_free_pid;

        /*
       * Make it visible to the rest of the system, but don't wake it up yet.
       * Need tasklist lock for parent etc handling!
       */
        write_lock_irq(&amp;tasklist_lock);

        /* CLONE_PARENT re-uses the old parent */
        if (clone_flags &amp; (CLONE_PARENT|CLONE_THREAD)) {
                p-&gt;real_parent = current-&gt;real_parent;
                p-&gt;parent_exec_id = current-&gt;parent_exec_id;
        } else {
                p-&gt;real_parent = current;
                p-&gt;parent_exec_id = current-&gt;self_exec_id;
        }

        klp_copy_process(p);

        spin_lock(&amp;current-&gt;sighand-&gt;siglock);

        /*
       * Copy seccomp details explicitly here, in case they were changed
       * before holding sighand lock.
       */
        copy_seccomp(p);

        /*
       * Process group and session signals need to be delivered to just the
       * parent before the fork or both the parent and the child after the
       * fork. Restart if a signal comes in before we add the new process to
       * its process group.
       * A fatal signal pending means that current will exit, so the new
       * thread can't slip out of an OOM kill (or normal SIGKILL).
        */
        recalc_sigpending();
        if (signal_pending(current)) {
                retval = -ERESTARTNOINTR;
                goto bad_fork_cancel_cgroup;
        }
        if (unlikely(!(ns_of_pid(pid)-&gt;nr_hashed &amp; PIDNS_HASH_ADDING))) {
                retval = -ENOMEM;
                goto bad_fork_cancel_cgroup;
        }

        if (likely(p-&gt;pid)) {
                ptrace_init_task(p, (clone_flags &amp; CLONE_PTRACE) || trace);

                init_task_pid(p, PIDTYPE_PID, pid);
                if (thread_group_leader(p)) {
                        init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
                        init_task_pid(p, PIDTYPE_SID, task_session(current));

                        if (is_child_reaper(pid)) {
                                ns_of_pid(pid)-&gt;child_reaper = p;
                                p-&gt;signal-&gt;flags |= SIGNAL_UNKILLABLE;
                        }

                        p-&gt;signal-&gt;leader_pid = pid;
                        p-&gt;signal-&gt;tty = tty_kref_get(current-&gt;signal-&gt;tty);
                        /*
                       * Inherit has_child_subreaper flag under the same
                       * tasklist_lock with adding child to the process tree
                       * for propagate_has_child_subreaper optimization.
                       */
                        p-&gt;signal-&gt;has_child_subreaper = p-&gt;real_parent-&gt;signal-&gt;has_child_subreaper ||
                                                       p-&gt;real_parent-&gt;signal-&gt;is_child_subreaper;
                        list_add_tail(&amp;p-&gt;sibling, &amp;p-&gt;real_parent-&gt;children);
                        list_add_tail_rcu(&amp;p-&gt;tasks, &amp;init_task.tasks);
                        attach_pid(p, PIDTYPE_PGID);
                        attach_pid(p, PIDTYPE_SID);
                        __this_cpu_inc(process_counts);
                } else {
                        current-&gt;signal-&gt;nr_threads++;
                        atomic_inc(&amp;current-&gt;signal-&gt;live);
                        atomic_inc(&amp;current-&gt;signal-&gt;sigcnt);
                        list_add_tail_rcu(&amp;p-&gt;thread_group,
                                          &amp;p-&gt;group_leader-&gt;thread_group);
                        list_add_tail_rcu(&amp;p-&gt;thread_node,
                                          &amp;p-&gt;signal-&gt;thread_head);
                }
                attach_pid(p, PIDTYPE_PID);
                nr_threads++;
        }

        total_forks++;
        spin_unlock(&amp;current-&gt;sighand-&gt;siglock);
        syscall_tracepoint_update(p);
        write_unlock_irq(&amp;tasklist_lock);

        proc_fork_connector(p);
        cgroup_post_fork(p);
        cgroup_threadgroup_change_end(current);
        perf_event_fork(p);

        trace_task_newtask(p, clone_flags);
        uprobe_copy_process(p, clone_flags);

        return p;

bad_fork_cancel_cgroup:
        spin_unlock(&amp;current-&gt;sighand-&gt;siglock);
        write_unlock_irq(&amp;tasklist_lock);
        cgroup_cancel_fork(p);
bad_fork_free_pid:
        cgroup_threadgroup_change_end(current);
        if (pid != &amp;init_struct_pid)
                free_pid(pid);
bad_fork_cleanup_thread:
        exit_thread(p);
bad_fork_cleanup_io:
        if (p-&gt;io_context)
                exit_io_context(p);
bad_fork_cleanup_namespaces:
        exit_task_namespaces(p);
bad_fork_cleanup_mm:
        if (p-&gt;mm)
                mmput(p-&gt;mm);
bad_fork_cleanup_signal:
        if (!(clone_flags &amp; CLONE_THREAD))
                free_signal_struct(p-&gt;signal);
bad_fork_cleanup_sighand:
        __cleanup_sighand(p-&gt;sighand);
bad_fork_cleanup_fs:
        exit_fs(p); /* blocking */
bad_fork_cleanup_files:
        exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
        exit_sem(p);
bad_fork_cleanup_security:
        security_task_free(p);
bad_fork_cleanup_audit:
        audit_free(p);
bad_fork_cleanup_perf:
        perf_event_free_task(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
        mpol_put(p-&gt;mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
        delayacct_tsk_free(p);
bad_fork_cleanup_count:
        atomic_dec(&amp;p-&gt;cred-&gt;user-&gt;processes);
        exit_creds(p);
bad_fork_free:
        p-&gt;state = TASK_DEAD;
        put_task_stack(p);
        free_task(p);
fork_out:
        return ERR_PTR(retval);
}
//end copy_process
</code></pre>

<p>&nbsp;&nbsp;&nbsp;&nbsp;</p>

<p>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6.&nbsp;&nbsp;&nbsp;&nbsp;唤醒新进程</p>

<p >&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;函数wake_up_new_task负责唤醒刚刚创建的新进程,代码如下</p>

<p >&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</p>

<pre>
<code>void wake_up_new_task(struct task_struct *p)
{
        struct rq_flags rf;
        struct rq *rq;

        raw_spin_lock_irqsave(&amp;p-&gt;pi_lock, rf.flags);
        p-&gt;state = TASK_RUNNING;
#ifdef CONFIG_SMP
        /*
       * Fork balancing, do it here and not earlier because:
       *- cpus_allowed can change in the fork path
       *- any previously selected CPU might disappear through hotplug
       *
       * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
       * as we're not fully set-up yet.
       */
        __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
#endif
        rq = __task_rq_lock(p, &amp;rf);
        update_rq_clock(rq);
        post_init_entity_util_avg(&amp;p-&gt;se);

        activate_task(rq, p, ENQUEUE_NOCLOCK);
        p-&gt;on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p);
        check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
        if (p-&gt;sched_class-&gt;task_woken) {
                /*
                * Nothing relies on rq-&gt;lock after this, so it's fine to
               * drop it.
               */
                rq_unpin_lock(rq, &amp;rf);
                p-&gt;sched_class-&gt;task_woken(rq, p);
                rq_repin_lock(rq, &amp;rf);
        }
#endif
        task_rq_unlock(rq, p, &amp;rf);
}
// end of wake_up_new_task
</code></pre>

<p>&nbsp;</p>

<p>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;7.&nbsp; &nbsp;新进程第一次运行</p>

<p >&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;新进程第一次运行是从函数ret_from_fork开始执行,函数ret_from_fork是由各种处理器架构自定义的函数,ARM64架构定义的函数ret_from_fork代码如下</p>

<pre>
<code>tsk        .req        x28                // current thread_info

ENTRY(ret_from_fork)
        bl        schedule_tail
        cbz        x19, 1f                                // not a kernel thread
        mov        x0, x20
        blr        x19
1:        get_thread_info tsk
        b        ret_to_user
ENDPROC(ret_from_fork)
</code></pre>

<p>&nbsp;</p>
</div><script>                                        var loginstr = '<div class="locked">查看本帖全部内容,请<a href="javascript:;"   style="color:#e60000" class="loginf">登录</a>或者<a href="https://bbs.eeworld.com.cn/member.php?mod=register_eeworld.php&action=wechat" style="color:#e60000" target="_blank">注册</a></div>'; // Login-prompt markup appended to truncated posts (runtime HTML string — do not alter)
                                        
                                        // Site-template gating: discuz_uid is a page global set by the forum engine; 0 means an anonymous (not-logged-in) visitor.
                                         if(parseInt(discuz_uid)==0){
                                                                                                 (function($){
                                                        var postHeight = getTextHeight(400); // getTextHeight is a template helper — presumably returns a pixel height for the visible excerpt; TODO confirm
                                                        $(".showpostmsg").html($(".showpostmsg").html()); // re-set HTML to normalize the DOM before truncation — NOTE(review): appears to be a no-op re-render; confirm against template
                                                        $(".showpostmsg").after(loginstr); // append the login prompt below the post body
                                                        $(".showpostmsg").css({height:postHeight,overflow:"hidden"}); // clamp the post body height and hide overflow for guests
                                                })(jQuery);
                                        }                </script><script type="text/javascript">/* Page-view counter loader: dynamically injects a script from //counter.eeworld.com.cn/pv/count/<id> (id 523 for this post) before the first script tag. */(function(d,c){var a=d.createElement("script"),m=d.getElementsByTagName("script"),eewurl="//counter.eeworld.com.cn/pv/count/";a.src=eewurl+c;m.parentNode.insertBefore(a,m)})(document,523)</script>
页: [1]
查看完整版本: 《Linux内核深度解析》 进程调度和多线程管理