In the Linux kernel, three system calls can be used to create a new process:
- fork: the child process is a copy of the parent, created with the copy-on-write (COW) technique (see the user-space sketch after this list).
- vfork: used to create a child process that will immediately call execve to load a new program. To avoid copying physical pages, the parent sleeps until the child has loaded the new program. Now that fork uses copy-on-write, vfork has lost its speed advantage and is considered obsolete.
- clone: gives precise control over which resources the child shares with the parent. Its main use is in the pthread library for creating threads. Among the three, clone is the most general function, with many parameters and complex usage; fork is essentially a simplified special case of clone.
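The following minimal user-space sketch (standard POSIX calls, not taken from the kernel sources) illustrates the fork-then-exec pattern that vfork was designed to speed up; thanks to copy-on-write, fork itself duplicates no physical pages until one side writes to them:
- #include <stdio.h>
- #include <stdlib.h>
- #include <sys/wait.h>
- #include <unistd.h>
-
- int main(void)
- {
-     pid_t pid = fork();            /* one call, returns twice */
-
-     if (pid < 0) {                 /* fork failed */
-         perror("fork");
-         exit(EXIT_FAILURE);
-     }
-     if (pid == 0) {                /* child: a copy-on-write copy of the parent */
-         execlp("echo", "echo", "hello from the child", (char *)NULL);
-         _exit(127);                /* reached only if execlp() fails */
-     }
-     waitpid(pid, NULL, 0);         /* parent reaps the child */
-     return 0;
- }
-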
4. The function _do_fork
All three system calls end up calling _do_fork, whose prototype is as follows:
- long _do_fork(unsigned long clone_flags,
- unsigned long stack_start,
- unsigned long stack_size,
- int __user *parent_tidptr,
- int __user *child_tidptr,
- unsigned long tls)
-
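For context, the fork, vfork and clone system calls are thin wrappers that differ only in the arguments they pass to _do_fork. A simplified sketch based on kernel/fork.c of this kernel generation (architecture and CONFIG_MMU guards omitted):
- SYSCALL_DEFINE0(fork)
- {
-     /* SIGCHLD: the signal sent to the parent when the child exits */
-     return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0);
- }
-
- SYSCALL_DEFINE0(vfork)
- {
-     /* share the mm with the parent and block it until the child execs or exits */
-     return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 0, NULL, NULL, 0);
- }
-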
5. The function copy_process
Most of the work of creating a new process is done by the function copy_process, whose code is as follows:
- static __latent_entropy struct task_struct *copy_process(
- unsigned long clone_flags,
- unsigned long stack_start,
- unsigned long stack_size,
- int __user *child_tidptr,
- struct pid *pid,
- int trace,
- unsigned long tls,
- int node)
- {
- int retval;
- struct task_struct *p;
-
- if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
- return ERR_PTR(-EINVAL);
-
- if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
- return ERR_PTR(-EINVAL);
-
- /*
- * Thread groups must share signals as well, and detached threads
- * can only be started up within the thread group.
- */
- if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
- return ERR_PTR(-EINVAL);
-
- /*
- * Shared signal handlers imply shared VM. By way of the above,
- * thread groups also imply shared VM. Blocking this case allows
- * for various simplifications in other code.
- */
- if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
- return ERR_PTR(-EINVAL);
-
- /*
- * Siblings of global init remain as zombies on exit since they are
- * not reaped by their parent (swapper). To solve this and to avoid
- * multi-rooted process trees, prevent global and container-inits
- * from creating siblings.
- */
- if ((clone_flags & CLONE_PARENT) &&
- current->signal->flags & SIGNAL_UNKILLABLE)
- return ERR_PTR(-EINVAL);
-
- /*
- * If the new process will be in a different pid or user namespace
- * do not allow it to share a thread group with the forking task.
- */
- if (clone_flags & CLONE_THREAD) {
- if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
- (task_active_pid_ns(current) !=
- current->nsproxy->pid_ns_for_children))
- return ERR_PTR(-EINVAL);
- }
-
- retval = security_task_create(clone_flags);
- if (retval)
- goto fork_out;
-
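- /*
-  * Duplicate the parent's task_struct and kernel stack; the child
-  * starts out as an almost exact copy of the calling task.
-  */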
- retval = -ENOMEM;
- p = dup_task_struct(current, node);
- if (!p)
- goto fork_out;
-
- /*
- * This _must_ happen before we call free_task(), i.e. before we jump
- * to any of the bad_fork_* labels. This is to avoid freeing
- * p->set_child_tid which is (ab)used as a kthread's data pointer for
- * kernel threads (PF_KTHREAD).
- */
- p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
- /*
- * Clear TID on mm_release()?
- */
- p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
-
- ftrace_graph_init_task(p);
-
- rt_mutex_init_task(p);
-
-
- DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
- DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
-
- retval = -EAGAIN;
- if (atomic_read(&p->real_cred->user->processes) >=
- task_rlimit(p, RLIMIT_NPROC)) {
- if (p->real_cred->user != INIT_USER &&
- !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
- goto bad_fork_free;
- }
- current->flags &= ~PF_NPROC_EXCEEDED;
-
- retval = copy_creds(p, clone_flags);
- if (retval < 0)
- goto bad_fork_free;
-
- /*
- * If multiple threads are within copy_process(), then this check
- * triggers too late. This doesn't hurt, the check is only there
- * to stop root fork bombs.
- */
- retval = -EAGAIN;
- if (nr_threads >= max_threads)
- goto bad_fork_cleanup_count;
-
- delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
- p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
- p->flags |= PF_FORKNOEXEC;
- INIT_LIST_HEAD(&p->children);
- INIT_LIST_HEAD(&p->sibling);
- rcu_copy_process(p);
- p->vfork_done = NULL;
- spin_lock_init(&p->alloc_lock);
-
- init_sigpending(&p->pending);
-
- p->utime = p->stime = p->gtime = 0;
-
- p->utimescaled = p->stimescaled = 0;
-
- prev_cputime_init(&p->prev_cputime);
-
-
- seqcount_init(&p->vtime_seqcount);
- p->vtime_snap = 0;
- p->vtime_snap_whence = VTIME_INACTIVE;
-
-
-
- memset(&p->rss_stat, 0, sizeof(p->rss_stat));
-
-
- p->default_timer_slack_ns = current->timer_slack_ns;
-
- task_io_accounting_init(&p->ioac);
- acct_clear_integrals(p);
-
- posix_cpu_timers_init(p);
-
- p->start_time = ktime_get_ns();
- p->real_start_time = ktime_get_boot_ns();
- p->io_context = NULL;
- p->audit_context = NULL;
- cgroup_fork(p);
-
- p->mempolicy = mpol_dup(p->mempolicy);
- if (IS_ERR(p->mempolicy)) {
- retval = PTR_ERR(p->mempolicy);
- p->mempolicy = NULL;
- goto bad_fork_cleanup_threadgroup_lock;
- }
-
-
- p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
- p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
- seqcount_init(&p->mems_allowed_seq);
-
-
- p->irq_events = 0;
- p->hardirqs_enabled = 0;
- p->hardirq_enable_ip = 0;
- p->hardirq_enable_event = 0;
- p->hardirq_disable_ip = _THIS_IP_;
- p->hardirq_disable_event = 0;
- p->softirqs_enabled = 1;
- p->softirq_enable_ip = _THIS_IP_;
- p->softirq_enable_event = 0;
- p->softirq_disable_ip = 0;
- p->softirq_disable_event = 0;
- p->hardirq_context = 0;
- p->softirq_context = 0;
-
-
- p->pagefault_disabled = 0;
-
-
- p->lockdep_depth = 0; /* no locks held yet */
- p->curr_chain_key = 0;
- p->lockdep_recursion = 0;
-
-
-
- p->blocked_on = NULL; /* not blocked yet */
-
-
- p->sequential_io = 0;
- p->sequential_io_avg = 0;
-
-
- /* Perform scheduler related setup. Assign this task to a CPU. */
- retval = sched_fork(clone_flags, p);
- if (retval)
- goto bad_fork_cleanup_policy;
-
- retval = perf_event_init_task(p);
- if (retval)
- goto bad_fork_cleanup_policy;
- retval = audit_alloc(p);
- if (retval)
- goto bad_fork_cleanup_perf;
- /* copy all the process information */
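- /*
-  * Each copy_*() call below either shares the parent's structure with
-  * the child (when the corresponding CLONE_* flag is set) or gives the
-  * child a private copy: System V semaphores, open files, filesystem
-  * context, signal handlers, the signal structure, the memory
-  * descriptor, namespaces and the I/O context.
-  */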
- shm_init_task(p);
- retval = security_task_alloc(p, clone_flags);
- if (retval)
- goto bad_fork_cleanup_audit;
- retval = copy_semundo(clone_flags, p);
- if (retval)
- goto bad_fork_cleanup_security;
- retval = copy_files(clone_flags, p);
- if (retval)
- goto bad_fork_cleanup_semundo;
- retval = copy_fs(clone_flags, p);
- if (retval)
- goto bad_fork_cleanup_files;
- retval = copy_sighand(clone_flags, p);
- if (retval)
- goto bad_fork_cleanup_fs;
- retval = copy_signal(clone_flags, p);
- if (retval)
- goto bad_fork_cleanup_sighand;
- retval = copy_mm(clone_flags, p);
- if (retval)
- goto bad_fork_cleanup_signal;
- retval = copy_namespaces(clone_flags, p);
- if (retval)
- goto bad_fork_cleanup_mm;
- retval = copy_io(clone_flags, p);
- if (retval)
- goto bad_fork_cleanup_namespaces;
- retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
- if (retval)
- goto bad_fork_cleanup_io;
-
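- /*
-  * Allocate a struct pid in the child's pid namespace; only the
-  * boot-time idle task is created with the static init_struct_pid.
-  */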
- if (pid != &init_struct_pid) {
- pid = alloc_pid(p->nsproxy->pid_ns_for_children);
- if (IS_ERR(pid)) {
- retval = PTR_ERR(pid);
- goto bad_fork_cleanup_thread;
- }
- }
-
-
- p->plug = NULL;
-
-
- p->robust_list = NULL;
-
- p->compat_robust_list = NULL;
-
- INIT_LIST_HEAD(&p->pi_state_list);
- p->pi_state_cache = NULL;
-
- /*
- * sigaltstack should be cleared when sharing the same VM
- */
- if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
- sas_ss_reset(p);
-
- /*
- * Syscall tracing and stepping should be turned off in the
- * child regardless of CLONE_PTRACE.
- */
- user_disable_single_step(p);
- clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
-
- clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
-
- clear_all_latency_tracing(p);
-
- /* ok, now we should be set up.. */
- p->pid = pid_nr(pid);
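- /*
-  * A CLONE_THREAD child joins the caller's thread group: it inherits
-  * the caller's tgid and group leader, and exit_signal = -1 means it
-  * does not signal the parent when it exits. Otherwise the child
-  * becomes the leader of a new thread group, with tgid == pid.
-  */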
- if (clone_flags & CLONE_THREAD) {
- p->exit_signal = -1;
- p->group_leader = current->group_leader;
- p->tgid = current->tgid;
- } else {
- if (clone_flags & CLONE_PARENT)
- p->exit_signal = current->group_leader->exit_signal;
- else
- p->exit_signal = (clone_flags & CSIGNAL);
- p->group_leader = p;
- p->tgid = p->pid;
- }
-
- p->nr_dirtied = 0;
- p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
- p->dirty_paused_when = 0;
-
- p->pdeath_signal = 0;
- INIT_LIST_HEAD(&p->thread_group);
- p->task_works = NULL;
-
- cgroup_threadgroup_change_begin(current);
- /*
- * Ensure that the cgroup subsystem policies allow the new process to be
- * forked. It should be noted the the new process's css_set can be changed
- * between here and cgroup_post_fork() if an organisation operation is in
- * progress.
- */
- retval = cgroup_can_fork(p);
- if (retval)
- goto bad_fork_free_pid;
-
- /*
- * Make it visible to the rest of the system, but dont wake it up yet.
- * Need tasklist lock for parent etc handling!
- */
- write_lock_irq(&tasklist_lock);
-
- /* CLONE_PARENT re-uses the old parent */
- if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
- p->real_parent = current->real_parent;
- p->parent_exec_id = current->parent_exec_id;
- } else {
- p->real_parent = current;
- p->parent_exec_id = current->self_exec_id;
- }
-
- klp_copy_process(p);
-
- spin_lock(&current->sighand->siglock);
-
- /*
- * Copy seccomp details explicitly here, in case they were changed
- * before holding sighand lock.
- */
- copy_seccomp(p);
-
- /*
- * Process group and session signals need to be delivered to just the
- * parent before the fork or both the parent and the child after the
- * fork. Restart if a signal comes in before we add the new process to
- * it's process group.
- * A fatal signal pending means that current will exit, so the new
- * thread can't slip out of an OOM kill (or normal SIGKILL).
- */
- recalc_sigpending();
- if (signal_pending(current)) {
- retval = -ERESTARTNOINTR;
- goto bad_fork_cancel_cgroup;
- }
- if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
- retval = -ENOMEM;
- goto bad_fork_cancel_cgroup;
- }
-
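- /*
-  * Link the new task into the global bookkeeping under tasklist_lock:
-  * attach its struct pid, and either add a new thread-group leader to
-  * its parent's children list and the global task list, or add a new
-  * thread to the leader's thread_group list.
-  */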
- if (likely(p->pid)) {
- ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
-
- init_task_pid(p, PIDTYPE_PID, pid);
- if (thread_group_leader(p)) {
- init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
- init_task_pid(p, PIDTYPE_SID, task_session(current));
-
- if (is_child_reaper(pid)) {
- ns_of_pid(pid)->child_reaper = p;
- p->signal->flags |= SIGNAL_UNKILLABLE;
- }
-
- p->signal->leader_pid = pid;
- p->signal->tty = tty_kref_get(current->signal->tty);
- /*
- * Inherit has_child_subreaper flag under the same
- * tasklist_lock with adding child to the process tree
- * for propagate_has_child_subreaper optimization.
- */
- p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
- p->real_parent->signal->is_child_subreaper;
- list_add_tail(&p->sibling, &p->real_parent->children);
- list_add_tail_rcu(&p->tasks, &init_task.tasks);
- attach_pid(p, PIDTYPE_PGID);
- attach_pid(p, PIDTYPE_SID);
- __this_cpu_inc(process_counts);
- } else {
- current->signal->nr_threads++;
- atomic_inc(&current->signal->live);
- atomic_inc(&current->signal->sigcnt);
- list_add_tail_rcu(&p->thread_group,
- &p->group_leader->thread_group);
- list_add_tail_rcu(&p->thread_node,
- &p->signal->thread_head);
- }
- attach_pid(p, PIDTYPE_PID);
- nr_threads++;
- }
-
- total_forks++;
- spin_unlock(&current->sighand->siglock);
- syscall_tracepoint_update(p);
- write_unlock_irq(&tasklist_lock);
-
- proc_fork_connector(p);
- cgroup_post_fork(p);
- cgroup_threadgroup_change_end(current);
- perf_event_fork(p);
-
- trace_task_newtask(p, clone_flags);
- uprobe_copy_process(p, clone_flags);
-
- return p;
-
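- /*
-  * Error paths: each bad_fork_* label below undoes one of the steps
-  * above, unwinding in reverse order of initialization.
-  */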
- bad_fork_cancel_cgroup:
- spin_unlock(&current->sighand->siglock);
- write_unlock_irq(&tasklist_lock);
- cgroup_cancel_fork(p);
- bad_fork_free_pid:
- cgroup_threadgroup_change_end(current);
- if (pid != &init_struct_pid)
- free_pid(pid);
- bad_fork_cleanup_thread:
- exit_thread(p);
- bad_fork_cleanup_io:
- if (p->io_context)
- exit_io_context(p);
- bad_fork_cleanup_namespaces:
- exit_task_namespaces(p);
- bad_fork_cleanup_mm:
- if (p->mm)
- mmput(p->mm);
- bad_fork_cleanup_signal:
- if (!(clone_flags & CLONE_THREAD))
- free_signal_struct(p->signal);
- bad_fork_cleanup_sighand:
- __cleanup_sighand(p->sighand);
- bad_fork_cleanup_fs:
- exit_fs(p); /* blocking */
- bad_fork_cleanup_files:
- exit_files(p); /* blocking */
- bad_fork_cleanup_semundo:
- exit_sem(p);
- bad_fork_cleanup_security:
- security_task_free(p);
- bad_fork_cleanup_audit:
- audit_free(p);
- bad_fork_cleanup_perf:
- perf_event_free_task(p);
- bad_fork_cleanup_policy:
-
- mpol_put(p->mempolicy);
- bad_fork_cleanup_threadgroup_lock:
-
- delayacct_tsk_free(p);
- bad_fork_cleanup_count:
- atomic_dec(&p->cred->user->processes);
- exit_creds(p);
- bad_fork_free:
- p->state = TASK_DEAD;
- put_task_stack(p);
- free_task(p);
- fork_out:
- return ERR_PTR(retval);
- }
- //end copy_process
-
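To see how copy_process and wake_up_new_task fit together, here is a simplified sketch of the success path of _do_fork (tracing, CLONE_PARENT_SETTID and vfork-completion handling trimmed):
- p = copy_process(clone_flags, stack_start, stack_size,
-                  child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
- if (IS_ERR(p))
-     return PTR_ERR(p);
-
- pid = get_task_pid(p, PIDTYPE_PID);
- nr = pid_vnr(pid);          /* pid number as seen by the caller's namespace */
-
- wake_up_new_task(p);        /* put the new task on a runqueue */
-
- put_pid(pid);
- return nr;
-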
6. Waking up the new process
The function wake_up_new_task is responsible for waking up the newly created process; its code is as follows:
- void wake_up_new_task(struct task_struct *p)
- {
- struct rq_flags rf;
- struct rq *rq;
-
- raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
- p->state = TASK_RUNNING;
-
- /*
- * Fork balancing, do it here and not earlier because:
- * - cpus_allowed can change in the fork path
- * - any previously selected CPU might disappear through hotplug
- *
- * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
- * as we're not fully set-up yet.
- */
- __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
-
- rq = __task_rq_lock(p, &rf);
- update_rq_clock(rq);
- post_init_entity_util_avg(&p->se);
-
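- /*
-  * Put the new task on its runqueue; check_preempt_curr() below lets
-  * it preempt the currently running task if it should run first.
-  */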
- activate_task(rq, p, ENQUEUE_NOCLOCK);
- p->on_rq = TASK_ON_RQ_QUEUED;
- trace_sched_wakeup_new(p);
- check_preempt_curr(rq, p, WF_FORK);
-
- if (p->sched_class->task_woken) {
- /*
- * Nothing relies on rq->lock after this, so its fine to
- * drop it.
- */
- rq_unpin_lock(rq, &rf);
- p->sched_class->task_woken(rq, p);
- rq_repin_lock(rq, &rf);
- }
-
- task_rq_unlock(rq, p, &rf);
- }
- // end of wake_up_new_task
-
7. The new process runs for the first time
A new process begins execution at the function ret_from_fork, which is defined separately by each processor architecture. The ARM64 implementation of ret_from_fork is as follows:
- tsk .req x28 // current thread_info
-
- ENTRY(ret_from_fork)
- bl schedule_tail
- cbz x19, 1f // not a kernel thread
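- // kernel thread: x19 holds the thread function and x20 its argument,
- // both set up by copy_thread(); call the function directly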
- mov x0, x20
- blr x19
- 1: get_thread_info tsk
- b ret_to_user
- ENDPROC(ret_from_fork)
-
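The registers x19 and x20 tested above are prepared by the ARM64 copy_thread() when the new task is a kernel thread. The following simplified sketch of the relevant assignments (based on arch/arm64/kernel/process.c of this kernel generation) shows where they come from:
- /* in copy_thread(), for a kernel thread (PF_KTHREAD): */
- p->thread.cpu_context.x19 = stack_start;   /* thread function */
- p->thread.cpu_context.x20 = stk_sz;        /* argument for the thread function */
-
- /* in all cases the new task resumes at ret_from_fork */
- p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
- p->thread.cpu_context.sp = (unsigned long)childregs;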