kernel/sched.c
/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
static inline struct task_struct *
context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
{
        struct mm_struct *mm = next->mm;
        struct mm_struct *oldmm = prev->active_mm;

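        /*
         * next has no user address space (it is a kernel thread):
         * borrow prev's active_mm and pin it via mm_count so the
         * borrowed mm cannot be freed while next runs on it.
         */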
        if (!mm) {
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm(oldmm, mm, next);

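        /*
         * prev was a kernel thread running on a borrowed mm: clear the
         * borrow and stash the reference in rq->prev_mm so that
         * finish_task_switch() can mmdrop() it once the switch is done.
         */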
        if (!prev->mm) {
                prev->active_mm = NULL;
                WARN_ON(rq->prev_mm);
                rq->prev_mm = oldmm;
        }
        /*
         * The runqueue lock will be released by the next task (an
         * invalid locking operation in general, but an obvious
         * special case for the scheduler), so we do an early
         * lockdep release here:
         */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

        /* Here we just switch the register state and the stack. */
        switch_to(prev, next, prev);

        return prev;
}
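
The subtle part of this function is the active_mm bookkeeping for kernel threads, which have no address space of their own. Below is a minimal userspace sketch of that borrow-and-drop dance; the toy struct mm / struct task types, the rq_prev_mm variable, and the *_like helper names are illustrative stand-ins for the kernel's mm_struct, task_struct, rq->prev_mm and friends, not real kernel API.

#include <stdio.h>

/* Toy stand-ins for mm_struct / task_struct (illustrative only). */
struct mm {
        int mm_count;           /* models mm_struct.mm_count */
        const char *name;
};

struct task {
        struct mm *mm;          /* NULL for kernel threads */
        struct mm *active_mm;   /* mm actually loaded in the MMU */
        const char *comm;
};

static struct mm *rq_prev_mm;   /* models rq->prev_mm */

static void mmdrop(struct mm *mm)
{
        if (--mm->mm_count == 0)
                printf("freeing mm %s\n", mm->name);
}

/* Models the mm-handover half of context_switch() above. */
static void switch_mm_like(struct task *prev, struct task *next)
{
        struct mm *oldmm = prev->active_mm;

        printf("switch: %s -> %s\n", prev->comm, next->comm);

        if (!next->mm) {
                /* Kernel thread: borrow oldmm and pin it. */
                next->active_mm = oldmm;
                oldmm->mm_count++;
        } else {
                next->active_mm = next->mm;     /* switch_mm() */
        }

        if (!prev->mm) {
                /* prev borrowed oldmm; hand the reference to the rq. */
                prev->active_mm = NULL;
                rq_prev_mm = oldmm;
        }
}

/* Models finish_task_switch() dropping the stashed reference. */
static void finish_switch_like(void)
{
        if (rq_prev_mm) {
                mmdrop(rq_prev_mm);
                rq_prev_mm = NULL;
        }
}

int main(void)
{
        struct mm user_mm = { .mm_count = 1, .name = "user_mm" };
        struct task user = { .mm = &user_mm, .active_mm = &user_mm,
                             .comm = "user" };
        struct task kthread = { .mm = NULL, .active_mm = NULL,
                                .comm = "kworker" };

        switch_mm_like(&user, &kthread);    /* user -> kthread: borrow */
        finish_switch_like();               /* nothing stashed yet */

        switch_mm_like(&kthread, &user);    /* kthread -> user: return borrow */
        finish_switch_like();               /* drops the borrowed reference */

        printf("user_mm.mm_count = %d\n", user_mm.mm_count);
        return 0;
}

Note why the borrowed reference is not dropped inside the switch itself: oldmm's page tables are still live until the new task is actually running, so the kernel defers the mmdrop() to finish_task_switch(), which is exactly what the rq->prev_mm stash (modeled by rq_prev_mm here) exists for.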