Friday, November 13, 2009

static void task_running_tick(struct rq *rq, struct task_struct *p)

/*
 * When time_slice drops to 0, the task is inserted into the appropriate
 * array (active or expired) depending on whether it is interactive and
 * on how starved the expired array is (see the expired_starving() sketch
 * after the excerpt below).
 */

    if (!--p->time_slice) {
        dequeue_task(p, rq->active);
        set_tsk_need_resched(p);
        p->prio = effective_prio(p);
        p->time_slice = task_timeslice(p);
        p->first_time_slice = 0;

        if (!rq->expired_timestamp)
            rq->expired_timestamp = jiffies;
        if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
            enqueue_task(p, rq->expired);
            if (p->static_prio < rq->best_expired_prio)
                rq->best_expired_prio = p->static_prio;
        } else
            enqueue_task(p, rq->active);
    } else {
        /*
         * Prevent a too long timeslice allowing a task to monopolize
         * the CPU. We do this by splitting up the timeslice into
         * smaller pieces.
         *
         * Note: this does not mean the task's timeslices expire or
         * get lost in any way, they just might be preempted by
         * another task of equal priority. (one with higher
         * priority would have preempted this task already.) We
         * requeue this task to the end of the list on this priority
         * level, which is in essence a round-robin of tasks with
         * equal priority.
         *
         * This only applies to tasks in the interactive
         * delta range with at least TIMESLICE_GRANULARITY to requeue.
         */
        if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
            p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
            (p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
            (p->array == rq->active)) {

            requeue_task(p, rq->active);
            set_tsk_need_resched(p);
        }
    }
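
The branch taken when the slice is exhausted puts an interactive task back on the active array only as long as the expired array is not "starving". The excerpt does not show how that is judged; as a rough, paraphrased sketch of what expired_starving() checks in this 2.6.2x O(1) scheduler line (see kernel/sched.c for the authoritative version), starvation is reported when a task with a better static priority has already expired, or when the first expired task has been waiting longer than a load-scaled deadline:

/* Paraphrased sketch, not a verbatim quote of kernel/sched.c. */
static inline int expired_starving(struct rq *rq)
{
    /* A task with a better (lower) static_prio already expired. */
    if (rq->curr->static_prio > rq->best_expired_prio)
        return 1;
    if (!STARVATION_LIMIT || !rq->expired_timestamp)
        return 0;
    /* The oldest expired task has waited past a deadline that
     * scales with the number of runnable tasks. */
    if (jiffies - rq->expired_timestamp >
            STARVATION_LIMIT * rq->nr_running)
        return 1;
    return 0;
}

Either condition forces the tick handler to move even an interactive task to the expired array, so the tasks already waiting there eventually get their array switch.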

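The round-robin requeue in the else branch fires only at whole multiples of TIMESLICE_GRANULARITY(p) ticks consumed, and only while at least one full granule of the slice remains. Below is a standalone sketch with made-up numbers (a 100-tick slice split into 25-tick granules; the real values depend on the task's priority and the number of online CPUs) that shows when the condition triggers:

#include <stdio.h>

/* Made-up stand-ins for task_timeslice(p) and TIMESLICE_GRANULARITY(p);
 * the real values depend on the task's priority and the CPU count. */
#define TASK_TIMESLICE_TICKS  100
#define GRANULARITY_TICKS      25

int main(void)
{
    int time_slice = TASK_TIMESLICE_TICKS;

    while (--time_slice) {
        int used = TASK_TIMESLICE_TICKS - time_slice;

        /* Mirrors the requeue condition in the else branch above:
         * requeue every GRANULARITY_TICKS of consumed slice, but only
         * while at least one full granule is still left. */
        if (!(used % GRANULARITY_TICKS) && time_slice >= GRANULARITY_TICKS)
            printf("tick %3d: requeue to tail of active[p->prio]\n", used);
    }
    printf("tick %3d: slice exhausted -> recompute slice, pick array\n",
           TASK_TIMESLICE_TICKS);
    return 0;
}

Compiled and run, this prints a requeue at ticks 25, 50, and 75, and the slice-exhausted path at tick 100, which is the round-robin among equal-priority tasks that the in-code comment describes.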