2009년 11월 19일 목요일

struct cfs_rq

  423 /* CFS-related fields in a runqueue */
  424 struct cfs_rq {
  425     struct load_weight load;   /* aggregate weight of entities queued here */
  426     unsigned long nr_running;  /* count of runnable entities on this cfs_rq */
  427
  428     u64 exec_clock;            /* execution-time accounting clock */
  429     u64 min_vruntime;          /* NOTE(review): presumably a monotonic floor of
                                      * queued entities' vruntime, used as the placement
                                      * baseline — confirm against update_min_vruntime() */
  430
  431     struct rb_root tasks_timeline; /* rb-tree of waiting sched entities; presumably
                                          * ordered by vruntime — confirm at enqueue site */
  432     struct rb_node *rb_leftmost;   /* cached leftmost node of tasks_timeline, so the
                                          * next entity to run is found without a tree walk */
  433
  434     struct list_head tasks;             /* flat list of entities on this cfs_rq */
  435     struct list_head *balance_iterator; /* resumable cursor into 'tasks' used when
                                              * iterating during load balancing */
  436
  437     /*
  438      * 'curr' points to currently running entity on this cfs_rq.
  439      * It is set to NULL otherwise (i.e when none are currently running).
  440      * 'next' and 'last' are buddy hints for pick-time preference —
  441      * TODO confirm exact buddy semantics against pick_next_entity().
  442      */
  441     struct sched_entity *curr, *next, *last;
  442
  443     unsigned int nr_spread_over;  /* debug statistic; presumably counts entities whose
                                        * vruntime spread exceeded a threshold — verify */
  444
  445 #ifdef CONFIG_FAIR_GROUP_SCHED
  446     struct rq *rq;  /* cpu runqueue to which this cfs_rq is attached */
  447
  448     /*
  449      * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
  450      * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
  451      * (like users, containers etc.)
  452      *
  453      * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
  454      * list is used during load balance.
  455      */
  456     struct list_head leaf_cfs_rq_list;
  457     struct task_group *tg;  /* group that "owns" this runqueue */
  458
  459 #ifdef CONFIG_SMP
  460     /*
  461      * the part of load.weight contributed by tasks
  462      */
  463     unsigned long task_weight;
  464
  465     /*
  466      *   h_load = weight * f(tg)
  467      *
  468      * Where f(tg) is the recursive weight fraction assigned to
  469      * this group.
  470      */
  471     unsigned long h_load;
  472
  473     /*
  474      * this cpu's part of tg->shares
  475      */
  476     unsigned long shares;
  477
  478     /*
  479      * load.weight at the time we set shares
  480      */
  481     unsigned long rq_weight;
  482 #endif
  483 #endif
  484 };

 

댓글 없음:

댓글 쓰기