
Re: [Xen-devel] [PATCH] xen,credit1: Add variable timeslice


  • To: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>, <xen-devel@xxxxxxxxxxxxxxxxxxx>
  • From: Keir Fraser <keir@xxxxxxx>
  • Date: Tue, 13 Sep 2011 10:44:06 +0100
  • Cc: Tim Deegan <tim@xxxxxxx>
  • Delivery-date: Tue, 13 Sep 2011 02:45:20 -0700
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>
  • Thread-index: Acxx+a2WmH73pbfm+kS3KRKQYlmA/A==
  • Thread-topic: [Xen-devel] [PATCH] xen,credit1: Add variable timeslice

On 13/09/2011 10:25, "George Dunlap" <George.Dunlap@xxxxxxxxxxxxx> wrote:

> Ping?

Now done.

 -- Keir

> On Thu, Sep 1, 2011 at 4:30 PM, George Dunlap
> <george.dunlap@xxxxxxxxxxxxx> wrote:
>> Add a Xen command-line parameter, sched_credit_tslice_ms,
>> to set the timeslice of the credit1 scheduler.
>> 
>> Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
>> 
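For reference, the new knob is consumed from the hypervisor command line
at boot; a GRUB entry using it might look like the line below (the 10ms
value is purely illustrative, the default added by the patch stays at 30ms):

    kernel /boot/xen.gz sched_credit_tslice_ms=10
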
>> diff -r 4a4882df5649 -r 782284c5b1bc xen/common/sched_credit.c
>> --- a/xen/common/sched_credit.c Wed Aug 31 15:23:49 2011 +0100
>> +++ b/xen/common/sched_credit.c Thu Sep 01 16:29:50 2011 +0100
>> @@ -41,15 +41,9 @@
>>  */
>>  #define CSCHED_DEFAULT_WEIGHT       256
>>  #define CSCHED_TICKS_PER_TSLICE     3
>> -#define CSCHED_TICKS_PER_ACCT       3
>> -#define CSCHED_MSECS_PER_TICK       10
>> -#define CSCHED_MSECS_PER_TSLICE     \
>> -    (CSCHED_MSECS_PER_TICK * CSCHED_TICKS_PER_TSLICE)
>> +/* Default timeslice: 30ms */
>> +#define CSCHED_DEFAULT_TSLICE_MS    30
>>  #define CSCHED_CREDITS_PER_MSEC     10
>> -#define CSCHED_CREDITS_PER_TSLICE   \
>> -    (CSCHED_CREDITS_PER_MSEC * CSCHED_MSECS_PER_TSLICE)
>> -#define CSCHED_CREDITS_PER_ACCT     \
>> -    (CSCHED_CREDITS_PER_MSEC * CSCHED_MSECS_PER_TICK * CSCHED_TICKS_PER_ACCT)
>> 
>> 
>>  /*
>> @@ -113,6 +107,8 @@
>>  */
>>  static bool_t __read_mostly sched_credit_default_yield;
>>  boolean_param("sched_credit_default_yield", sched_credit_default_yield);
>> +static int __read_mostly sched_credit_tslice_ms = CSCHED_DEFAULT_TSLICE_MS;
>> +integer_param("sched_credit_tslice_ms", sched_credit_tslice_ms);
>> 
>>  /*
>>  * Physical CPU
>> @@ -176,6 +172,9 @@ struct csched_private {
>>     uint32_t credit;
>>     int credit_balance;
>>     uint32_t runq_sort;
>> +    /* Timeslice in ms, tick period in us, and derived per-tslice counts */
>> +    unsigned tslice_ms, tick_period_us, ticks_per_tslice;
>> +    unsigned credits_per_tslice;
>>  };
>> 
>>  static void csched_tick(void *_cpu);
>> @@ -326,7 +325,7 @@ csched_free_pdata(const struct scheduler
>> 
>>     spin_lock_irqsave(&prv->lock, flags);
>> 
>> -    prv->credit -= CSCHED_CREDITS_PER_ACCT;
>> +    prv->credit -= prv->credits_per_tslice;
>>     prv->ncpus--;
>>     cpu_clear(cpu, prv->idlers);
>>     cpu_clear(cpu, prv->cpus);
>> @@ -360,19 +359,19 @@ csched_alloc_pdata(const struct schedule
>>     spin_lock_irqsave(&prv->lock, flags);
>> 
>>     /* Initialize/update system-wide config */
>> -    prv->credit += CSCHED_CREDITS_PER_ACCT;
>> +    prv->credit += prv->credits_per_tslice;
>>     prv->ncpus++;
>>     cpu_set(cpu, prv->cpus);
>>     if ( prv->ncpus == 1 )
>>     {
>>         prv->master = cpu;
>>         init_timer(&prv->master_ticker, csched_acct, prv, cpu);
>> -        set_timer(&prv->master_ticker, NOW() +
>> -                  MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT);
>> +        set_timer(&prv->master_ticker,
>> +                  NOW() + MILLISECS(prv->tslice_ms));
>>     }
>> 
>>     init_timer(&spc->ticker, csched_tick, (void *)(unsigned long)cpu, cpu);
>> -    set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
>> +    set_timer(&spc->ticker, NOW() + MICROSECS(prv->tick_period_us) );
>> 
>>     INIT_LIST_HEAD(&spc->runq);
>>     spc->runq_sort_last = prv->runq_sort;
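
Worth noting: with the default 30ms slice this preserves the old accounting
period, since the master ticker previously fired every
CSCHED_MSECS_PER_TICK (10ms) * CSCHED_TICKS_PER_ACCT (3) = 30ms as well.
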
>> @@ -1002,7 +1001,7 @@ csched_acct(void* dummy)
>>          * for one full accounting period. We allow a domain to earn more
>>          * only when the system-wide credit balance is negative.
>>          */
>> -        credit_peak = sdom->active_vcpu_count * CSCHED_CREDITS_PER_ACCT;
>> +        credit_peak = sdom->active_vcpu_count * prv->credits_per_tslice;
>>         if ( prv->credit_balance < 0 )
>>         {
>>             credit_peak += ( ( -prv->credit_balance
>> @@ -1014,7 +1013,7 @@ csched_acct(void* dummy)
>> 
>>         if ( sdom->cap != 0U )
>>         {
>> -            credit_cap = ((sdom->cap * CSCHED_CREDITS_PER_ACCT) + 99) / 100;
>> +            credit_cap = ((sdom->cap * prv->credits_per_tslice) + 99) / 100;
>>             if ( credit_cap < credit_peak )
>>                 credit_peak = credit_cap;
>> 
>> @@ -1092,10 +1091,10 @@ csched_acct(void* dummy)
>>                 }
>> 
>>                 /* Lower bound on credits */
>> -                if ( credit < -CSCHED_CREDITS_PER_TSLICE )
>> +                if ( credit < -prv->credits_per_tslice )
>>                 {
>>                     CSCHED_STAT_CRANK(acct_min_credit);
>> -                    credit = -CSCHED_CREDITS_PER_TSLICE;
>> +                    credit = -prv->credits_per_tslice;
>>                     atomic_set(&svc->credit, credit);
>>                 }
>>             }
>> @@ -1117,7 +1116,7 @@ csched_acct(void* dummy)
>>                 }
>> 
>>                 /* Upper bound on credits means VCPU stops earning */
>> -                if ( credit > CSCHED_CREDITS_PER_TSLICE )
>> +                if ( credit > prv->credits_per_tslice )
>>                 {
>>                     __csched_vcpu_acct_stop_locked(prv, svc);
>>                     /* Divide credits in half, so that when it starts
>> @@ -1141,8 +1140,8 @@ csched_acct(void* dummy)
>>     prv->runq_sort++;
>> 
>>  out:
>> -    set_timer( &prv->master_ticker, NOW() +
>> -            MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT );
>> +    set_timer( &prv->master_ticker,
>> +               NOW() + MILLISECS(prv->tslice_ms));
>>  }
>> 
>>  static void
>> @@ -1169,7 +1168,7 @@ csched_tick(void *_cpu)
>>      */
>>     csched_runq_sort(prv, cpu);
>> 
>> -    set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
>> +    set_timer(&spc->ticker, NOW() + MICROSECS(prv->tick_period_us) );
>>  }
>> 
>>  static struct csched_vcpu *
>> @@ -1375,7 +1374,7 @@ csched_schedule(
>>      * Return task to run next...
>>      */
>>     ret.time = (is_idle_vcpu(snext->vcpu) ?
>> -                -1 : MILLISECS(CSCHED_MSECS_PER_TSLICE));
>> +                -1 : MILLISECS(prv->tslice_ms));
>>     ret.task = snext->vcpu;
>> 
>>     CSCHED_VCPU_CHECK(ret.task);
>> @@ -1469,10 +1468,9 @@ csched_dump(const struct scheduler *ops)
>>            "\tweight             = %u\n"
>>            "\trunq_sort          = %u\n"
>>            "\tdefault-weight     = %d\n"
>> -           "\tmsecs per tick     = %dms\n"
>> +           "\ttslice             = %dms\n"
>>            "\tcredits per msec   = %d\n"
>>            "\tticks per tslice   = %d\n"
>> -           "\tticks per acct     = %d\n"
>>            "\tmigration delay    = %uus\n",
>>            prv->ncpus,
>>            prv->master,
>> @@ -1481,10 +1479,9 @@ csched_dump(const struct scheduler *ops)
>>            prv->weight,
>>            prv->runq_sort,
>>            CSCHED_DEFAULT_WEIGHT,
>> -           CSCHED_MSECS_PER_TICK,
>> +           prv->tslice_ms,
>>            CSCHED_CREDITS_PER_MSEC,
>> -           CSCHED_TICKS_PER_TSLICE,
>> -           CSCHED_TICKS_PER_ACCT,
>> +           prv->ticks_per_tslice,
>>            vcpu_migration_delay);
>> 
>>     cpumask_scnprintf(idlers_buf, sizeof(idlers_buf), prv->idlers);
>> @@ -1526,6 +1523,13 @@ csched_init(struct scheduler *ops)
>>     INIT_LIST_HEAD(&prv->active_sdom);
>>     prv->master = UINT_MAX;
>> 
>> +    prv->tslice_ms = sched_credit_tslice_ms;
>> +    prv->ticks_per_tslice = CSCHED_TICKS_PER_TSLICE;
>> +    if ( prv->tslice_ms < prv->ticks_per_tslice )
>> +        prv->ticks_per_tslice = 1;
>> +    prv->tick_period_us = prv->tslice_ms * 1000 / prv->ticks_per_tslice;
>> +    prv->credits_per_tslice = CSCHED_CREDITS_PER_MSEC * prv->tslice_ms;
>> +
>>     return 0;
>>  }
>> 
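To make the derived values concrete, here is the arithmetic csched_init
ends up doing, worked through as C comments for two settings (illustrative
only, not part of the patch):

    /* Default: sched_credit_tslice_ms = 30                              */
    /*   ticks_per_tslice   = CSCHED_TICKS_PER_TSLICE = 3                */
    /*   tick_period_us     = 30 * 1000 / 3 = 10000  (the old 10ms tick) */
    /*   credits_per_tslice = CSCHED_CREDITS_PER_MSEC * 30 = 300         */

    /* Short slice: sched_credit_tslice_ms = 2 (less than 3 ticks)       */
    /*   ticks_per_tslice   = 1  (clamped by the new check above)        */
    /*   tick_period_us     = 2 * 1000 / 1 = 2000                        */
    /*   credits_per_tslice = 10 * 2 = 20                                */
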
>> @@ -1550,13 +1554,16 @@ static void csched_tick_suspend(const st
>> 
>>  static void csched_tick_resume(const struct scheduler *ops, unsigned int cpu)
>>  {
>> +    struct csched_private *prv;
>>     struct csched_pcpu *spc;
>>     uint64_t now = NOW();
>> 
>>     spc = CSCHED_PCPU(cpu);
>> 
>> -    set_timer(&spc->ticker, now + MILLISECS(CSCHED_MSECS_PER_TICK)
>> -            - now % MILLISECS(CSCHED_MSECS_PER_TICK) );
>> +    prv = CSCHED_PRIV(ops);
>> +
>> +    set_timer(&spc->ticker, now + MICROSECS(prv->tick_period_us)
>> +            - now % MICROSECS(prv->tick_period_us) );
>>  }
>> 
>>  static struct csched_private _csched_priv;
>> 
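One more note on the resume path: the expression
now + period - now % period rounds NOW() up to the next tick boundary
instead of letting the ticker drift by however long the CPU was suspended.
A quick worked example in C comments (values are illustrative):

    /* period = MICROSECS(10000)  ->  10,000,000 ns  (a 10ms tick)      */
    /* now    = 123,456,789 ns  (arbitrary point inside a tick)         */
    /* now % period                =   3,456,789 ns into the tick       */
    /* now + period - now % period = 130,000,000 ns, the next boundary  */
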
>> _______________________________________________
>> Xen-devel mailing list
>> Xen-devel@xxxxxxxxxxxxxxxxxxx
>> http://lists.xensource.com/xen-devel
>> 



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
