
Re: [Xen-devel] [PATCH V4 1/3] xen/mem_access: Support for memory-content hiding

>>> On 08.07.15 at 12:22, <rcojocaru@xxxxxxxxxxxxxxx> wrote:
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -653,6 +653,31 @@ static int hvmemul_read(
>      unsigned int bytes,
>      struct x86_emulate_ctxt *ctxt)
>  {
> +    struct hvm_emulate_ctxt *hvmemul_ctxt =
> +        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
> +
> +    if ( unlikely(hvmemul_ctxt->set_context) )
> +    {
> +        struct vcpu *curr = current;
> +        unsigned int safe_bytes;
> +
> +        if ( !curr->arch.vm_event.emul_read_data )
> +            return X86EMUL_UNHANDLEABLE;
> +
> +        safe_bytes = min_t(unsigned int,
> +                           bytes, curr->arch.vm_event.emul_read_data->size);

Afaict min() would work here.
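
E.g. (assuming ->size stays uint32_t, which matches unsigned int
here):

    safe_bytes = min(bytes, curr->arch.vm_event.emul_read_data->size);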

> +        if ( safe_bytes )
> +        {
> +            memcpy(p_data, curr->arch.vm_event.emul_read_data->data, safe_bytes);

Why is this conditional? Doesn't ->size being zero mean all data
should be zeroed? Otherwise this isn't really consistent behavior...

Also - long line.

> +
> +            if ( bytes > safe_bytes )
> +                memset(p_data + safe_bytes, 0, bytes - safe_bytes);

No need for the conditional here.
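
I.e. the tail of the block could simply be (relying on zero-length
memcpy()/memset() being fine):

    memcpy(p_data, curr->arch.vm_event.emul_read_data->data, safe_bytes);
    memset(p_data + safe_bytes, 0, bytes - safe_bytes);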

> @@ -893,6 +918,28 @@ static int hvmemul_cmpxchg(
>      unsigned int bytes,
>      struct x86_emulate_ctxt *ctxt)
>  {
> +    struct hvm_emulate_ctxt *hvmemul_ctxt =
> +        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
> +
> +    if ( unlikely(hvmemul_ctxt->set_context) )
> +    {
> +        struct vcpu *curr = current;
> +
> +        if ( curr->arch.vm_event.emul_read_data )
> +        {
> +            unsigned int safe_bytes = min_t(unsigned int, bytes,
> +                curr->arch.vm_event.emul_read_data->size);
> +
> +            memcpy(p_new, curr->arch.vm_event.emul_read_data->data,
> +                   safe_bytes);
> +
> +            if ( bytes > safe_bytes )
> +                memset(p_new + safe_bytes, 0, bytes - safe_bytes);
> +        }
> +        else
> +            return X86EMUL_UNHANDLEABLE;

Please make this look as similar to hvmemul_read() as possible (invert
the if() condition etc). Once done, you'll see that this would be
better factored out...
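
Something along these lines perhaps (a sketch only; name and placement
are of course up to you):

    static int set_context_data(void *buffer, unsigned int size)
    {
        struct vcpu *curr = current;
        unsigned int safe_bytes;

        if ( !curr->arch.vm_event.emul_read_data )
            return X86EMUL_UNHANDLEABLE;

        safe_bytes = min(size, curr->arch.vm_event.emul_read_data->size);

        memcpy(buffer, curr->arch.vm_event.emul_read_data->data, safe_bytes);
        memset(buffer + safe_bytes, 0, size - safe_bytes);

        return X86EMUL_OKAY;
    }

with both hvmemul_read() and hvmemul_cmpxchg() (passing p_new there)
reduced to

    if ( unlikely(hvmemul_ctxt->set_context) )
    {
        int rc = set_context_data(p_data, bytes);

        if ( rc != X86EMUL_OKAY )
            return rc;
    }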

> @@ -935,6 +982,43 @@ static int hvmemul_rep_ins(
>                                 !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
>  }
>  
> +static int hvmemul_rep_outs_set_context(
> +    enum x86_segment src_seg,
> +    unsigned long src_offset,
> +    uint16_t dst_port,
> +    unsigned int bytes_per_rep,
> +    unsigned long *reps,
> +    struct x86_emulate_ctxt *ctxt)
> +{
> +    unsigned int bytes = *reps * bytes_per_rep;
> +    struct vcpu *curr = current;
> +    unsigned int safe_bytes;
> +    char *buf = NULL;

Pointless initializer.
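
I.e. plainly

    char *buf;

given that buf gets assigned before first use anyway.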

> @@ -1063,7 +1151,23 @@ static int hvmemul_rep_movs(
>       */
>      rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
>      if ( rc == HVMCOPY_okay )
> +    {
> +        struct vcpu *curr = current;
> +
> +        if ( unlikely(hvmemul_ctxt->set_context) &&
> +             curr->arch.vm_event.emul_read_data )

Factoring out will also make it obvious that here you're _still_ not
handling things consistently with the other cases.
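
With a helper as sketched above this path could then match the others,
e.g. (again only a sketch, re-using the invented set_context_data()):

    rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);

    if ( rc == HVMCOPY_okay && unlikely(hvmemul_ctxt->set_context) )
    {
        rc = set_context_data(buf, bytes);

        if ( rc != X86EMUL_OKAY )
        {
            xfree(buf);
            return rc;
        }

        rc = HVMCOPY_okay;
    }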

> @@ -1466,6 +1466,10 @@ void p2m_mem_access_emulate_check(struct vcpu *v,
>          }
>  
>          v->arch.vm_event.emulate_flags = violation ? rsp->flags : 0;
> +
> +        if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA &&

Please parenthesize the &.
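
I.e.

    if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) &&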

> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -166,6 +166,7 @@ struct vcpu *alloc_vcpu(
>          free_cpumask_var(v->cpu_hard_affinity_saved);
>          free_cpumask_var(v->cpu_soft_affinity);
>          free_cpumask_var(v->vcpu_dirty_cpumask);
> +        xfree(v->arch.vm_event.emul_read_data);
>          free_vcpu_struct(v);
>          return NULL;
>      }
> @@ -821,6 +822,7 @@ static void complete_domain_destroy(struct rcu_head *head)
>              free_cpumask_var(v->cpu_hard_affinity_saved);
>              free_cpumask_var(v->cpu_soft_affinity);
>              free_cpumask_var(v->vcpu_dirty_cpumask);
> +            xfree(v->arch.vm_event.emul_read_data);

Common code fiddling with arch-specific bits. I can't see how this
would build on ARM.
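
One way out (a sketch only; the helper name is invented) would be to
hide the xfree() behind an arch helper with an empty ARM stub, so that
common/domain.c never touches arch.vm_event:

    /* asm-x86/vm_event.h */
    static inline void vm_event_cleanup_vcpu(struct vcpu *v)
    {
        xfree(v->arch.vm_event.emul_read_data);
        v->arch.vm_event.emul_read_data = NULL;
    }

    /* asm-arm/vm_event.h */
    static inline void vm_event_cleanup_vcpu(struct vcpu *v) {}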

> @@ -63,6 +64,21 @@ static int vm_event_enable(
>      vm_event_ring_lock_init(ved);
>      vm_event_ring_lock(ved);
>  
> +    for_each_vcpu( d, v )
> +    {
> +        if ( v->arch.vm_event.emul_read_data )
> +            continue;
> +
> +        v->arch.vm_event.emul_read_data =
> +            xmalloc(struct vm_event_emul_read_data);
> +
> +        if ( !v->arch.vm_event.emul_read_data )
> +        {
> +            rc = -ENOMEM;
> +            goto err;
> +        }
> +    }

Same here. You need to decide whether this is to be a generic
feature (then the pointer needs to move to struct vcpu) or x86
specific (then the code needs to move to x86-specific files).
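
If it's to stay x86-specific, the allocation loop could similarly move
behind an arch hook called from vm_event_enable(), with an ARM stub
returning 0 (again just a sketch, the hook name being made up):

    /* arch/x86/vm_event.c */
    int vm_event_init_domain(struct domain *d)
    {
        struct vcpu *v;

        for_each_vcpu ( d, v )
        {
            if ( v->arch.vm_event.emul_read_data )
                continue;

            v->arch.vm_event.emul_read_data =
                xzalloc(struct vm_event_emul_read_data);

            if ( !v->arch.vm_event.emul_read_data )
                return -ENOMEM;
        }

        return 0;
    }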

> @@ -189,6 +198,12 @@ struct vm_event_sharing {
>      uint32_t _pad;
>  };
>  
> +struct vm_event_emul_read_data {
> +    uint32_t size;
> +    /* The struct is used in a union with vm_event_regs_x86. */
> +    uint8_t  data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];

The same concern again, albeit here I can see no good alternative at
this point.
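
A build-time check somewhere in the x86 sources could at least pin
down the size dependency, e.g. (sketch):

    BUILD_BUG_ON(sizeof(struct vm_event_emul_read_data) >
                 sizeof(struct vm_event_regs_x86));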

Jan
