
Re: [Xen-devel] [PATCH v2] xen/passthrough: Support a single iommu_domain per xen domain per SMMU



Hello Julien,

On 3/23/2015 5:53 PM, Julien Grall wrote:
> Hello Robert,
> 
> On 23/03/2015 13:46, Robbie VanVossen wrote:
>> @@ -2569,7 +2570,9 @@ static int arm_smmu_assign_dev(struct domain *d, u8 devfn,
>>   {
>>      struct iommu_domain *domain;
>>      struct arm_smmu_xen_domain *xen_domain;
>> -    int ret;
>> +    struct arm_smmu_device *smmu;
>> +    int ret = 0;
>> +    int existing_ctxt_fnd = 0;
> 
> You can use a bool_t here.
> 
Ok.
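i.e. that declaration becomes:

	bool_t existing_ctxt_fnd = 0;

with it set to 1 where the existing context is found.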

>>
>>      xen_domain = domain_hvm_iommu(d)->arch.priv;
>>
>> @@ -2585,36 +2588,77 @@ static int arm_smmu_assign_dev(struct domain *d, u8 devfn,
>>                      return ret;
>>      }
>>
>> +    spin_lock(&xen_domain->lock);
>> +
>>      /*
>> -     * TODO: Share the context bank (i.e iommu_domain) when the device is
>> -     * under the same SMMU as another device assigned to this domain.
>> -     * Would it useful for PCI
>> +     * Check to see if a context bank (iommu_domain) already exists for
>> +     * this xen domain under the same SMMU
>>       */
>> -    domain = xzalloc(struct iommu_domain);
>> -    if (!domain)
>> -            return -ENOMEM;
>> +    if (!list_empty(&xen_domain->contexts)) {
>> +            smmu = find_smmu_for_device(dev);
>> +            if (!smmu) {
>> +                    dev_err(dev, "cannot find SMMU\n");
>> +                    ret = -ENXIO;
>> +                    goto out;
>> +            }
>>
>> -    ret = arm_smmu_domain_init(domain);
>> -    if (ret)
>> -            goto err_dom_init;
>> +            /*
>> +             * Loop through the &xen_domain->contexts to locate a context
>> +             * assigned to this SMMU
>> +             */
>> +            list_for_each_entry(domain, &xen_domain->contexts, list) {
>> +                    if(domain->priv->smmu == smmu)
>> +                    {
> 
> Coding style. This should be
> if (domain->priv->smmu == smmu) {
> 
Will fix.

>> +                            /*
>> +                             * We have found a context already associated with the
>> +                             * same xen domain and SMMU
>> +                             */
>> +                            ret = arm_smmu_attach_dev(domain, dev);
>> +                            if (ret) {
>> +                                    dev_err(dev,
>> +                                            "cannot attach device to already existing iommu_domain\n");
> 
> We don't print an error message for a new domain, so I don't think this
> one is useful.
> 
Will remove dev_err.

>> +                                    goto out;
>> +                            }
>> +                            atomic_inc(&domain->ref);
> 
> Introducing a helper to get the iommu_domain would simplify the workflow.
> 
> That would give something like:
> 
> domain = arm_smmu_get_domain(d, dev);
> if (!domain)
> {
>      allocate domain
>      Add to list dev
> }
> 
> arm_smmu_attach_dev(domain, dev);
> atomic_inc(&domain->ref);
> 
> out:
>     if (failed && atomic == 0)
>       destroy iommu_domain.
> 
> The destroy iommu_domain could be shared with the one in deassign_dev.
> 

But don't we need to handle an attach_dev failure differently depending on
which case fails? If attach_dev fails on a new iommu_domain, then the
iommu_domain should be destroyed. If it fails on a preexisting iommu_domain
that is already attached to another device, then we don't want to destroy the
iommu_domain.
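
To make sure I'm reading the suggestion right, the tail of
arm_smmu_assign_dev() would end up looking roughly like this (a sketch only,
reusing the names already in this patch; arm_smmu_get_domain() and
arm_smmu_destroy_iommu_domain() are placeholder helpers that don't exist yet):

/* Placeholder helper (not in this patch): return the context already
 * created for this xen domain on the SMMU that serves dev, if any. */
static struct iommu_domain *arm_smmu_get_domain(struct domain *d,
						struct device *dev)
{
	struct iommu_domain *domain;
	struct arm_smmu_xen_domain *xen_domain;
	struct arm_smmu_device *smmu;

	xen_domain = domain_hvm_iommu(d)->arch.priv;

	smmu = find_smmu_for_device(dev);
	if (!smmu)
		return NULL;

	list_for_each_entry(domain, &xen_domain->contexts, list) {
		if (domain->priv->smmu == smmu)
			return domain;
	}

	return NULL;
}

	/* ... and in arm_smmu_assign_dev(), with xen_domain->lock held: */
	domain = arm_smmu_get_domain(d, dev);
	if (!domain) {
		/* xzalloc() + arm_smmu_domain_init() + set cfg.domain,
		 * then chain the new context to xen_domain->contexts */
	}

	ret = arm_smmu_attach_dev(domain, dev);
	if (!ret)
		atomic_inc(&domain->ref);

out:
	/*
	 * A preexisting context still has ref > 0 from the device(s) already
	 * attached to it, so only a context that no device is using (i.e. the
	 * one we just created) would be torn down here.
	 * arm_smmu_destroy_iommu_domain() is a placeholder for
	 * list_del() + arm_smmu_domain_destroy() + xfree().
	 */
	if (ret && domain && atomic_read(&domain->ref) == 0)
		arm_smmu_destroy_iommu_domain(domain);

	spin_unlock(&xen_domain->lock);

	return ret;

Is that the behaviour you had in mind, with the ref check deciding whether the
failure path destroys the context?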

>> +
>> +                            existing_ctxt_fnd = 1;
>> +                            break;
>> +                    }
>> +            }
>> +    }
>>
>> -    domain->priv->cfg.domain = d;
>> +    if(!existing_ctxt_fnd){
>>
>> -    ret = arm_smmu_attach_dev(domain, dev);
>> -    if (ret)
>> -            goto err_attach_dev;
>> +            domain = xzalloc(struct iommu_domain);
>> +            if (!domain) {
>> +                    ret = -ENOMEM;
>> +                    goto out;
>> +            }
>>
>> -    spin_lock(&xen_domain->lock);
>> -    /* Chain the new context to the domain */
>> -    list_add(&domain->list, &xen_domain->contexts);
>> -    spin_unlock(&xen_domain->lock);
>> +            ret = arm_smmu_domain_init(domain);
>> +            if (ret)
>> +                    goto err_dom_init;
>>
>> -    return 0;
>> +            domain->priv->cfg.domain = d;
>> +
>> +            ret = arm_smmu_attach_dev(domain, dev);
>> +            if (ret)
>> +                    goto err_attach_dev;
>> +            atomic_inc(&domain->ref);
>> +
>> +            /* Chain the new context to the domain */
>> +            list_add(&domain->list, &xen_domain->contexts);
>> +            
>> +    }
>> +
>> +    goto out;
>>
>>   err_attach_dev:
>>      arm_smmu_domain_destroy(domain);
>>   err_dom_init:
>>      xfree(domain);
>> +out:
>> +    spin_unlock(&xen_domain->lock);
>>
>>      return ret;
>>   }
>> @@ -2631,14 +2675,20 @@ static int arm_smmu_deassign_dev(struct domain *d, struct device *dev)
>>              return -ESRCH;
>>      }
>>
>> +    spin_lock(&xen_domain->lock);
>> +
>>      arm_smmu_detach_dev(domain, dev);
>> +    atomic_dec(&domain->ref);
>>
>> -    spin_lock(&xen_domain->lock);
>> -    list_del(&domain->list);
>> -    spin_unlock(&xen_domain->lock);
>> +    if (domain->ref.counter == 0)
>> +    {
> 
> Coding style:
> 
> if (...) {
> 
Will fix.
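
For what it's worth, the tail of arm_smmu_deassign_dev() would then look
something like this (again just a sketch; I'm assuming the body of that if
block keeps the list_del()/arm_smmu_domain_destroy()/xfree() that the old code
did unconditionally, and I'd use atomic_read() rather than poking at
ref.counter directly):

	spin_lock(&xen_domain->lock);

	arm_smmu_detach_dev(domain, dev);
	atomic_dec(&domain->ref);

	/* Free the context (iommu_domain) once no device assigned to this
	 * xen domain uses it any more. */
	if (atomic_read(&domain->ref) == 0) {
		list_del(&domain->list);
		arm_smmu_domain_destroy(domain);
		xfree(domain);
	}

	spin_unlock(&xen_domain->lock);

	return 0;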

Thanks,
Robbie VanVossen

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
