@@ -3691,11 +3691,9 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
 static int intel_iommu_attach_device(struct iommu_domain *domain,
 				     struct device *dev)
 {
-	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	int ret;
 
-	if (info->domain)
-		device_block_translation(dev);
+	device_block_translation(dev);
 
 	ret = prepare_domain_attach_device(domain, dev);
 	if (ret)
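
This first hunk drops the info->domain check, presumably because device_block_translation() is safe to call whether or not a domain is currently attached, so the attach path can unconditionally pass through the blocked state first. A toy userspace model of that sequence is sketched below; the names (dev_state, block_translation, attach) are hypothetical stand-ins, not kernel API.

#include <stdio.h>

/* Hypothetical model of the attach path; not kernel API. */
enum dom_kind { DOM_NONE, DOM_BLOCKED, DOM_IDENTITY, DOM_PAGING };

struct dev_state {
	enum dom_kind cur;	/* currently attached translation */
};

/* Model of device_block_translation(): idempotent, safe from any state. */
static void block_translation(struct dev_state *d)
{
	d->cur = DOM_BLOCKED;
}

/* Model of intel_iommu_attach_device() after the patch: no
 * "is anything attached?" check, just block then install. */
static int attach(struct dev_state *d, enum dom_kind new_dom)
{
	block_translation(d);
	d->cur = new_dom;
	return 0;
}

int main(void)
{
	struct dev_state d = { .cur = DOM_IDENTITY };

	attach(&d, DOM_PAGING);	/* works the same starting from DOM_NONE */
	printf("attached kind: %d\n", d.cur);
	return 0;
}
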
@@ -4301,11 +4299,17 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
 					 struct iommu_domain *domain)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
-	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	struct dev_pasid_info *curr, *dev_pasid = NULL;
 	struct intel_iommu *iommu = info->iommu;
+	struct dmar_domain *dmar_domain;
 	unsigned long flags;
 
+	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+		intel_pasid_tear_down_entry(iommu, dev, pasid, false);
+		return;
+	}
+
+	dmar_domain = to_dmar_domain(domain);
 	spin_lock_irqsave(&dmar_domain->lock, flags);
 	list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
 		if (curr->dev == dev && curr->pasid == pasid) {
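
The IOMMU_DOMAIN_IDENTITY early return in this hunk exists because the new identity_domain is a bare, statically allocated struct iommu_domain that is not embedded in a struct dmar_domain. to_dmar_domain() is a container_of()-style downcast, so applying it to the static singleton would compute a pointer to driver-private fields that were never allocated. A minimal userspace model of that hazard, with hypothetical type names, follows.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct iommu_domain / struct dmar_domain. */
struct base_domain { int type; };
struct driver_domain {
	long lock;			/* driver-private state */
	struct base_domain domain;	/* embedded core object */
};

#define TYPE_IDENTITY 1

/* Mirrors to_dmar_domain(): only valid if 'd' really is embedded. */
#define to_driver_domain(d) \
	((struct driver_domain *)((char *)(d) - offsetof(struct driver_domain, domain)))

/* Bare singleton, NOT wrapped in a driver_domain. */
static struct base_domain identity_domain = { .type = TYPE_IDENTITY };

static void remove_dev_pasid(struct base_domain *d)
{
	if (d->type == TYPE_IDENTITY) {
		/* Static singleton: tear down directly, never downcast. */
		printf("identity: tear down PASID entry, done\n");
		return;
	}
	/* Safe: every other domain is embedded in a driver_domain. */
	printf("driver lock at %p\n", (void *)&to_driver_domain(d)->lock);
}

int main(void)
{
	struct driver_domain full = { .domain = { .type = 0 } };

	remove_dev_pasid(&identity_domain);	/* early-return path */
	remove_dev_pasid(&full.domain);		/* downcast is valid here */
	return 0;
}
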
@@ -4532,9 +4536,111 @@ static const struct iommu_dirty_ops intel_dirty_ops = {
 	.read_and_clear_dirty = intel_iommu_read_and_clear_dirty,
 };
 
+static int context_setup_pass_through(struct device *dev, u8 bus, u8 devfn)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
+	struct context_entry *context;
+
+	spin_lock(&iommu->lock);
+	context = iommu_context_addr(iommu, bus, devfn, 1);
+	if (!context) {
+		spin_unlock(&iommu->lock);
+		return -ENOMEM;
+	}
+
+	if (context_present(context) && !context_copied(iommu, bus, devfn)) {
+		spin_unlock(&iommu->lock);
+		return 0;
+	}
+
+	copied_context_tear_down(iommu, context, bus, devfn);
+	context_clear_entry(context);
+	context_set_domain_id(context, FLPT_DEFAULT_DID);
+
+	/*
+	 * In pass through mode, AW must be programmed to indicate the largest
+	 * AGAW value supported by hardware. And ASR is ignored by hardware.
+	 */
+	context_set_address_width(context, iommu->msagaw);
+	context_set_translation_type(context, CONTEXT_TT_PASS_THROUGH);
+	context_set_fault_enable(context);
+	context_set_present(context);
+	if (!ecap_coherent(iommu->ecap))
+		clflush_cache_range(context, sizeof(*context));
+	context_present_cache_flush(iommu, FLPT_DEFAULT_DID, bus, devfn);
+	spin_unlock(&iommu->lock);
+
+	return 0;
+}
+
+static int context_setup_pass_through_cb(struct pci_dev *pdev, u16 alias, void *data)
+{
+	struct device *dev = data;
+
+	if (dev != &pdev->dev)
+		return 0;
+
+	return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff);
+}
+
+static int device_setup_pass_through(struct device *dev)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+	if (!dev_is_pci(dev))
+		return context_setup_pass_through(dev, info->bus, info->devfn);
+
+	return pci_for_each_dma_alias(to_pci_dev(dev),
+				      context_setup_pass_through_cb, dev);
+}
+
+static int identity_domain_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
+	int ret;
+
+	device_block_translation(dev);
+
+	if (dev_is_real_dma_subdevice(dev))
+		return 0;
+
+	if (sm_supported(iommu)) {
+		ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
+		if (!ret)
+			iommu_enable_pci_caps(info);
+	} else {
+		ret = device_setup_pass_through(dev);
+	}
+
+	return ret;
+}
+
+static int identity_domain_set_dev_pasid(struct iommu_domain *domain,
+					 struct device *dev, ioasid_t pasid)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
+
+	if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
+		return -EOPNOTSUPP;
+
+	return intel_pasid_setup_pass_through(iommu, dev, pasid);
+}
+
+static struct iommu_domain identity_domain = {
+	.type = IOMMU_DOMAIN_IDENTITY,
+	.ops = &(const struct iommu_domain_ops) {
+		.attach_dev = identity_domain_attach_dev,
+		.set_dev_pasid = identity_domain_set_dev_pasid,
+	},
+};
+
 const struct iommu_ops intel_iommu_ops = {
 	.blocked_domain = &blocking_domain,
 	.release_domain = &blocking_domain,
+	.identity_domain = &identity_domain,
 	.capable = intel_iommu_capable,
 	.hw_info = intel_iommu_hw_info,
 	.domain_alloc = intel_iommu_domain_alloc,
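
Taken together, this last hunk follows the pattern the driver already uses for blocking_domain: one statically allocated domain with its own iommu_domain_ops is exported through iommu_ops, and the core can attach it without ever calling domain_alloc() for the identity case. The sketch below models that singleton-registration flow in plain C; all names here (domain_ops, driver_ops, core_attach_identity, and so on) are hypothetical stand-ins rather than kernel API.

#include <stdio.h>

struct device;	/* opaque, as in the kernel */

/* Per-domain callbacks, mirroring struct iommu_domain_ops. */
struct domain_ops {
	int (*attach_dev)(struct device *dev);
};

struct domain {
	int type;
	const struct domain_ops *ops;
};

/* Driver-wide table, mirroring the identity_domain pointer that this
 * patch adds to struct iommu_ops. */
struct driver_ops {
	struct domain *identity_domain;
};

static int identity_attach_dev(struct device *dev)
{
	(void)dev;
	printf("program pass-through context for device\n");
	return 0;
}

/* One static singleton shared by every device behind this driver. */
static struct domain identity_domain = {
	.type = 1,	/* "identity" */
	.ops = &(const struct domain_ops){
		.attach_dev = identity_attach_dev,
	},
};

static const struct driver_ops intel_ops = {
	.identity_domain = &identity_domain,
};

/* Core side: no domain_alloc() call, just attach the driver's singleton. */
static int core_attach_identity(const struct driver_ops *ops, struct device *dev)
{
	return ops->identity_domain->ops->attach_dev(dev);
}

int main(void)
{
	return core_attach_identity(&intel_ops, NULL);
}

Because the singleton carries no driver-private state, paths such as intel_iommu_remove_dev_pasid() must recognize it by type instead of downcasting, which is exactly what the second hunk adds.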