in arm/arm-smmu-v3/arm-smmu-v3.c [3290:3423]
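/*
 * Reset/initialisation sequence (summary of the code below): disable the
 * SMMU, program the table/queue memory attributes (CR1, CR2), the stream
 * table and the command, event and PRI queues, invalidate cached
 * configuration and TLBs, set up IRQs, then enable translation (or ensure
 * bypass via GBPA when requested).
 */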
static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN) {
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
		WARN_ON(is_kdump_kernel() && !disable_bypass);
		arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
	}

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) |
	      FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) |
	      FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (private TLB maintenance, record invalid SIDs, E2H when supported) */
	reg = CR2_PTM | CR2_RECINVSID;
	if (smmu->features & ARM_SMMU_FEAT_E2H)
		reg |= CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

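	/*
	 * CR0 enable bits are accumulated in 'enables' so that each
	 * arm_smmu_write_reg_sync() below (which waits for the update to be
	 * acknowledged in CR0ACK) keeps the queues enabled by earlier steps.
	 */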
	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

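	/*
	 * With the command queue running, flush any configuration and TLB
	 * state that may be left over from a previous owner of the SMMU
	 * (e.g. firmware or a kexec'd kernel) before translation is enabled.
	 */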
	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.llq.prod,
			       smmu->page1 + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.llq.cons,
			       smmu->page1 + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

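	/* Have ATS Translated transactions checked against the stream table */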
	if (smmu->features & ARM_SMMU_FEAT_ATS) {
		enables |= CR0_ATSCHK;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable ATS check\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

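	/*
	 * In a kdump kernel, leave the event and PRI queues disabled; faults
	 * and page requests from devices left running by the crashed kernel
	 * are of no interest here.
	 */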
	if (is_kdump_kernel())
		enables &= ~(CR0_EVTQEN | CR0_PRIQEN);

	/* Enable the SMMU interface, or ensure bypass */
	if (!bypass || disable_bypass) {
		enables |= CR0_SMMUEN;
	} else {
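		/* Leave SMMUEN clear and clear GBPA.ABORT so traffic bypasses untranslated */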
		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
		if (ret)
			return ret;
	}

	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}