def get_rules()

in plugins/spark_upgrade/spark_config/__init__.py


    def get_rules(self) -> List[Rule]:
        # Filters cannot be added to a Rule after it has been instantiated,
        # so we build the complete filter set before constructing the Rule.
        fs = {
            Filter(
                not_enclosing_node='cs new SparkConf().set("spark.sql.legacy.timeParserPolicy","LEGACY").set("spark.sql.legacy.allowUntypedScalaUDF", "true")'
            ),
        }
        if self.language == "java":
            fs.add(Filter(not_enclosing_node=_JAVASPARKCONTEXT_OCE_QUERY))
            fs.add(
                Filter(not_enclosing_node=_EXPR_STMT_CHAIN_ENDS_WITH_GETORCREATE_QUERY)
            )
        elif self.language == "scala":
            fs.add(Filter(not_enclosing_node=_SCALA_CHAIN_ENDS_WITH_GETORCREATE_QUERY))

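        # Rewrite bare `new SparkConf()` instantiations so they chain the two
        # legacy configs. The first filter above stops the rule from re-firing
        # on its own output; the language-specific guards skip
        # JavaSparkContext constructions and chains ending in getOrCreate.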
        update_spark_conf_init = Rule(
            name="update_spark_conf_init",
            query="cs new SparkConf()",
            replace_node="*",
            replace='new SparkConf().set("spark.sql.legacy.timeParserPolicy","LEGACY").set("spark.sql.legacy.allowUntypedScalaUDF", "true")',
            filters=fs,
        )

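        # Same guard idea for the SparkSession.builder() variant: do not touch
        # builders that already chain the legacy configs.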
        fs2 = {
            Filter(
                not_enclosing_node='cs SparkSession.builder().config("spark.sql.legacy.timeParserPolicy","LEGACY").config("spark.sql.legacy.allowUntypedScalaUDF", "true")'
            )
        }
        if self.language == "java":
            fs2.add(Filter(not_enclosing_node=_JAVASPARKCONTEXT_OCE_QUERY))
            fs2.add(Filter(not_enclosing_node=_SPARK_SESSION_BUILDER_CHAIN_QUERY))
            fs2.add(
                Filter(not_enclosing_node=_EXPR_STMT_CHAIN_ENDS_WITH_GETORCREATE_QUERY)
            )
        elif self.language == "scala":
            fs2.add(Filter(not_enclosing_node=_SCALA_CHAIN_ENDS_WITH_GETORCREATE_QUERY))

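        # Rewrite bare `SparkSession.builder()` calls to append the same two
        # legacy configs via `.config(...)`.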
        update_spark_session_builder_init = Rule(
            name="update_spark_conf_init",
            query="cs SparkSession.builder()",
            replace_node="*",
            replace='SparkSession.builder().config("spark.sql.legacy.timeParserPolicy","LEGACY").config("spark.sql.legacy.allowUntypedScalaUDF", "true")',
            filters=fs2,
        )

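        # The shaded `org.spark_project.jetty.util.ArrayQueue` is not
        # available after the Spark 3.x upgrade; `java.util.ArrayDeque` is the
        # drop-in replacement. Java imports end with a semicolon and Scala
        # imports do not, hence the per-language query and replacement.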
        update_import_array_queue = Rule(
            name="update_import_array_queue",
            query=(
                "cs import org.spark_project.jetty.util.ArrayQueue;"
                if self.language == "java"
                else "cs import org.spark_project.jetty.util.ArrayQueue"
            ),
            replace_node="*",
            replace=(
                "import java.util.ArrayDeque;"
                if self.language == "java"
                else "import java.util.ArrayDeque"
            ),
        )

        return [
            update_spark_conf_init,
            update_spark_session_builder_init,
            update_import_array_queue,
        ]
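
For context, here is a minimal sketch of how a rule like `update_spark_conf_init` can be exercised on its own through the `polyglot_piranha` Python API, assuming its `code_snippet`/`dry_run` entry point; the sample Java class and the standalone wiring below are illustrative only, since the plugin itself runs these rules through its own harness:

    from polyglot_piranha import (
        Filter,
        PiranhaArguments,
        Rule,
        RuleGraph,
        execute_piranha,
    )

    LEGACY_CHAIN = (
        'new SparkConf().set("spark.sql.legacy.timeParserPolicy","LEGACY")'
        '.set("spark.sql.legacy.allowUntypedScalaUDF", "true")'
    )

    rule = Rule(
        name="update_spark_conf_init",
        query="cs new SparkConf()",
        replace_node="*",
        replace=LEGACY_CHAIN,
        # Piranha rewrites to a fixpoint; without this guard the rule would
        # match the `new SparkConf()` inside its own replacement and re-fire.
        filters={Filter(not_enclosing_node=f"cs {LEGACY_CHAIN}")},
    )

    args = PiranhaArguments(
        language="java",
        code_snippet="class A { SparkConf conf = new SparkConf(); }",
        rule_graph=RuleGraph(rules=[rule], edges=[]),
        dry_run=True,  # keep the rewrite in memory; no files are written
    )

    for summary in execute_piranha(args):
        print(summary.content)  # snippet with the chained .set(...) calls

The `not_enclosing_node` filter is what makes the rewrite idempotent: running the same rule over already-migrated code produces no further edits.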