def __call__()

in nl2sql/tasks/sql_generation/core.py [0:0]


    def __call__(self, db: Database, question: str) -> CoreSqlGenratorResult:
        """
        Runs the SQL Generation pipeline
        """
        logger.info(f"Running {self.tasktype} ...")

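        # Collect every value a dialect prompt template might reference; keys
        # the chosen template does not declare are filtered out before formatting.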
        prompt_params = {
            "question": question,
            "query": question,
            "input": question,
            "thoughts": [],
            "answer": None,
            "dialect": db.db.dialect,
            "top_k": self.max_rows_limit,
            "table_info": db.db.table_info,
            "db_descriptor": {db.name: db.descriptor},
            "table_name": ", ".join(db.db._usable_tables),
            "table_names": list(db.db._usable_tables),
        }
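        # Pick the template registered for this database's SQL dialect, falling
        # back to the "default" template when no dialect-specific one exists.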
        prompt_template = self.prompt.dialect_prompt_template_map.get(
            db.db.dialect,
            self.prompt.dialect_prompt_template_map.get("default"),
        )
        if prompt_template is None:
            raise ValueError(
                f"No suitable / default prompt template found for {db.db.dialect}"
            )
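        # Format the template using only the variables it actually declares.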
        prepared_prompt = prompt_template.format(
            **{
                k: v
                for k, v in prompt_params.items()
                if k in prompt_template.input_variables
            }
        )
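        # Send the prepared prompt to the LLM.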
        llm_response = self.llm.generate([prepared_prompt])
        logger.debug(
            f"[{self.tasktype}] : Received LLM Response : {llm_response.json()}"
        )
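        # Take the text of the first generation; an empty generations list means
        # the LLM returned nothing usable.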
        try:
            raw_response = llm_response.generations[0][0].text.strip()
        except IndexError as exc:
            raise ValueError(
                f"Empty / Invalid Response received from LLM : {llm_response.json()}"
            ) from exc

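        # Run the optional output parser, then the prompt's post-processor, to
        # turn the raw completion into the final SQL string.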
        parsed_response = (
            self.prompt.parser.parse(raw_response)
            if self.prompt.parser
            else raw_response
        )
        processed_response = self.prompt.post_processor(parsed_response)
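        # Record each stage of the pipeline so callers can inspect or debug how
        # the final query was produced.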
        intermediate_steps = [
            {
                "tasktype": self.tasktype,
                "prepared_prompt": prepared_prompt,
                "llm_response": llm_response.dict(),
                "raw_response": raw_response,
                "parsed_response": parsed_response,
                "processed_response": processed_response,
            }
        ]

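        # Package the inputs, the final query, and the trace into the result object.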
        return CoreSqlGenratorResult(
            db_name=db.name,
            question=question,
            generated_query=processed_response,
            intermediate_steps=intermediate_steps,
        )
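
A minimal usage sketch, illustrative only: how the `db` wrapper and the task
instance `generator` are constructed is assumed and not shown by this snippet;
only the `__call__` signature and the result fields above come from the source.

    # Hypothetical example: `generator` is an instance of the task class that
    # owns this __call__, and `db` is a Database wrapper as expected above.
    result = generator(db=db, question="How many orders were placed last week?")

    print(result.generated_query)            # post-processed SQL string
    print(result.db_name, result.question)   # echoed inputs
    for step in result.intermediate_steps:   # full trace of the pipeline
        print(step["tasktype"], step["raw_response"])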