in operators.py [0:0]
def execute(self, context):
    """Start a chat-completion run for the prompt in the scene properties.

    Clears previous chat history, (re)creates the "meshgen log" text block,
    makes sure it is visible in a text editor, lazily loads the backend model,
    then launches the completion on a worker queue and registers a modal
    timer to poll for output.

    Returns:
        {"CANCELLED"} if the model failed to load, otherwise
        {"RUNNING_MODAL"} with a 0.1 s timer driving the modal handler.
    """
    # Use the operator's context consistently (the original mixed in
    # bpy.context, which resolves to the same data here).
    prefs = context.preferences.addons[__package__].preferences
    props = context.scene.meshgen_props
    backend = Backend.instance()

    props.history.clear()

    self.temperature = prefs.temperature
    self.prompt = props.prompt
    self.messages = [{"role": "user", "content": self.prompt}]

    # Reuse the log text block if it exists; otherwise create it fresh.
    self.log_text = bpy.data.texts.get("meshgen log")
    if self.log_text is None:
        self.log_text = bpy.data.texts.new("meshgen log")
    else:
        self.log_text.clear()

    # Single pass over all editors: configure every space that already shows
    # the log, and remember whether any did (the original walked the tree
    # twice — once to configure, once just to recompute `log_open`).
    log_open = False
    for window in context.window_manager.windows:
        for area in window.screen.areas:
            if area.type != "TEXT_EDITOR":
                continue
            for space in area.spaces:
                if space.type == "TEXT_EDITOR" and space.text == self.log_text:
                    space.show_word_wrap = True
                    space.show_line_numbers = True
                    space.top = 0
                    log_open = True
                    break

    if not log_open:
        # Split the current area and point the new editor at the log.
        # NOTE(review): assumes the freshly split area lands at the end of
        # context.screen.areas — confirm this holds across Blender versions.
        bpy.ops.screen.area_split(direction="VERTICAL")
        new_area = context.screen.areas[-1]
        new_area.type = "TEXT_EDITOR"
        new_area.spaces.active.text = self.log_text

    self.log_text.write("\n----- New Chat -----\n")
    self.log_text.write(f"Prompt: {self.prompt}\n")

    if not backend.is_loaded():
        # Resolve the human-readable model name for progress/error events.
        provider_to_model = {
            "LOCAL": prefs.current_model,
            "ollama": prefs.ollama_model_name,
            "huggingface": prefs.huggingface_model_id,
            "anthropic": prefs.anthropic_model_id,
            "openai": prefs.openai_model_id,
        }
        model_name = (
            provider_to_model["LOCAL"]
            if prefs.backend_type == "LOCAL"
            else provider_to_model.get(prefs.llm_provider)
        )

        self.add_event("LOADING", "Loading...", f"Loading {model_name}...")
        try:
            backend.load()
        except Exception as e:
            # Replace the in-progress event with a detailed failure event.
            self.pop_event()
            self.add_event(
                "LOADING_ERROR",
                str(e),
                f"Error loading {model_name}:\n{traceback.format_exc()}",
            )
            return {"CANCELLED"}
        self.pop_event()
        self.add_event(
            "LOADING_SUCCESS", model_name, f"Finished loading {model_name}"
        )

    # Kick off the completion on the backend; the modal handler drains
    # self._output_queue and can cancel via self._stop_event.
    self._stop_event = threading.Event()
    self._output_queue = backend.start_chat_completion(
        self.messages, self.temperature, self._stop_event
    )

    props.state = "RUNNING"
    self.add_event("THINKING", None, "Thinking...")

    wm = context.window_manager
    self._timer = wm.event_timer_add(0.1, window=context.window)
    wm.modal_handler_add(self)
    return {"RUNNING_MODAL"}