#!/usr/bin/env python3
"""
JR SQL AI GUI (Ollama) - lightweight Arch/Hyprland friendly GUI.

- Left: Prompt/context
- Right: Rendered Markdown answer + raw markdown
- Buttons: Send, Copy, Copy SQL only, Model pull
"""

import json
import os
import re
import shutil  # needed by is_docker_available(); was missing and raised NameError
import sys
from dataclasses import dataclass
from typing import List, Optional

import requests
from PySide6.QtCore import Qt, QThread, QTimer, Signal
from PySide6.QtGui import QFont
from PySide6.QtWidgets import (
    QApplication,
    QCheckBox,
    QComboBox,
    QHBoxLayout,
    QLabel,
    QLineEdit,
    QMainWindow,
    QMessageBox,
    QPlainTextEdit,
    QPushButton,
    QSplitter,
    QStatusBar,
    QTextBrowser,
    QVBoxLayout,
    QWidget,
)

# -----------------------------
# Config (defaults)
# -----------------------------
DEFAULT_OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "http://127.0.0.1:11434")
DEFAULT_MODEL = os.environ.get("OLLAMA_MODEL", "jr-sql-expert:latest")


# -----------------------------
# Helpers
# -----------------------------
def is_docker_available() -> bool:
    """Return True if a ``docker`` executable is on PATH."""
    return shutil.which("docker") is not None


def human_error(e: Exception) -> str:
    """Format an exception as ``TypeName: message`` for status bars / dialogs."""
    return f"{type(e).__name__}: {e}"


# Common SQL keywords used to sniff whether an unlabeled fenced block is SQL.
SQL_KW_RE = re.compile(
    r"\b(select|from|where|join|group|order|having|insert|update|delete|create|alter|drop|with|merge)\b",
    re.IGNORECASE,
)

# Markdown fenced code block: optional language tag, then the body up to the
# closing fence. DOTALL so the body may span multiple lines.
FENCE_RE = re.compile(r"```(\w+)?\s*\n(.*?)\n```", re.DOTALL)


def extract_sql_blocks(markdown_text: str) -> List[str]:
    """
    Extract SQL from markdown fenced code blocks.

    Priority:
      1) ```sql ... ``` (also tsql / t-sql / mssql tags)
      2) any fenced block that looks like SQL (contains common keywords)

    Returns the stripped block bodies in document order; empty blocks are skipped.
    """
    blocks = []
    for m in FENCE_RE.finditer(markdown_text):
        lang = (m.group(1) or "").strip().lower()
        body = (m.group(2) or "").strip()
        if not body:
            continue
        if lang == "sql":
            blocks.append(body)
        elif lang in ("tsql", "t-sql", "mssql"):
            blocks.append(body)
        else:
            # Unlabeled (or other-language) fence: keep it only if it smells like SQL.
            if SQL_KW_RE.search(body):
                blocks.append(body)
    return blocks


def build_sql_only_text(blocks: List[str]) -> str:
    """Join extracted SQL blocks with a comment separator; '' if none found."""
    if not blocks:
        return ""
    return "\n\n-- ----------------------------------------\n\n".join(blocks) + "\n"


# -----------------------------
# Workers (threads)
# -----------------------------
@dataclass
class GenerateParams:
    """Parameters for one /api/generate request."""

    base_url: str
    model: str
    prompt: str
    stream: bool = True


class GenerateWorker(QThread):
    """Background thread that calls Ollama's /api/generate endpoint.

    Signals:
        chunk(str): one streamed response fragment (streaming mode only)
        done(str):  the full concatenated response
        error(str): human-readable error description
    """

    chunk = Signal(str)  # streaming chunk
    done = Signal(str)   # full response
    error = Signal(str)

    def __init__(self, params: GenerateParams):
        super().__init__()
        self.params = params

    def run(self) -> None:
        try:
            url = self.params.base_url.rstrip("/") + "/api/generate"
            payload = {
                "model": self.params.model,
                "prompt": self.params.prompt,
                "stream": self.params.stream,
            }
            # (connect, read) timeout: generation can legitimately take minutes.
            with requests.post(url, json=payload, stream=self.params.stream, timeout=(5, 600)) as r:
                r.raise_for_status()
                if not self.params.stream:
                    data = r.json()
                    self.done.emit(data.get("response", ""))
                    return
                # Streaming: one JSON object per line, each carrying a "response"
                # fragment; the final object has "done": true.
                full = []
                for line in r.iter_lines(decode_unicode=True):
                    if not line:
                        continue
                    obj = json.loads(line)
                    part = obj.get("response", "")
                    if part:
                        full.append(part)
                        self.chunk.emit(part)
                    if obj.get("done", False):
                        break
                self.done.emit("".join(full))
        except Exception as e:
            self.error.emit(human_error(e))


class PullModelWorker(QThread):
    """Background thread that pulls/updates a model via /api/pull.

    Signals:
        status(str): progress text ("status: completed/total" when available)
        done():      pull finished successfully
        error(str):  human-readable error description
    """

    status = Signal(str)
    done = Signal()
    error = Signal(str)

    def __init__(self, base_url: str, model: str):
        super().__init__()
        self.base_url = base_url
        self.model = model

    def run(self) -> None:
        try:
            url = self.base_url.rstrip("/") + "/api/pull"
            payload = {"name": self.model, "stream": True}
            # Long read timeout: large model downloads can take a while.
            with requests.post(url, json=payload, stream=True, timeout=(5, 1800)) as r:
                r.raise_for_status()
                for line in r.iter_lines(decode_unicode=True):
                    if not line:
                        continue
                    obj = json.loads(line)
                    st = obj.get("status")
                    total = obj.get("total")
                    completed = obj.get("completed")
                    if st and total and completed:
                        self.status.emit(f"{st}: {completed}/{total}")
                    elif st:
                        self.status.emit(st)
            self.done.emit()
        except Exception as e:
            self.error.emit(human_error(e))


# -----------------------------
# Main Window
# -----------------------------
class MainWindow(QMainWindow):
    """Main application window: prompt input, rendered/raw answer, model tools."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("JR SQL AI GUI (Ollama)")

        self._gen_worker: Optional[GenerateWorker] = None
        self._pull_worker: Optional[PullModelWorker] = None
        # Accumulated markdown of the current answer (source of truth for copy).
        self._raw_markdown: str = ""

        # Re-rendering markdown on every streamed chunk is expensive; throttle
        # the rendered view to 4 updates/second while streaming.
        self._render_timer = QTimer(self)
        self._render_timer.setInterval(250)  # throttle UI updates
        self._render_timer.timeout.connect(self._render_markdown_throttled)

        root = QWidget()
        self.setCentralWidget(root)
        layout = QVBoxLayout(root)

        # Top bar: server URL, model selection, streaming toggle.
        top = QHBoxLayout()
        layout.addLayout(top)

        top.addWidget(QLabel("Ollama URL:"))
        self.base_url = QLineEdit(DEFAULT_OLLAMA_BASE_URL)
        self.base_url.setMinimumWidth(260)
        top.addWidget(self.base_url, 2)

        top.addWidget(QLabel("Model:"))
        self.model = QComboBox()
        self.model.setEditable(True)
        self.model.addItem(DEFAULT_MODEL)
        self.model.setCurrentText(DEFAULT_MODEL)
        self.model.setMinimumWidth(220)
        top.addWidget(self.model, 1)

        self.btn_refresh_models = QPushButton("Models laden")
        self.btn_refresh_models.clicked.connect(self.refresh_models)
        top.addWidget(self.btn_refresh_models)

        self.chk_stream = QCheckBox("Streaming")
        self.chk_stream.setChecked(True)
        top.addWidget(self.chk_stream)

        # Split view: prompt on the left, answer on the right.
        splitter = QSplitter(Qt.Horizontal)
        layout.addWidget(splitter, 1)

        # Left: prompt
        left = QWidget()
        left_l = QVBoxLayout(left)
        left_l.addWidget(QLabel("Prompt / Kontext"))
        self.prompt = QPlainTextEdit()
        self.prompt.setPlaceholderText("Prompt + Kontext hier einfügen …")
        self.prompt.setFont(QFont("Monospace", 10))
        left_l.addWidget(self.prompt, 1)

        btn_row = QHBoxLayout()
        self.btn_send = QPushButton("An AI senden")
        self.btn_send.clicked.connect(self.on_send)
        btn_row.addWidget(self.btn_send)

        self.btn_clear = QPushButton("Leeren")
        self.btn_clear.clicked.connect(lambda: self.prompt.setPlainText(""))
        btn_row.addWidget(self.btn_clear)
        left_l.addLayout(btn_row)

        # Right: response (rendered markdown above, raw text below)
        right = QWidget()
        right_l = QVBoxLayout(right)
        right_l.addWidget(QLabel("Antwort (Markdown gerendert)"))
        self.response_view = QTextBrowser()
        self.response_view.setOpenExternalLinks(True)
        self.response_view.setFont(QFont("Monospace", 10))
        right_l.addWidget(self.response_view, 1)

        self.response_raw = QPlainTextEdit()
        self.response_raw.setReadOnly(True)
        self.response_raw.setFont(QFont("Monospace", 10))
        self.response_raw.setPlaceholderText("Raw Antwort (für Copy/Debug).")
        self.response_raw.setMaximumHeight(140)
        right_l.addWidget(self.response_raw)

        right_btn_row = QHBoxLayout()
        self.btn_copy = QPushButton("Antwort kopieren")
        self.btn_copy.clicked.connect(self.copy_response)
        right_btn_row.addWidget(self.btn_copy)

        self.btn_copy_sql = QPushButton("Copy SQL only")
        self.btn_copy_sql.clicked.connect(self.copy_sql_only)
        right_btn_row.addWidget(self.btn_copy_sql)

        self.btn_model_pull = QPushButton("Modell aktualisieren (pull)")
        self.btn_model_pull.clicked.connect(self.on_pull_model)
        right_btn_row.addWidget(self.btn_model_pull)
        right_l.addLayout(right_btn_row)

        splitter.addWidget(left)
        splitter.addWidget(right)
        splitter.setSizes([520, 760])

        self.status = QStatusBar()
        self.setStatusBar(self.status)
        self.status.showMessage("Bereit.")

        # Load the model list shortly after startup, once the event loop runs.
        QTimer.singleShot(300, self.refresh_models)

    # -------------- UI helpers --------------
    def ui_busy(self, busy: bool) -> None:
        """Enable/disable the interactive widgets while a worker runs."""
        for w in [self.btn_send, self.btn_model_pull, self.btn_refresh_models, self.btn_copy_sql]:
            w.setEnabled(not busy)
        self.prompt.setEnabled(not busy)
        self.base_url.setEnabled(not busy)
        self.model.setEnabled(not busy)
        self.chk_stream.setEnabled(not busy)

    def msg_error(self, title: str, text: str) -> None:
        QMessageBox.critical(self, title, text)

    def msg_info(self, title: str, text: str) -> None:
        QMessageBox.information(self, title, text)

    # -------------- Model list --------------
    def refresh_models(self) -> None:
        """Fetch the installed model names from /api/tags into the combo box.

        NOTE(review): this performs a blocking HTTP request on the UI thread;
        timeouts are short, but a hung server can freeze the UI briefly.
        """
        base = self.base_url.text().strip().rstrip("/")
        if not base:
            return
        try:
            r = requests.get(base + "/api/tags", timeout=(3, 15))
            r.raise_for_status()
            data = r.json()
            models = [m.get("name") for m in data.get("models", []) if m.get("name")]
            if models:
                # Preserve the current selection if it is still installed.
                current = self.model.currentText()
                self.model.clear()
                self.model.addItems(models)
                if current in models:
                    self.model.setCurrentText(current)
                else:
                    self.model.setCurrentIndex(0)
                self.status.showMessage(f"{len(models)} Modelle geladen.", 2500)
            else:
                self.status.showMessage("Keine Modelle gefunden (api/tags leer).", 5000)
        except Exception as e:
            self.status.showMessage(f"Model-Liste konnte nicht geladen werden: {human_error(e)}", 8000)

    # -------------- Send / Generate --------------
    def on_send(self) -> None:
        """Validate inputs and start a GenerateWorker for the current prompt."""
        prompt = self.prompt.toPlainText().strip()
        if not prompt:
            self.msg_info("Hinweis", "Bitte erst einen Prompt/Kontext eingeben.")
            return
        base = self.base_url.text().strip()
        model = self.model.currentText().strip()
        if not base or not model:
            self.msg_info("Hinweis", "Bitte Ollama URL und Model setzen.")
            return

        # Reset the previous answer before starting a new request.
        self._raw_markdown = ""
        self.response_raw.setPlainText("")
        self.response_view.setMarkdown("")
        self.status.showMessage("Sende Anfrage …")
        self.ui_busy(True)

        params = GenerateParams(
            base_url=base,
            model=model,
            prompt=prompt,
            stream=self.chk_stream.isChecked(),
        )
        self._gen_worker = GenerateWorker(params)
        self._gen_worker.chunk.connect(self._on_chunk)
        self._gen_worker.done.connect(self._on_done)
        self._gen_worker.error.connect(self._on_gen_error)
        self._gen_worker.start()

        if self.chk_stream.isChecked():
            self._render_timer.start()

    def _on_chunk(self, s: str) -> None:
        """Append a streamed fragment to the raw view and keep it scrolled down."""
        self._raw_markdown += s
        self.response_raw.setPlainText(self._raw_markdown)
        self.response_raw.verticalScrollBar().setValue(self.response_raw.verticalScrollBar().maximum())

    def _render_markdown_throttled(self) -> None:
        # Driven by _render_timer; re-renders the markdown view at most every 250 ms.
        if self._raw_markdown:
            self.response_view.setMarkdown(self._raw_markdown)

    def _on_done(self, full: str) -> None:
        """Finalize the answer: stop throttling, render once, re-enable the UI."""
        self._render_timer.stop()
        if not self.chk_stream.isChecked():
            # Non-streaming mode: the full text arrives only here.
            self._raw_markdown = full
            self.response_raw.setPlainText(full)
        self.response_view.setMarkdown(self._raw_markdown)
        self.status.showMessage("Fertig.", 2500)
        self.ui_busy(False)

    def _on_gen_error(self, err: str) -> None:
        self._render_timer.stop()
        self.ui_busy(False)
        self.status.showMessage("Fehler.", 5000)
        self.msg_error("Ollama Fehler", err)

    # -------------- Copy actions --------------
    def copy_response(self) -> None:
        """Copy the full markdown answer to the clipboard."""
        QApplication.clipboard().setText(self._raw_markdown or self.response_raw.toPlainText())
        self.status.showMessage("Antwort (Markdown) in Clipboard kopiert.", 2500)

    def copy_sql_only(self) -> None:
        """Copy only the SQL code blocks from the answer to the clipboard."""
        md = self._raw_markdown or self.response_raw.toPlainText()
        blocks = extract_sql_blocks(md)
        sql_text = build_sql_only_text(blocks)
        if not sql_text:
            self.msg_info("Kein SQL gefunden", "Ich habe in der Antwort keine SQL-Codeblöcke gefunden.")
            return
        QApplication.clipboard().setText(sql_text)
        self.status.showMessage(f"SQL kopiert ({len(blocks)} Block/Blöcke).", 3000)

    # -------------- Pull model --------------
    def on_pull_model(self) -> None:
        """Start a PullModelWorker for the currently selected model."""
        base = self.base_url.text().strip()
        model = self.model.currentText().strip()
        if not base or not model:
            self.msg_info("Hinweis", "Bitte Ollama URL und Model setzen.")
            return
        self.ui_busy(True)
        self.status.showMessage(f"Pull: {model} …")
        self._pull_worker = PullModelWorker(base, model)
        self._pull_worker.status.connect(lambda s: self.status.showMessage(f"Pull: {s}"))
        self._pull_worker.done.connect(self._on_pull_done)
        self._pull_worker.error.connect(self._on_pull_err)
        self._pull_worker.start()

    def _on_pull_done(self) -> None:
        self.ui_busy(False)
        self.status.showMessage("Model pull abgeschlossen.", 4000)
        self.refresh_models()

    def _on_pull_err(self, err: str) -> None:
        self.ui_busy(False)
        # Fixed: previously this showed the same error in two modal dialogs
        # back-to-back (copy-paste duplicate); one dialog is enough.
        self.msg_error("Model pull fehlgeschlagen", err)


def main() -> int:
    """Create the QApplication and run the main window's event loop."""
    # For Wayland/Hyprland you can force:
    #   QT_QPA_PLATFORM=wayland python sql_ai_gui.py
    app = QApplication(sys.argv)
    app.setApplicationName("JR SQL AI GUI")
    w = MainWindow()
    w.resize(1250, 760)
    w.show()
    return app.exec()


if __name__ == "__main__":
    raise SystemExit(main())