Fixed several bugs; this is the first working solution, but it runs on CPUs rather than NVIDIA GPUs.
This commit is contained in:
@@ -2,26 +2,34 @@
|
||||
set -euo pipefail
|
||||
|
||||
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||
|
||||
# Create .env if missing (do not overwrite)
|
||||
cp -n "${ROOT}/.env.example" "${ROOT}/.env" || true
|
||||
|
||||
# shellcheck disable=SC1090
|
||||
source "${ROOT}/.env"
|
||||
|
||||
ts(){ date -Is; }
|
||||
|
||||
log_dir="${ROOT}/logs"
|
||||
mkdir -p "$log_dir"
|
||||
log_file="${log_dir}/bootstrap-$(date -Iseconds).log"
|
||||
|
||||
# log everything (stdout+stderr)
|
||||
exec > >(tee -a "$log_file") 2>&1
|
||||
|
||||
echo "[$(ts)] bootstrap: starting (ROOT=$ROOT)"
|
||||
|
||||
echo "[$(ts)] bootstrap: docker compose up -d"
|
||||
docker compose -f "${ROOT}/docker-compose.yml" up -d
|
||||
|
||||
echo "[$(ts)] bootstrap: waiting for Ollama API at ${OLLAMA_URL} ..."
|
||||
for i in {1..90}; do
|
||||
for i in {1..120}; do
|
||||
if curl -sS "${OLLAMA_URL}/api/tags" >/dev/null 2>&1; then
|
||||
echo "[$(ts)] bootstrap: Ollama API is up."
|
||||
break
|
||||
fi
|
||||
if [[ $i -eq 90 ]]; then
|
||||
if [[ $i -eq 120 ]]; then
|
||||
echo "[$(ts)] bootstrap: ERROR: API did not come up in time."
|
||||
exit 1
|
||||
fi
|
||||
@@ -39,11 +47,29 @@ if [[ -n "${EXTRA_MODELS:-}" ]]; then
|
||||
fi
|
||||
|
||||
echo "[$(ts)] bootstrap: building expert model: ${EXPERT_MODEL}"
|
||||
|
||||
tmp="$(mktemp)"
|
||||
sed "s/\${BASE_MODEL}/${BASE_MODEL}/g" "${ROOT}/Modelfile" > "$tmp"
|
||||
docker exec -i ollama ollama create "${EXPERT_MODEL}" -f - < "$tmp"
|
||||
|
||||
# Copy the Modelfile into the container and build from an explicit path (more robust than piping it over stdin)
|
||||
docker cp "$tmp" ollama:/tmp/Modelfile.jr-sql-expert
|
||||
docker exec -it ollama ollama create "${EXPERT_MODEL}" -f /tmp/Modelfile.jr-sql-expert
|
||||
|
||||
rm -f "$tmp"
|
||||
|
||||
echo "[$(ts)] bootstrap: verifying model exists..."
|
||||
docker exec -it ollama ollama list | grep -F "${EXPERT_MODEL}" >/dev/null && \
|
||||
echo "[$(ts)] bootstrap: OK: ${EXPERT_MODEL} is available."
|
||||
|
||||
# End-to-end test
|
||||
if [[ ! -x "${ROOT}/bin/sqlai" ]]; then
|
||||
echo "[$(ts)] bootstrap: ERROR: ${ROOT}/bin/sqlai not found or not executable"
|
||||
ls -la "${ROOT}/bin" || true
|
||||
exit 2
|
||||
fi
|
||||
|
||||
echo "[$(ts)] bootstrap: test (running one request)..."
|
||||
echo "SELECT 1;" | "${ROOT}/bin/sqlai" analyze-tsql
|
||||
echo "[$(ts)] bootstrap: test done"
|
||||
|
||||
echo "[$(ts)] bootstrap: done"
|
||||
echo "[$(ts)] bootstrap: test:"
|
||||
echo " echo "SELECT 1;" | ${ROOT}/bin/sqlai analyze-tsql"
|
||||
|
||||
@@ -2,27 +2,34 @@
|
||||
set -euo pipefail
|
||||
|
||||
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||
|
||||
# shellcheck disable=SC1090
|
||||
source "${ROOT}/.env"
|
||||
|
||||
ts(){ date -Is; }
|
||||
|
||||
log_dir="${ROOT}/logs"
|
||||
mkdir -p "$log_dir"
|
||||
log_file="${log_dir}/update-$(date -Iseconds).log"
|
||||
|
||||
# log everything (stdout+stderr)
|
||||
exec > >(tee -a "$log_file") 2>&1
|
||||
|
||||
echo "[$(ts)] update: starting (ROOT=$ROOT)"
|
||||
|
||||
echo "[$(ts)] update: pulling docker image(s)"
|
||||
docker compose -f "${ROOT}/docker-compose.yml" pull
|
||||
|
||||
echo "[$(ts)] update: restarting services"
|
||||
docker compose -f "${ROOT}/docker-compose.yml" up -d
|
||||
|
||||
echo "[$(ts)] update: waiting for Ollama API at ${OLLAMA_URL} ..."
|
||||
for i in {1..90}; do
|
||||
for i in {1..120}; do
|
||||
if curl -sS "${OLLAMA_URL}/api/tags" >/dev/null 2>&1; then
|
||||
echo "[$(ts)] update: Ollama API is up."
|
||||
break
|
||||
fi
|
||||
if [[ $i -eq 90 ]]; then
|
||||
if [[ $i -eq 120 ]]; then
|
||||
echo "[$(ts)] update: ERROR: API did not come up in time."
|
||||
exit 1
|
||||
fi
|
||||
@@ -40,9 +47,17 @@ if [[ -n "${EXTRA_MODELS:-}" ]]; then
|
||||
fi
|
||||
|
||||
echo "[$(ts)] update: rebuilding expert model: ${EXPERT_MODEL}"
|
||||
|
||||
tmp="$(mktemp)"
|
||||
sed "s/\${BASE_MODEL}/${BASE_MODEL}/g" "${ROOT}/Modelfile" > "$tmp"
|
||||
docker exec -i ollama ollama create "${EXPERT_MODEL}" -f - < "$tmp"
|
||||
|
||||
docker cp "$tmp" ollama:/tmp/Modelfile.jr-sql-expert
|
||||
docker exec -it ollama ollama create "${EXPERT_MODEL}" -f /tmp/Modelfile.jr-sql-expert
|
||||
|
||||
rm -f "$tmp"
|
||||
|
||||
echo "[$(ts)] update: verifying model exists..."
|
||||
docker exec -it ollama ollama list | grep -F "${EXPERT_MODEL}" >/dev/null && \
|
||||
echo "[$(ts)] update: OK: ${EXPERT_MODEL} is available."
|
||||
|
||||
echo "[$(ts)] update: complete"
|
||||
|
||||
Reference in New Issue
Block a user