47 changes: 47 additions & 0 deletions src/__tests__/normalizeLmStudioBaseUrl.test.ts
@@ -0,0 +1,47 @@
import { describe, expect, it } from "vitest";
import {
getLMStudioBaseUrl,
normalizeLmStudioBaseUrl,
} from "@/ipc/utils/lm_studio_utils";
import { DEFAULT_LM_STUDIO_ENDPOINT } from "@/constants/localModels";

describe("normalizeLmStudioBaseUrl", () => {
it("returns default endpoint when value is undefined", () => {
expect(normalizeLmStudioBaseUrl()).toBe(DEFAULT_LM_STUDIO_ENDPOINT);
});

it("trims whitespace and adds protocol", () => {
expect(normalizeLmStudioBaseUrl(" localhost ")).toBe(
DEFAULT_LM_STUDIO_ENDPOINT,
);
});

it("adds default port when missing", () => {
expect(normalizeLmStudioBaseUrl("192.168.0.10")).toBe(
"http://192.168.0.10:1234",
);
});

it("removes trailing /v1 if present", () => {
expect(normalizeLmStudioBaseUrl("http://example.com:9000/v1")).toBe(
"http://example.com:9000",
);
expect(normalizeLmStudioBaseUrl("http://example.com:9000/v1/")).toBe(
"http://example.com:9000",
);
});

it("preserves additional path segments", () => {
expect(normalizeLmStudioBaseUrl("http://example.com/custom/path/")).toBe(
"http://example.com/custom/path",
);
});
});

describe("getLMStudioBaseUrl", () => {
it("prefers env override when set", () => {
process.env.LM_STUDIO_BASE_URL_FOR_TESTING = "http://override:9999/v1";
expect(getLMStudioBaseUrl()).toBe("http://override:9999");
delete process.env.LM_STUDIO_BASE_URL_FOR_TESTING;
});
});
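
The normalizeLmStudioBaseUrl implementation itself is not part of this diff. As a rough sketch, a version that would satisfy the expectations above might look like this (the URL-based parsing and the regexes are assumptions, not the PR's actual code):

import {
  DEFAULT_LM_STUDIO_ENDPOINT,
  DEFAULT_LM_STUDIO_PORT,
} from "@/constants/localModels";

export function normalizeLmStudioBaseUrl(raw?: string): string {
  const trimmed = raw?.trim();
  if (!trimmed) {
    return DEFAULT_LM_STUDIO_ENDPOINT;
  }
  // Prepend a protocol when given a bare host.
  const withProtocol = /^https?:\/\//i.test(trimmed)
    ? trimmed
    : `http://${trimmed}`;
  const url = new URL(withProtocol);
  // Default the port only for host-only inputs (no explicit port, no path).
  if (!url.port && url.pathname === "/") {
    url.port = String(DEFAULT_LM_STUDIO_PORT);
  }
  // Drop a trailing /v1 (the handler appends API paths itself) and any trailing slash.
  const pathname = url.pathname.replace(/\/v1\/?$/, "").replace(/\/$/, "");
  return `${url.protocol}//${url.host}${pathname}`;
}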
10 changes: 8 additions & 2 deletions src/__tests__/parseOllamaHost.test.ts
@@ -1,20 +1,26 @@
import { parseOllamaHost } from "@/ipc/handlers/local_model_ollama_handler";
import { describe, it, expect } from "vitest";
+ import { DEFAULT_OLLAMA_ENDPOINT } from "@/constants/localModels";

describe("parseOllamaHost", () => {
it("should return default URL when no host is provided", () => {
const result = parseOllamaHost();
expect(result).toBe("http://localhost:11434");
expect(result).toBe(DEFAULT_OLLAMA_ENDPOINT);
});

it("should return default URL when host is undefined", () => {
const result = parseOllamaHost(undefined);
expect(result).toBe("http://localhost:11434");
expect(result).toBe(DEFAULT_OLLAMA_ENDPOINT);
});

it("should return default URL when host is empty string", () => {
const result = parseOllamaHost("");
expect(result).toBe(DEFAULT_OLLAMA_ENDPOINT);
});

it("should trim whitespace before parsing", () => {
const result = parseOllamaHost(" localhost ");
expect(result).toBe("http://localhost:11434");
});

describe("full URLs with protocol", () => {
187 changes: 187 additions & 0 deletions src/components/LocalModelEndpointSettings.tsx
@@ -0,0 +1,187 @@
import { useEffect, useState } from "react";
import { Input } from "@/components/ui/input";
import { Label } from "@/components/ui/label";
import { Button } from "@/components/ui/button";
import { useSettings } from "@/hooks/useSettings";
import { showError, showSuccess } from "@/lib/toast";
import type { UserSettings } from "@/lib/schemas";
import {
DEFAULT_LM_STUDIO_ENDPOINT,
DEFAULT_OLLAMA_ENDPOINT,
} from "@/constants/localModels";

type SavingTarget = "ollama" | "lmstudio" | null;

type EndpointKind = "ollama" | "lmstudio";

const endpointConfig: Record<
EndpointKind,
{
defaultValue: string;
label: string;
description: string;
successMessage: string;
errorMessage: string;
}
> = {
ollama: {
defaultValue: DEFAULT_OLLAMA_ENDPOINT,
label: "Local model endpoint (Ollama-compatible)",
description:
"Used for listing and running Ollama-compatible local models, including remote hosts.",
successMessage: "Ollama endpoint updated",
errorMessage: "Failed to update Ollama endpoint",
},
lmstudio: {
defaultValue: DEFAULT_LM_STUDIO_ENDPOINT,
label: "LM Studio API endpoint",
description:
"Base URL for the LM Studio server. Trailing /v1 is optional and will be handled automatically.",
successMessage: "LM Studio endpoint updated",
errorMessage: "Failed to update LM Studio endpoint",
},
};

export function LocalModelEndpointSettings() {
const { settings, updateSettings } = useSettings();
const [ollamaValue, setOllamaValue] = useState(DEFAULT_OLLAMA_ENDPOINT);
const [lmStudioValue, setLmStudioValue] = useState(
DEFAULT_LM_STUDIO_ENDPOINT,
);
const [saving, setSaving] = useState<SavingTarget>(null);

useEffect(() => {
if (settings?.ollamaEndpoint) {
setOllamaValue(settings.ollamaEndpoint);
} else {
setOllamaValue(DEFAULT_OLLAMA_ENDPOINT);
}
}, [settings?.ollamaEndpoint]);

useEffect(() => {
if (settings?.lmStudioEndpoint) {
setLmStudioValue(settings.lmStudioEndpoint);
} else {
setLmStudioValue(DEFAULT_LM_STUDIO_ENDPOINT);
}
}, [settings?.lmStudioEndpoint]);

if (!settings) {
return null;
}

const handleSave = async (kind: EndpointKind) => {
const value = kind === "ollama" ? ollamaValue : lmStudioValue;
const config = endpointConfig[kind];
const trimmed = value.trim();
const valueToPersist = trimmed.length > 0 ? trimmed : config.defaultValue;
const payload: Partial<UserSettings> =
kind === "ollama"
? { ollamaEndpoint: valueToPersist }
: { lmStudioEndpoint: valueToPersist };

setSaving(kind);
try {
await updateSettings(payload);
if (kind === "ollama") {
setOllamaValue(valueToPersist);
} else {
setLmStudioValue(valueToPersist);
}
showSuccess(config.successMessage);
} catch (error) {
const message =
error instanceof Error
? error.message
: String(error ?? "Unknown error");
showError(`${config.errorMessage}: ${message}`);
} finally {
setSaving(null);
}
};

const handleReset = async (kind: EndpointKind) => {
const config = endpointConfig[kind];
const payload: Partial<UserSettings> =
kind === "ollama"
? { ollamaEndpoint: config.defaultValue }
: { lmStudioEndpoint: config.defaultValue };

setSaving(kind);
try {
await updateSettings(payload);
if (kind === "ollama") {
setOllamaValue(config.defaultValue);
} else {
setLmStudioValue(config.defaultValue);
}
showSuccess(`${config.successMessage} (reset)`);
} catch (error) {
const message =
error instanceof Error
? error.message
: String(error ?? "Unknown error");
showError(`${config.errorMessage}: ${message}`);
} finally {
setSaving(null);
}
};

const renderEndpointField = (kind: EndpointKind) => {
const config = endpointConfig[kind];
const value = kind === "ollama" ? ollamaValue : lmStudioValue;
const onChange = kind === "ollama" ? setOllamaValue : setLmStudioValue;
const isSaving = saving === kind;
const isDefault = value === config.defaultValue;

return (
<div className="space-y-2">
<div className="space-y-1">
<Label htmlFor={`${kind}-endpoint`} className="text-sm font-medium">
{config.label}
</Label>
<p className="text-sm text-gray-500 dark:text-gray-400">
{config.description}
</p>
</div>
<div className="flex flex-col gap-2 sm:flex-row sm:items-center">
<Input
id={`${kind}-endpoint`}
value={value}
onChange={(event) => onChange(event.target.value)}
className="sm:flex-1"
autoComplete="off"
spellCheck={false}
/>
<div className="flex gap-2">
<Button
onClick={() => handleSave(kind)}
disabled={isSaving}
type="button"
>
{isSaving ? "Saving..." : "Save"}
</Button>
<Button
onClick={() => handleReset(kind)}
variant="ghost"
disabled={isSaving || isDefault}
type="button"
>
Reset
</Button>
</div>
</div>
<p className="text-xs text-gray-500 dark:text-gray-400">
Default: {config.defaultValue}
</p>
</div>
);
};

return (
<div className="space-y-6">
{renderEndpointField("ollama")}
{renderEndpointField("lmstudio")}
</div>
);
}
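
To connect the pieces: the component persists ollamaEndpoint / lmStudioEndpoint through updateSettings, and the IPC handlers below read them back when no environment override is present. A hypothetical round trip (the endpoint value is illustrative):

// Renderer side: save a custom endpoint.
await updateSettings({ ollamaEndpoint: "http://192.168.0.10:11434" });

// Main-process side: OLLAMA_HOST still wins when set; otherwise the saved
// setting is parsed, with the default endpoint as the last resort.
const apiUrl = getOllamaApiUrl(); // "http://192.168.0.10:11434" when OLLAMA_HOST is unset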
5 changes: 5 additions & 0 deletions src/constants/localModels.ts
@@ -0,0 +1,5 @@
export const DEFAULT_OLLAMA_ENDPOINT = "http://localhost:11434";
export const DEFAULT_OLLAMA_PORT = 11434;

export const DEFAULT_LM_STUDIO_ENDPOINT = "http://localhost:1234";
export const DEFAULT_LM_STUDIO_PORT = 1234;
9 changes: 4 additions & 5 deletions src/ipc/handlers/local_model_lmstudio_handler.ts
@@ -1,7 +1,7 @@
import { ipcMain } from "electron";
import log from "electron-log";
- import type { LocalModelListResponse, LocalModel } from "../ipc_types";
- import { LM_STUDIO_BASE_URL } from "../utils/lm_studio_utils";
+ import type { LocalModel, LocalModelListResponse } from "../ipc_types";
+ import { getLMStudioBaseUrl } from "../utils/lm_studio_utils";

const logger = log.scope("lmstudio_handler");

@@ -19,9 +19,8 @@ export interface LMStudioModel {
}

export async function fetchLMStudioModels(): Promise<LocalModelListResponse> {
- const modelsResponse: Response = await fetch(
-   `${LM_STUDIO_BASE_URL}/api/v0/models`,
- );
+ const baseUrl = getLMStudioBaseUrl();
+ const modelsResponse: Response = await fetch(`${baseUrl}/api/v0/models`);
if (!modelsResponse.ok) {
throw new Error("Failed to fetch models from LM Studio");
}
33 changes: 25 additions & 8 deletions src/ipc/handlers/local_model_ollama_handler.ts
@@ -1,12 +1,21 @@
+ import {
+   DEFAULT_OLLAMA_ENDPOINT,
+   DEFAULT_OLLAMA_PORT,
+ } from "@/constants/localModels";
import { ipcMain } from "electron";
import log from "electron-log";
- import { LocalModelListResponse, LocalModel } from "../ipc_types";
+ import { readSettings } from "../../main/settings";
+ import { LocalModel, LocalModelListResponse } from "../ipc_types";

const logger = log.scope("ollama_handler");

export function parseOllamaHost(host?: string): string {
if (!host) {
return "http://localhost:11434";
return DEFAULT_OLLAMA_ENDPOINT;
}

+ // Trim stray whitespace so values like " localhost " parse correctly.
+ host = host.trim();
+ if (!host) {
+   return DEFAULT_OLLAMA_ENDPOINT;
+ }

// If it already has a protocol, use as-is
@@ -30,15 +39,22 @@

// Check if it's a plain IPv6 address (contains :: or multiple colons)
if (host.includes("::") || host.split(":").length > 2) {
- return `http://[${host}]:11434`;
+ const address = host.startsWith("[") ? host : `[${host}]`;
+ return `http://${address}:${DEFAULT_OLLAMA_PORT}`;
}

// If it's just a hostname, add default port
- return `http://${host}:11434`;
+ return `http://${host}:${DEFAULT_OLLAMA_PORT}`;
}

export function getOllamaApiUrl(): string {
- return parseOllamaHost(process.env.OLLAMA_HOST);
+ const envHost = process.env.OLLAMA_HOST;
+ if (envHost && envHost.trim()) {
+   return parseOllamaHost(envHost);
+ }
+ const settings = readSettings();
+ const endpointFromSettings = settings.ollamaEndpoint;
+ return parseOllamaHost(endpointFromSettings);
}

interface OllamaModel {
@@ -56,8 +72,9 @@
}

export async function fetchOllamaModels(): Promise<LocalModelListResponse> {
+ const apiUrl = getOllamaApiUrl();
try {
- const response = await fetch(`${getOllamaApiUrl()}/api/tags`);
+ const response = await fetch(`${apiUrl}/api/tags`);
if (!response.ok) {
throw new Error(`Failed to fetch model: ${response.statusText}`);
}
@@ -89,10 +106,10 @@
(error as Error).message.includes("fetch failed")
) {
throw new Error(
"Could not connect to Ollama. Make sure it's running at http://localhost:11434",
`Could not connect to the local model endpoint at ${apiUrl}.`,
);
}
throw new Error("Failed to fetch models from Ollama");
throw new Error("Failed to fetch models from the local model endpoint");
}
}
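
For reference, the updated parseOllamaHost resolves a few representative inputs like this (a sketch based on the branches shown above; the host names are made up):

parseOllamaHost();                  // "http://localhost:11434" (DEFAULT_OLLAMA_ENDPOINT)
parseOllamaHost("remote-box");      // "http://remote-box:11434" (default port appended)
parseOllamaHost("::1");             // "http://[::1]:11434" (IPv6 hosts get bracketed)
parseOllamaHost("https://my.host"); // returned as-is: an explicit protocol is respected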
