Mirror of https://github.com/TriliumNext/Notes — merge of branch 'develop' into 'tree', commit df68ed33bc.
@ -0,0 +1,179 @@
|
||||
import "@excalidraw/excalidraw/index.css";
|
||||
import { Excalidraw, getSceneVersion, exportToSvg } from "@excalidraw/excalidraw";
|
||||
import { createElement, render, unmountComponentAtNode } from "preact/compat";
|
||||
import { AppState, BinaryFileData, ExcalidrawImperativeAPI, ExcalidrawProps, LibraryItem } from "@excalidraw/excalidraw/types";
|
||||
import type { ComponentType } from "preact";
|
||||
import { ExcalidrawElement, NonDeletedExcalidrawElement, Theme } from "@excalidraw/excalidraw/element/types";
|
||||
|
||||
/**
 * Persisted payload of a canvas note: the Excalidraw elements, the binary
 * files they reference (e.g. pasted images) and a subset of the editor's
 * UI state (scroll position, zoom, theme, …).
 *
 * NOTE(review): `files` is declared as an array, but the loading code iterates
 * it with `for..in` and nearby comments say files are stored as a key-indexed
 * object — confirm the actual persisted shape.
 */
export interface CanvasContent {
    elements: ExcalidrawElement[];
    files: BinaryFileData[];
    appState: Partial<AppState>;
}

/**
 * Sentinel marking a freshly constructed canvas with no persisted scene yet;
 * Excalidraw scene versions are always > 0, so -1 can never collide.
 */
const SCENE_VERSION_INITIAL = -1;
|
||||
|
||||
export default class Canvas {
|
||||
|
||||
private currentSceneVersion: number;
|
||||
private opts: ExcalidrawProps;
|
||||
private excalidrawApi!: ExcalidrawImperativeAPI;
|
||||
private initializedPromise: JQuery.Deferred<void>;
|
||||
|
||||
constructor(opts: ExcalidrawProps) {
|
||||
this.opts = opts;
|
||||
this.currentSceneVersion = SCENE_VERSION_INITIAL;
|
||||
this.initializedPromise = $.Deferred();
|
||||
}
|
||||
|
||||
renderCanvas(targetEl: HTMLElement) {
|
||||
unmountComponentAtNode(targetEl);
|
||||
render(this.createCanvasElement({
|
||||
...this.opts,
|
||||
excalidrawAPI: (api: ExcalidrawImperativeAPI) => {
|
||||
this.excalidrawApi = api;
|
||||
this.initializedPromise.resolve();
|
||||
},
|
||||
}), targetEl);
|
||||
}
|
||||
|
||||
async waitForApiToBecomeAvailable() {
|
||||
while (!this.excalidrawApi) {
|
||||
await this.initializedPromise;
|
||||
}
|
||||
}
|
||||
|
||||
private createCanvasElement(opts: ExcalidrawProps) {
|
||||
return createElement("div", { className: "excalidraw-wrapper", },
|
||||
createElement(Excalidraw as ComponentType<ExcalidrawProps>, opts)
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* needed to ensure, that multipleOnChangeHandler calls do not trigger a save.
|
||||
* we compare the scene version as suggested in:
|
||||
* https://github.com/excalidraw/excalidraw/issues/3014#issuecomment-778115329
|
||||
*
|
||||
* info: sceneVersions are not incrementing. it seems to be a pseudo-random number
|
||||
*/
|
||||
isNewSceneVersion() {
|
||||
const sceneVersion = this.getSceneVersion();
|
||||
|
||||
return (
|
||||
this.currentSceneVersion === SCENE_VERSION_INITIAL || // initial scene version update
|
||||
this.currentSceneVersion !== sceneVersion
|
||||
); // ensure scene changed
|
||||
}
|
||||
|
||||
getSceneVersion() {
|
||||
const elements = this.excalidrawApi.getSceneElements();
|
||||
return getSceneVersion(elements);
|
||||
}
|
||||
|
||||
updateSceneVersion() {
|
||||
this.currentSceneVersion = this.getSceneVersion();
|
||||
}
|
||||
|
||||
resetSceneVersion() {
|
||||
this.currentSceneVersion = SCENE_VERSION_INITIAL;
|
||||
}
|
||||
|
||||
isInitialScene() {
|
||||
return this.currentSceneVersion === SCENE_VERSION_INITIAL;
|
||||
}
|
||||
|
||||
resetScene(theme: Theme) {
|
||||
this.excalidrawApi.updateScene({
|
||||
elements: [],
|
||||
appState: {
|
||||
theme
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
loadData(content: CanvasContent, theme: Theme) {
|
||||
const { elements, files } = content;
|
||||
const appState: Partial<AppState> = content.appState ?? {};
|
||||
appState.theme = theme;
|
||||
|
||||
// files are expected in an array when loading. they are stored as a key-index object
|
||||
// see example for loading here:
|
||||
// https://github.com/excalidraw/excalidraw/blob/c5a7723185f6ca05e0ceb0b0d45c4e3fbcb81b2a/src/packages/excalidraw/example/App.js#L68
|
||||
const fileArray: BinaryFileData[] = [];
|
||||
for (const fileId in files) {
|
||||
const file = files[fileId];
|
||||
// TODO: dataURL is replaceable with a trilium image url
|
||||
// maybe we can save normal images (pasted) with base64 data url, and trilium images
|
||||
// with their respective url! nice
|
||||
// file.dataURL = "http://localhost:8080/api/images/ltjOiU8nwoZx/start.png";
|
||||
fileArray.push(file);
|
||||
}
|
||||
|
||||
// Update the scene
|
||||
// TODO: Fix type of sceneData
|
||||
this.excalidrawApi.updateScene({
|
||||
elements,
|
||||
appState: appState as AppState
|
||||
});
|
||||
this.excalidrawApi.addFiles(fileArray);
|
||||
this.excalidrawApi.history.clear();
|
||||
}
|
||||
|
||||
async getData() {
|
||||
const elements = this.excalidrawApi.getSceneElements();
|
||||
const appState = this.excalidrawApi.getAppState();
|
||||
|
||||
/**
|
||||
* A file is not deleted, even though removed from canvas. Therefore, we only keep
|
||||
* files that are referenced by an element. Maybe this will change with a new excalidraw version?
|
||||
*/
|
||||
const files = this.excalidrawApi.getFiles();
|
||||
// parallel svg export to combat bitrot and enable rendering image for note inclusion, preview, and share
|
||||
const svg = await exportToSvg({
|
||||
elements,
|
||||
appState,
|
||||
exportPadding: 5, // 5 px padding
|
||||
files
|
||||
});
|
||||
const svgString = svg.outerHTML;
|
||||
|
||||
const activeFiles: Record<string, BinaryFileData> = {};
|
||||
elements.forEach((element: NonDeletedExcalidrawElement) => {
|
||||
if ("fileId" in element && element.fileId) {
|
||||
activeFiles[element.fileId] = files[element.fileId];
|
||||
}
|
||||
});
|
||||
|
||||
const content = {
|
||||
type: "excalidraw",
|
||||
version: 2,
|
||||
elements,
|
||||
files: activeFiles,
|
||||
appState: {
|
||||
scrollX: appState.scrollX,
|
||||
scrollY: appState.scrollY,
|
||||
zoom: appState.zoom
|
||||
}
|
||||
};
|
||||
|
||||
return {
|
||||
content,
|
||||
svg: svgString
|
||||
}
|
||||
}
|
||||
|
||||
async getLibraryItems() {
|
||||
return this.excalidrawApi.updateLibrary({
|
||||
libraryItems() {
|
||||
return [];
|
||||
},
|
||||
merge: true
|
||||
});
|
||||
}
|
||||
|
||||
async updateLibrary(libraryItems: LibraryItem[]) {
|
||||
this.excalidrawApi.updateLibrary({ libraryItems, merge: false });
|
||||
}
|
||||
|
||||
}
|
||||
@ -1,6 +1,5 @@
|
||||
import type FNote from "../../../entities/fnote";
|
||||
import type { OptionPages } from "../content_widget";
|
||||
import OptionsWidget from "./options_widget";
|
||||
import type { OptionPages } from "../../content_widget";
|
||||
import OptionsWidget from "../options_widget";
|
||||
|
||||
const TPL = `\
|
||||
<div class="options-section">
|
||||
File diff suppressed because one or more lines are too long
@ -0,0 +1,389 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { OpenAIService } from './openai_service.js';
|
||||
import { AnthropicService } from './anthropic_service.js';
|
||||
import { OllamaService } from './ollama_service.js';
|
||||
import type { ChatCompletionOptions } from '../ai_interface.js';
|
||||
import * as providers from './providers.js';
|
||||
import options from '../../options.js';
|
||||
|
||||
// Mock dependencies
// The option store is mocked so each test can control provider configuration
// through vi.mocked(options.getOption) without a real database.
vi.mock('../../options.js', () => ({
    default: {
        getOption: vi.fn(),
        getOptionBool: vi.fn()
    }
}));

// Silence logging from the services under test.
vi.mock('../../log.js', () => ({
    default: {
        info: vi.fn(),
        error: vi.fn(),
        warn: vi.fn()
    }
}));

// Replace the provider SDK clients with inert stubs so no network calls are made.
vi.mock('openai', () => ({
    default: class MockOpenAI {
        chat = {
            completions: {
                create: vi.fn()
            }
        };
    }
}));

vi.mock('@anthropic-ai/sdk', () => ({
    default: class MockAnthropic {
        messages = {
            create: vi.fn()
        };
    }
}));

vi.mock('ollama', () => ({
    Ollama: class MockOllama {
        chat = vi.fn();
        show = vi.fn();
    }
}));
|
||||
|
||||
describe('LLM Model Selection with Special Characters', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
// Set default options
|
||||
vi.mocked(options.getOption).mockImplementation((key: string) => {
|
||||
const optionMap: Record<string, string> = {
|
||||
'aiEnabled': 'true',
|
||||
'aiTemperature': '0.7',
|
||||
'aiSystemPrompt': 'You are a helpful assistant.',
|
||||
'openaiApiKey': 'test-api-key',
|
||||
'openaiBaseUrl': 'https://api.openai.com/v1',
|
||||
'anthropicApiKey': 'test-anthropic-key',
|
||||
'anthropicBaseUrl': 'https://api.anthropic.com',
|
||||
'ollamaBaseUrl': 'http://localhost:11434'
|
||||
};
|
||||
return optionMap[key] || '';
|
||||
});
|
||||
vi.mocked(options.getOptionBool).mockReturnValue(true);
|
||||
});
|
||||
|
||||
describe('OpenAI Model Names', () => {
|
||||
it('should correctly handle model names with periods', async () => {
|
||||
const modelName = 'gpt-4.1-turbo-preview';
|
||||
vi.mocked(options.getOption).mockImplementation((key: string) => {
|
||||
if (key === 'openaiDefaultModel') return modelName;
|
||||
return '';
|
||||
});
|
||||
|
||||
const service = new OpenAIService();
|
||||
const opts: ChatCompletionOptions = {
|
||||
stream: false
|
||||
};
|
||||
|
||||
// Spy on getOpenAIOptions to verify model name is passed correctly
|
||||
const getOpenAIOptionsSpy = vi.spyOn(providers, 'getOpenAIOptions');
|
||||
|
||||
try {
|
||||
await service.generateChatCompletion([{ role: 'user', content: 'test' }], opts);
|
||||
} catch (error) {
|
||||
// Expected to fail due to mocked API
|
||||
}
|
||||
|
||||
expect(getOpenAIOptionsSpy).toHaveBeenCalledWith(opts);
|
||||
const result = getOpenAIOptionsSpy.mock.results[0].value;
|
||||
expect(result.model).toBe(modelName);
|
||||
});
|
||||
|
||||
it('should handle model names with slashes', async () => {
|
||||
const modelName = 'openai/gpt-4/turbo-2024';
|
||||
vi.mocked(options.getOption).mockImplementation((key: string) => {
|
||||
if (key === 'openaiDefaultModel') return modelName;
|
||||
return '';
|
||||
});
|
||||
|
||||
const service = new OpenAIService();
|
||||
const opts: ChatCompletionOptions = {
|
||||
model: modelName,
|
||||
stream: false
|
||||
};
|
||||
|
||||
const getOpenAIOptionsSpy = vi.spyOn(providers, 'getOpenAIOptions');
|
||||
|
||||
try {
|
||||
await service.generateChatCompletion([{ role: 'user', content: 'test' }], opts);
|
||||
} catch (error) {
|
||||
// Expected to fail due to mocked API
|
||||
}
|
||||
|
||||
const result = getOpenAIOptionsSpy.mock.results[0].value;
|
||||
expect(result.model).toBe(modelName);
|
||||
});
|
||||
|
||||
it('should handle model names with colons', async () => {
|
||||
const modelName = 'custom:gpt-4:finetuned';
|
||||
const opts: ChatCompletionOptions = {
|
||||
model: modelName,
|
||||
stream: false
|
||||
};
|
||||
|
||||
const getOpenAIOptionsSpy = vi.spyOn(providers, 'getOpenAIOptions');
|
||||
|
||||
const openaiOptions = providers.getOpenAIOptions(opts);
|
||||
expect(openaiOptions.model).toBe(modelName);
|
||||
});
|
||||
|
||||
it('should handle model names with underscores and hyphens', async () => {
|
||||
const modelName = 'gpt-4_turbo-preview_v2.1';
|
||||
const opts: ChatCompletionOptions = {
|
||||
model: modelName,
|
||||
stream: false
|
||||
};
|
||||
|
||||
const openaiOptions = providers.getOpenAIOptions(opts);
|
||||
expect(openaiOptions.model).toBe(modelName);
|
||||
});
|
||||
|
||||
it('should handle model names with special characters in API request', async () => {
|
||||
const modelName = 'gpt-4.1-turbo@latest';
|
||||
vi.mocked(options.getOption).mockImplementation((key: string) => {
|
||||
if (key === 'openaiDefaultModel') return modelName;
|
||||
if (key === 'openaiApiKey') return 'test-key';
|
||||
if (key === 'openaiBaseUrl') return 'https://api.openai.com/v1';
|
||||
return '';
|
||||
});
|
||||
|
||||
const service = new OpenAIService();
|
||||
|
||||
// Access the private openai client through the service
|
||||
const client = (service as any).getClient('test-key');
|
||||
const createSpy = vi.spyOn(client.chat.completions, 'create');
|
||||
|
||||
try {
|
||||
await service.generateChatCompletion(
|
||||
[{ role: 'user', content: 'test' }],
|
||||
{ stream: false }
|
||||
);
|
||||
} catch (error) {
|
||||
// Expected due to mock
|
||||
}
|
||||
|
||||
expect(createSpy).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
model: modelName
|
||||
})
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Anthropic Model Names', () => {
|
||||
it('should correctly handle Anthropic model names with periods', async () => {
|
||||
const modelName = 'claude-3.5-sonnet-20241022';
|
||||
vi.mocked(options.getOption).mockImplementation((key: string) => {
|
||||
if (key === 'anthropicDefaultModel') return modelName;
|
||||
if (key === 'anthropicApiKey') return 'test-key';
|
||||
return '';
|
||||
});
|
||||
|
||||
const opts: ChatCompletionOptions = {
|
||||
stream: false
|
||||
};
|
||||
|
||||
const anthropicOptions = providers.getAnthropicOptions(opts);
|
||||
expect(anthropicOptions.model).toBe(modelName);
|
||||
});
|
||||
|
||||
it('should handle Anthropic model names with colons', async () => {
|
||||
const modelName = 'anthropic:claude-3:opus';
|
||||
const opts: ChatCompletionOptions = {
|
||||
model: modelName,
|
||||
stream: false
|
||||
};
|
||||
|
||||
const anthropicOptions = providers.getAnthropicOptions(opts);
|
||||
expect(anthropicOptions.model).toBe(modelName);
|
||||
});
|
||||
|
||||
it('should handle Anthropic model names in API request', async () => {
|
||||
const modelName = 'claude-3.5-sonnet@beta';
|
||||
vi.mocked(options.getOption).mockImplementation((key: string) => {
|
||||
if (key === 'anthropicDefaultModel') return modelName;
|
||||
if (key === 'anthropicApiKey') return 'test-key';
|
||||
if (key === 'anthropicBaseUrl') return 'https://api.anthropic.com';
|
||||
return '';
|
||||
});
|
||||
|
||||
const service = new AnthropicService();
|
||||
|
||||
// Access the private anthropic client
|
||||
const client = (service as any).getClient('test-key');
|
||||
const createSpy = vi.spyOn(client.messages, 'create');
|
||||
|
||||
try {
|
||||
await service.generateChatCompletion(
|
||||
[{ role: 'user', content: 'test' }],
|
||||
{ stream: false }
|
||||
);
|
||||
} catch (error) {
|
||||
// Expected due to mock
|
||||
}
|
||||
|
||||
expect(createSpy).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
model: modelName
|
||||
})
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Ollama Model Names', () => {
|
||||
it('should correctly handle Ollama model names with colons', async () => {
|
||||
const modelName = 'llama3.1:70b-instruct-q4_K_M';
|
||||
vi.mocked(options.getOption).mockImplementation((key: string) => {
|
||||
if (key === 'ollamaDefaultModel') return modelName;
|
||||
if (key === 'ollamaBaseUrl') return 'http://localhost:11434';
|
||||
return '';
|
||||
});
|
||||
|
||||
const opts: ChatCompletionOptions = {
|
||||
stream: false
|
||||
};
|
||||
|
||||
const ollamaOptions = await providers.getOllamaOptions(opts);
|
||||
expect(ollamaOptions.model).toBe(modelName);
|
||||
});
|
||||
|
||||
it('should handle Ollama model names with slashes', async () => {
|
||||
const modelName = 'library/mistral:7b-instruct-v0.3';
|
||||
const opts: ChatCompletionOptions = {
|
||||
model: modelName,
|
||||
stream: false
|
||||
};
|
||||
|
||||
const ollamaOptions = await providers.getOllamaOptions(opts);
|
||||
expect(ollamaOptions.model).toBe(modelName);
|
||||
});
|
||||
|
||||
it('should handle Ollama model names with special characters in options', async () => {
|
||||
const modelName = 'custom/llama3.1:70b-q4_K_M@latest';
|
||||
vi.mocked(options.getOption).mockImplementation((key: string) => {
|
||||
if (key === 'ollamaDefaultModel') return modelName;
|
||||
if (key === 'ollamaBaseUrl') return 'http://localhost:11434';
|
||||
return '';
|
||||
});
|
||||
|
||||
// Test that the model name is preserved in the options
|
||||
const opts: ChatCompletionOptions = {
|
||||
stream: false
|
||||
};
|
||||
|
||||
const ollamaOptions = await providers.getOllamaOptions(opts);
|
||||
expect(ollamaOptions.model).toBe(modelName);
|
||||
|
||||
// Also test with model specified in options
|
||||
const optsWithModel: ChatCompletionOptions = {
|
||||
model: 'another/model:v2.0@beta',
|
||||
stream: false
|
||||
};
|
||||
|
||||
const ollamaOptionsWithModel = await providers.getOllamaOptions(optsWithModel);
|
||||
expect(ollamaOptionsWithModel.model).toBe('another/model:v2.0@beta');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Model Name Edge Cases', () => {
|
||||
it('should handle empty model names gracefully', () => {
|
||||
const opts: ChatCompletionOptions = {
|
||||
model: '',
|
||||
stream: false
|
||||
};
|
||||
|
||||
expect(() => providers.getOpenAIOptions(opts)).toThrow('No OpenAI model configured');
|
||||
});
|
||||
|
||||
it('should handle model names with unicode characters', async () => {
|
||||
const modelName = 'gpt-4-日本語-model';
|
||||
const opts: ChatCompletionOptions = {
|
||||
model: modelName,
|
||||
stream: false
|
||||
};
|
||||
|
||||
const openaiOptions = providers.getOpenAIOptions(opts);
|
||||
expect(openaiOptions.model).toBe(modelName);
|
||||
});
|
||||
|
||||
it('should handle model names with spaces (encoded)', async () => {
|
||||
const modelName = 'custom model v2.1';
|
||||
const opts: ChatCompletionOptions = {
|
||||
model: modelName,
|
||||
stream: false
|
||||
};
|
||||
|
||||
const openaiOptions = providers.getOpenAIOptions(opts);
|
||||
expect(openaiOptions.model).toBe(modelName);
|
||||
});
|
||||
|
||||
it('should preserve exact model name without transformation', async () => {
|
||||
const complexModelName = 'org/model-v1.2.3:tag@version#variant';
|
||||
const opts: ChatCompletionOptions = {
|
||||
model: complexModelName,
|
||||
stream: false
|
||||
};
|
||||
|
||||
// Test for all providers
|
||||
const openaiOptions = providers.getOpenAIOptions(opts);
|
||||
expect(openaiOptions.model).toBe(complexModelName);
|
||||
|
||||
const anthropicOptions = providers.getAnthropicOptions(opts);
|
||||
expect(anthropicOptions.model).toBe(complexModelName);
|
||||
|
||||
const ollamaOptions = await providers.getOllamaOptions(opts);
|
||||
expect(ollamaOptions.model).toBe(complexModelName);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Model Configuration Parsing', () => {
|
||||
it('should not confuse provider prefix with model name containing colons', async () => {
|
||||
// This model name has a colon but 'custom' is not a known provider
|
||||
const modelName = 'custom:model:v1.2';
|
||||
const opts: ChatCompletionOptions = {
|
||||
model: modelName,
|
||||
stream: false
|
||||
};
|
||||
|
||||
const openaiOptions = providers.getOpenAIOptions(opts);
|
||||
expect(openaiOptions.model).toBe(modelName);
|
||||
});
|
||||
|
||||
it('should handle provider prefix correctly', async () => {
|
||||
// When model has provider prefix, it should still use the full string
|
||||
const modelName = 'openai:gpt-4.1-turbo';
|
||||
const opts: ChatCompletionOptions = {
|
||||
model: modelName,
|
||||
stream: false
|
||||
};
|
||||
|
||||
const openaiOptions = providers.getOpenAIOptions(opts);
|
||||
expect(openaiOptions.model).toBe(modelName);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Integration with REST API', () => {
|
||||
it('should pass model names correctly through REST chat service', async () => {
|
||||
const modelName = 'gpt-4.1-turbo-preview@latest';
|
||||
|
||||
// Mock the configuration helpers
|
||||
vi.doMock('../config/configuration_helpers.js', () => ({
|
||||
getSelectedModelConfig: vi.fn().mockResolvedValue({
|
||||
model: modelName,
|
||||
provider: 'openai'
|
||||
}),
|
||||
isAIEnabled: vi.fn().mockResolvedValue(true)
|
||||
}));
|
||||
|
||||
const { getSelectedModelConfig } = await import('../config/configuration_helpers.js');
|
||||
const config = await getSelectedModelConfig();
|
||||
|
||||
expect(config?.model).toBe(modelName);
|
||||
});
|
||||
});
|
||||
});
|
||||
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue