From c89b0c54418d6b739c765162ca180c8d154a6af8 Mon Sep 17 00:00:00 2001
From: Mohammed Farghal
Date: Sun, 29 Dec 2024 19:20:59 +0100
Subject: feat: Add support for embeddings in the inference interface (#403)

* support embeddings generation in inference.ts

(cherry picked from commit 9ae8773ad13ed87af8f72f167bdd56e02ea66f15)

* make AI worker generate embeddings for text bookmarks

* fix unintentional change -- inference image model

* support embeddings for PDF bookmarks

* Upgrade drizzle-kit

The existing version does not work with the upgraded version of
drizzle-orm. I removed the "driver" field to match the new schema of
the Config. Quoting from their Config docs:

  * `driver` - optional param that is responsible for explicitly
    providing a driver to use when accessing a database
  * *Possible values*: `aws-data-api`, `d1-http`, `expo`, `turso`, `pglite`
  * If you don't use AWS Data API, D1, Turso or Expo - you don't need
    this driver. You can check a driver strategy choice here: https://orm.

* fix formatting and lint

* add comments about truncating content

* Revert "Upgrade drizzle-kit"

This reverts commit 08a02c8df4ea403de65986ed1265940c6c994a20.

* revert keep alive field in Ollama

* change the interface to accept multiple inputs

* docs

---------

Co-authored-by: Mohamed Bassem
---
 packages/shared/config.ts    |  4 ++++
 packages/shared/inference.ts | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+)

diff --git a/packages/shared/config.ts b/packages/shared/config.ts
index aec88096..7b74fc21 100644
--- a/packages/shared/config.ts
+++ b/packages/shared/config.ts
@@ -24,6 +24,7 @@ const allEnv = z.object({
   INFERENCE_JOB_TIMEOUT_SEC: z.coerce.number().default(30),
   INFERENCE_TEXT_MODEL: z.string().default("gpt-4o-mini"),
   INFERENCE_IMAGE_MODEL: z.string().default("gpt-4o-mini"),
+  EMBEDDING_TEXT_MODEL: z.string().default("text-embedding-3-small"),
   INFERENCE_CONTEXT_LENGTH: z.coerce.number().default(2048),
   OCR_CACHE_DIR: z.string().optional(),
   OCR_LANGS: z
@@ -90,6 +91,9 @@ const serverConfigSchema = allEnv.transform((val) => {
       inferredTagLang: val.INFERENCE_LANG,
       contextLength: val.INFERENCE_CONTEXT_LENGTH,
     },
+    embedding: {
+      textModel: val.EMBEDDING_TEXT_MODEL,
+    },
     crawler: {
       numWorkers: val.CRAWLER_NUM_WORKERS,
       headlessBrowser: val.CRAWLER_HEADLESS_BROWSER,
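The new EMBEDDING_TEXT_MODEL variable surfaces as serverConfig.embedding.textModel, which both embedding implementations in the next diff read. A minimal sketch of how the value resolves -- not part of this diff, and assuming the shared package's default serverConfig export:

import serverConfig from "./config";

// EMBEDDING_TEXT_MODEL is parsed by the zod schema above and falls back
// to "text-embedding-3-small" when the environment variable is unset.
const model = serverConfig.embedding.textModel;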
diff --git a/packages/shared/inference.ts b/packages/shared/inference.ts
index 7cb88819..1573382f 100644
--- a/packages/shared/inference.ts
+++ b/packages/shared/inference.ts
@@ -9,6 +9,10 @@ export interface InferenceResponse {
   totalTokens: number | undefined;
 }

+export interface EmbeddingResponse {
+  embeddings: number[][];
+}
+
 export interface InferenceOptions {
   json: boolean;
 }
@@ -28,6 +32,7 @@ export interface InferenceClient {
     image: string,
     opts: InferenceOptions,
   ): Promise<InferenceResponse>;
+  generateEmbeddingFromText(inputs: string[]): Promise<EmbeddingResponse>;
 }

 export class InferenceClientFactory {
@@ -103,6 +108,20 @@ class OpenAIInferenceClient implements InferenceClient {
     }
     return { response, totalTokens: chatCompletion.usage?.total_tokens };
   }
+
+  async generateEmbeddingFromText(
+    inputs: string[],
+  ): Promise<EmbeddingResponse> {
+    const model = serverConfig.embedding.textModel;
+    const embedResponse = await this.openAI.embeddings.create({
+      model: model,
+      input: inputs,
+    });
+    const embedding2D: number[][] = embedResponse.data.map(
+      (embedding: OpenAI.Embedding) => embedding.embedding,
+    );
+    return { embeddings: embedding2D };
+  }
 }

 class OllamaInferenceClient implements InferenceClient {
@@ -183,4 +202,17 @@ class OllamaInferenceClient implements InferenceClient {
       opts,
     );
   }
+
+  async generateEmbeddingFromText(
+    inputs: string[],
+  ): Promise<EmbeddingResponse> {
+    const embedding = await this.ollama.embed({
+      model: serverConfig.embedding.textModel,
+      input: inputs,
+      // Truncate the input to fit the model's max token limit. In the
+      // future we want to split over-long inputs into multiple parts.
+      truncate: true,
+    });
+    return { embeddings: embedding.embeddings };
+  }
 }
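With both clients implementing the new method, callers can embed inputs in batches without caring which backend is configured. A minimal usage sketch, not part of this diff: it assumes InferenceClientFactory exposes a static build() returning an InferenceClient or null depending on which backend is configured, and the "@hoarder/shared/inference" import path is illustrative.

import { InferenceClientFactory } from "@hoarder/shared/inference";

async function embedTexts(texts: string[]): Promise<number[][]> {
  const client = InferenceClientFactory.build();
  if (!client) {
    throw new Error("No inference client is configured");
  }
  // One vector per input, in input order. Dimensionality depends on
  // EMBEDDING_TEXT_MODEL (1536 for the default text-embedding-3-small).
  const { embeddings } = await client.generateEmbeddingFromText(texts);
  return embeddings;
}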