aboutsummaryrefslogtreecommitdiffstats
path: root/packages/shared
diff options
context:
space:
mode:
Diffstat (limited to 'packages/shared')
-rw-r--r--packages/shared/assetdb.ts19
-rw-r--r--packages/shared/langs.ts14
-rw-r--r--packages/shared/package.json9
-rw-r--r--packages/shared/searchQueryParser.test.ts391
-rw-r--r--packages/shared/searchQueryParser.ts360
-rw-r--r--packages/shared/types/bookmarks.ts12
-rw-r--r--packages/shared/types/lists.ts82
-rw-r--r--packages/shared/types/search.ts91
-rw-r--r--packages/shared/vitest.config.ts14
9 files changed, 970 insertions, 22 deletions
diff --git a/packages/shared/assetdb.ts b/packages/shared/assetdb.ts
index fb7d2461..2ef69279 100644
--- a/packages/shared/assetdb.ts
+++ b/packages/shared/assetdb.ts
@@ -123,6 +123,25 @@ export async function readAsset({
return { asset, metadata };
}
+export function createAssetReadStream({
+ userId,
+ assetId,
+ start,
+ end,
+}: {
+ userId: string;
+ assetId: string;
+ start?: number;
+ end?: number;
+}) {
+ const assetDir = getAssetDir(userId, assetId);
+
+ return fs.createReadStream(path.join(assetDir, "asset.bin"), {
+ start,
+ end,
+ });
+}
+
export async function readAssetMetadata({
userId,
assetId,
diff --git a/packages/shared/langs.ts b/packages/shared/langs.ts
index 736cca05..93df1d67 100644
--- a/packages/shared/langs.ts
+++ b/packages/shared/langs.ts
@@ -1,12 +1,20 @@
export const langNameMappings: Record<string, string> = {
en: "English",
- de: "German",
+ zh: "Simplified Chinese",
+ zhtw: "Traditional Chinese",
+ hr: "Croatian",
+ da: "Danish",
+ nl: "Dutch",
fr: "French",
+ gl: "Galician",
+ de: "German",
+ it: "Italian",
+ ja: "Japanese",
pl: "Polish",
+ ru: "Russian",
+ sp: "Spanish",
sv: "Swedish",
tr: "Turkish",
- zh: "Simplified Chinese",
- zhtw: "Traditional Chinese",
};
export const supportedLangs = Object.keys(langNameMappings);
diff --git a/packages/shared/package.json b/packages/shared/package.json
index d741b70f..93d5495a 100644
--- a/packages/shared/package.json
+++ b/packages/shared/package.json
@@ -10,18 +10,23 @@
"meilisearch": "^0.37.0",
"ollama": "^0.5.9",
"openai": "^4.67.1",
+ "typescript-parsec": "^0.3.4",
"winston": "^3.11.0",
"zod": "^3.22.4"
},
"devDependencies": {
"@hoarder/eslint-config": "workspace:^0.2.0",
"@hoarder/prettier-config": "workspace:^0.1.0",
- "@hoarder/tsconfig": "workspace:^0.1.0"
+ "@hoarder/tsconfig": "workspace:^0.1.0",
+ "vitest": "^1.3.1"
},
"scripts": {
"typecheck": "tsc --noEmit",
"format": "prettier . --ignore-path ../../.prettierignore",
- "lint": "eslint ."
+ "format:fix": "prettier . --write --ignore-path ../../.prettierignore",
+ "lint": "eslint .",
+ "lint:fix": "eslint . --fix",
+ "test": "vitest"
},
"main": "index.ts",
"eslintConfig": {
diff --git a/packages/shared/searchQueryParser.test.ts b/packages/shared/searchQueryParser.test.ts
new file mode 100644
index 00000000..5af7ca2f
--- /dev/null
+++ b/packages/shared/searchQueryParser.test.ts
@@ -0,0 +1,391 @@
+import { describe, expect, test } from "vitest";
+
+import { parseSearchQuery } from "./searchQueryParser";
+
+describe("Search Query Parser", () => {
+ test("simple is queries", () => {
+ expect(parseSearchQuery("is:archived")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "archived",
+ archived: true,
+ },
+ });
+ expect(parseSearchQuery("-is:archived")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "archived",
+ archived: false,
+ },
+ });
+ expect(parseSearchQuery("is:fav")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "favourited",
+ favourited: true,
+ },
+ });
+ expect(parseSearchQuery("-is:fav")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "favourited",
+ favourited: false,
+ },
+ });
+ expect(parseSearchQuery("is:tagged")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "tagged",
+ tagged: true,
+ },
+ });
+ expect(parseSearchQuery("-is:tagged")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "tagged",
+ tagged: false,
+ },
+ });
+ expect(parseSearchQuery("is:inlist")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "inlist",
+ inList: true,
+ },
+ });
+ expect(parseSearchQuery("-is:inlist")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "inlist",
+ inList: false,
+ },
+ });
+ });
+
+ test("simple string queries", () => {
+ expect(parseSearchQuery("url:https://example.com")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "url",
+ url: "https://example.com",
+ inverse: false,
+ },
+ });
+ expect(parseSearchQuery("-url:https://example.com")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "url",
+ url: "https://example.com",
+ inverse: true,
+ },
+ });
+ expect(parseSearchQuery('url:"https://example.com"')).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "url",
+ url: "https://example.com",
+ inverse: false,
+ },
+ });
+ expect(parseSearchQuery('-url:"https://example.com"')).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "url",
+ url: "https://example.com",
+ inverse: true,
+ },
+ });
+ expect(parseSearchQuery("#my-tag")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "tagName",
+ tagName: "my-tag",
+ inverse: false,
+ },
+ });
+ expect(parseSearchQuery("-#my-tag")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "tagName",
+ tagName: "my-tag",
+ inverse: true,
+ },
+ });
+ expect(parseSearchQuery('#"my tag"')).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "tagName",
+ tagName: "my tag",
+ inverse: false,
+ },
+ });
+ expect(parseSearchQuery('-#"my tag"')).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "tagName",
+ tagName: "my tag",
+ inverse: true,
+ },
+ });
+ expect(parseSearchQuery("list:my-list")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "listName",
+ listName: "my-list",
+ inverse: false,
+ },
+ });
+ expect(parseSearchQuery("-list:my-list")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "listName",
+ listName: "my-list",
+ inverse: true,
+ },
+ });
+ expect(parseSearchQuery('list:"my list"')).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "listName",
+ listName: "my list",
+ inverse: false,
+ },
+ });
+ expect(parseSearchQuery('-list:"my list"')).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "listName",
+ listName: "my list",
+ inverse: true,
+ },
+ });
+ });
+ test("date queries", () => {
+ expect(parseSearchQuery("after:2023-10-12")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "dateAfter",
+ dateAfter: new Date("2023-10-12"),
+ inverse: false,
+ },
+ });
+ expect(parseSearchQuery("-after:2023-10-12")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "dateAfter",
+ dateAfter: new Date("2023-10-12"),
+ inverse: true,
+ },
+ });
+ expect(parseSearchQuery("before:2023-10-12")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "dateBefore",
+ dateBefore: new Date("2023-10-12"),
+ inverse: false,
+ },
+ });
+ expect(parseSearchQuery("-before:2023-10-12")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "dateBefore",
+ dateBefore: new Date("2023-10-12"),
+ inverse: true,
+ },
+ });
+ });
+
+ test("complex queries", () => {
+ expect(parseSearchQuery("is:fav -is:archived")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "and",
+ matchers: [
+ {
+ type: "favourited",
+ favourited: true,
+ },
+ {
+ type: "archived",
+ archived: false,
+ },
+ ],
+ },
+ });
+
+ expect(parseSearchQuery("(is:fav is:archived) #my-tag")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "and",
+ matchers: [
+ {
+ type: "favourited",
+ favourited: true,
+ },
+ {
+ type: "archived",
+ archived: true,
+ },
+ {
+ type: "tagName",
+ tagName: "my-tag",
+ inverse: false,
+ },
+ ],
+ },
+ });
+
+ expect(parseSearchQuery("(is:fav is:archived) or (#my-tag)")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "or",
+ matchers: [
+ {
+ type: "and",
+ matchers: [
+ {
+ type: "favourited",
+ favourited: true,
+ },
+ {
+ type: "archived",
+ archived: true,
+ },
+ ],
+ },
+ {
+ type: "tagName",
+ tagName: "my-tag",
+ inverse: false,
+ },
+ ],
+ },
+ });
+
+ expect(parseSearchQuery("(is:fav or is:archived) and #my-tag")).toEqual({
+ result: "full",
+ text: "",
+ matcher: {
+ type: "and",
+ matchers: [
+ {
+ type: "or",
+ matchers: [
+ {
+ type: "favourited",
+ favourited: true,
+ },
+ {
+ type: "archived",
+ archived: true,
+ },
+ ],
+ },
+ {
+ type: "tagName",
+ tagName: "my-tag",
+ inverse: false,
+ },
+ ],
+ },
+ });
+ });
+ test("pure text", () => {
+ expect(parseSearchQuery("hello")).toEqual({
+ result: "full",
+ text: "hello",
+ matcher: undefined,
+ });
+ expect(parseSearchQuery("hello world")).toEqual({
+ result: "full",
+ text: "hello world",
+ matcher: undefined,
+ });
+ });
+
+ test("text interleaved with matchers", () => {
+ expect(
+ parseSearchQuery(
+ "hello is:fav world is:archived mixed world #my-tag test",
+ ),
+ ).toEqual({
+ result: "full",
+ text: "hello world mixed world test",
+ matcher: {
+ type: "and",
+ matchers: [
+ {
+ type: "favourited",
+ favourited: true,
+ },
+ {
+ type: "archived",
+ archived: true,
+ },
+ {
+ type: "tagName",
+ tagName: "my-tag",
+ inverse: false,
+ },
+ ],
+ },
+ });
+ });
+
+ test("unknown qualifiers are emitted as pure text", () => {
+ expect(parseSearchQuery("is:fav is:helloworld")).toEqual({
+ result: "full",
+ text: "is:helloworld",
+ matcher: {
+ type: "favourited",
+ favourited: true,
+ },
+ });
+ });
+
+ test("partial results", () => {
+ expect(parseSearchQuery("(is:archived) or ")).toEqual({
+ result: "partial",
+ text: "or",
+ matcher: {
+ type: "archived",
+ archived: true,
+ },
+ });
+ expect(parseSearchQuery("is:fav is: ( random")).toEqual({
+ result: "partial",
+ text: "is: ( random",
+ matcher: {
+ type: "favourited",
+ favourited: true,
+ },
+ });
+ });
+});
diff --git a/packages/shared/searchQueryParser.ts b/packages/shared/searchQueryParser.ts
new file mode 100644
index 00000000..e52af274
--- /dev/null
+++ b/packages/shared/searchQueryParser.ts
@@ -0,0 +1,360 @@
+import {
+ alt,
+ alt_sc,
+ apply,
+ kmid,
+ kright,
+ lrec_sc,
+ opt,
+ rule,
+ seq,
+ str,
+ tok,
+ Token,
+ TokenPosition,
+} from "typescript-parsec";
+import { z } from "zod";
+
+import { Matcher } from "./types/search";
+
+enum TokenType {
+ And = "AND",
+ Or = "OR",
+
+ Qualifier = "QUALIFIER",
+ Ident = "IDENT",
+ StringLiteral = "STRING_LITERAL",
+
+ LParen = "LPAREN",
+ RParen = "RPAREN",
+ Space = "SPACE",
+ Hash = "HASH",
+ Minus = "MINUS",
+}
+
+// Rules are in order of priority
+const lexerRules: [RegExp, TokenType][] = [
+ [/^and/i, TokenType.And],
+ [/^or/i, TokenType.Or],
+
+ [/^#/, TokenType.Hash],
+ [/^(is|url|list|after|before):/, TokenType.Qualifier],
+
+ [/^"([^"]+)"/, TokenType.StringLiteral],
+
+ [/^\(/, TokenType.LParen],
+ [/^\)/, TokenType.RParen],
+ [/^\s+/, TokenType.Space],
+ [/^-/, TokenType.Minus],
+
+ // This needs to be last as it matches a lot of stuff
+ [/^[^ )(]+/, TokenType.Ident],
+] as const;
+
+class LexerToken implements Token<TokenType> {
+ private constructor(
+ private readonly input: string,
+ public kind: TokenType,
+ public text: string,
+ public pos: TokenPosition,
+ ) {}
+
+ public static from(input: string): Token<TokenType> | undefined {
+ const tok = new LexerToken(
+ input,
+ /* Doesn't matter */ TokenType.Ident,
+ "",
+ {
+ index: 0,
+ rowBegin: 1,
+ rowEnd: 1,
+ columnBegin: 0,
+ columnEnd: 0,
+ },
+ );
+ return tok.next;
+ }
+
+ public get next(): Token<TokenType> | undefined {
+ if (!this.input.length) {
+ return undefined;
+ }
+
+ for (const [regex, tokenType] of lexerRules) {
+ const matchRes = regex.exec(this.input);
+ if (!matchRes) {
+ continue;
+ }
+ const match = matchRes[0];
+ return new LexerToken(this.input.slice(match.length), tokenType, match, {
+ index: this.pos.index + match.length,
+ columnBegin: this.pos.index + 1,
+ columnEnd: this.pos.index + 1 + match.length,
+ // Our strings are always only one line
+ rowBegin: 1,
+ rowEnd: 1,
+ });
+ }
+ // No match
+ throw new Error(
+ `Failed to tokenize the token at position ${this.pos.index}: ${this.input[0]}`,
+ );
+ }
+}
+
+export interface TextAndMatcher {
+ text: string;
+ matcher?: Matcher;
+}
+
+const MATCHER = rule<TokenType, TextAndMatcher>();
+const EXP = rule<TokenType, TextAndMatcher>();
+
+MATCHER.setPattern(
+ alt_sc(
+ apply(
+ seq(opt(str("-")), kright(str("is:"), tok(TokenType.Ident))),
+ ([minus, ident]) => {
+ switch (ident.text) {
+ case "fav":
+ return {
+ text: "",
+ matcher: { type: "favourited", favourited: !minus },
+ };
+ case "archived":
+ return {
+ text: "",
+ matcher: { type: "archived", archived: !minus },
+ };
+ case "tagged":
+ return {
+ text: "",
+ matcher: { type: "tagged", tagged: !minus },
+ };
+ case "inlist":
+ return {
+ text: "",
+ matcher: { type: "inlist", inList: !minus },
+ };
+ default:
+ // If the token is not known, emit it as pure text
+ return {
+ text: `${minus?.text ?? ""}is:${ident.text}`,
+ matcher: undefined,
+ };
+ }
+ },
+ ),
+ apply(
+ seq(
+ opt(str("-")),
+ alt(tok(TokenType.Qualifier), tok(TokenType.Hash)),
+ alt(
+ apply(tok(TokenType.Ident), (tok) => {
+ return tok.text;
+ }),
+ apply(tok(TokenType.StringLiteral), (tok) => {
+ return tok.text.slice(1, -1);
+ }),
+ ),
+ ),
+ ([minus, qualifier, ident]) => {
+ switch (qualifier.text) {
+ case "url:":
+ return {
+ text: "",
+ matcher: { type: "url", url: ident, inverse: !!minus },
+ };
+ case "#":
+ return {
+ text: "",
+ matcher: { type: "tagName", tagName: ident, inverse: !!minus },
+ };
+ case "list:":
+ return {
+ text: "",
+ matcher: { type: "listName", listName: ident, inverse: !!minus },
+ };
+ case "after:":
+ try {
+ return {
+ text: "",
+ matcher: {
+ type: "dateAfter",
+ dateAfter: z.coerce.date().parse(ident),
+ inverse: !!minus,
+ },
+ };
+ } catch (e) {
+ return {
+ // If parsing the date fails, emit it as pure text
+ text: (minus?.text ?? "") + qualifier.text + ident,
+ matcher: undefined,
+ };
+ }
+ case "before:":
+ try {
+ return {
+ text: "",
+ matcher: {
+ type: "dateBefore",
+ dateBefore: z.coerce.date().parse(ident),
+ inverse: !!minus,
+ },
+ };
+ } catch (e) {
+ return {
+ // If parsing the date fails, emit it as pure text
+ text: (minus?.text ?? "") + qualifier.text + ident,
+ matcher: undefined,
+ };
+ }
+ default:
+ // If the token is not known, emit it as pure text
+ return {
+ text: (minus?.text ?? "") + qualifier.text + ident,
+ matcher: undefined,
+ };
+ }
+ },
+ ),
+ // Ident or an incomplete qualifier
+ apply(alt(tok(TokenType.Ident), tok(TokenType.Qualifier)), (toks) => {
+ return {
+ text: toks.text,
+ matcher: undefined,
+ };
+ }),
+ kmid(tok(TokenType.LParen), EXP, tok(TokenType.RParen)),
+ ),
+);
+
+EXP.setPattern(
+ lrec_sc(
+ MATCHER,
+ seq(
+ alt(
+ tok(TokenType.Space),
+ kmid(tok(TokenType.Space), tok(TokenType.And), tok(TokenType.Space)),
+ kmid(tok(TokenType.Space), tok(TokenType.Or), tok(TokenType.Space)),
+ ),
+ MATCHER,
+ ),
+ (toks, next) => {
+ switch (next[0].kind) {
+ case TokenType.Space:
+ case TokenType.And:
+ return {
+ text: [toks.text, next[1].text].join(" ").trim(),
+ matcher:
+ !!toks.matcher || !!next[1].matcher
+ ? {
+ type: "and",
+ matchers: [toks.matcher, next[1].matcher].filter(
+ (a) => !!a,
+ ) as Matcher[],
+ }
+ : undefined,
+ };
+ case TokenType.Or:
+ return {
+ text: [toks.text, next[1].text].join(" ").trim(),
+ matcher:
+ !!toks.matcher || !!next[1].matcher
+ ? {
+ type: "or",
+ matchers: [toks.matcher, next[1].matcher].filter(
+ (a) => !!a,
+ ) as Matcher[],
+ }
+ : undefined,
+ };
+ }
+ },
+ ),
+);
+
+function flattenAndsAndOrs(matcher: Matcher): Matcher {
+ switch (matcher.type) {
+ case "and":
+ case "or": {
+ if (matcher.matchers.length == 1) {
+ return flattenAndsAndOrs(matcher.matchers[0]);
+ }
+ const flattened: Matcher[] = [];
+ for (let m of matcher.matchers) {
+ // If inside the matcher is another matcher of the same type, flatten it
+ m = flattenAndsAndOrs(m);
+ if (m.type == matcher.type) {
+ flattened.push(...m.matchers);
+ } else {
+ flattened.push(m);
+ }
+ }
+ matcher.matchers = flattened;
+ return matcher;
+ }
+ default:
+ return matcher;
+ }
+}
+
+export function _parseAndPrintTokens(query: string) {
+ console.log(`PARSING: ${query}`);
+ let tok = LexerToken.from(query);
+ do {
+ console.log(tok?.kind, tok?.text);
+ tok = tok?.next;
+ } while (tok);
+ console.log("DONE");
+}
+
+function consumeTokenStream(token: Token<TokenType>) {
+ let str = "";
+ let tok: Token<TokenType> | undefined = token;
+ do {
+ str += tok.text;
+ tok = tok.next;
+ } while (tok);
+ return str;
+}
+
+export function parseSearchQuery(
+ query: string,
+): TextAndMatcher & { result: "full" | "partial" | "invalid" } {
+ // _parseAndPrintTokens(query); // Uncomment to debug tokenization
+ const parsed = EXP.parse(LexerToken.from(query.trim()));
+ if (!parsed.successful || parsed.candidates.length != 1) {
+ // If the query is not valid, return the whole query as pure text
+ return {
+ text: query,
+ result: "invalid",
+ };
+ }
+
+ const parseCandidate = parsed.candidates[0];
+ if (parseCandidate.result.matcher) {
+ parseCandidate.result.matcher = flattenAndsAndOrs(
+ parseCandidate.result.matcher,
+ );
+ }
+ if (parseCandidate.nextToken) {
+ // Parser failed to consume the whole query. This usually happens
+ // when the user is still typing the query. Return the partial
+ // result and the remaining query as pure text
+ return {
+ text: (
+ parseCandidate.result.text +
+ consumeTokenStream(parseCandidate.nextToken)
+ ).trim(),
+ matcher: parseCandidate.result.matcher,
+ result: "partial",
+ };
+ }
+
+ return {
+ text: parseCandidate.result.text,
+ matcher: parseCandidate.result.matcher,
+ result: "full",
+ };
+}
diff --git a/packages/shared/types/bookmarks.ts b/packages/shared/types/bookmarks.ts
index 8ee523a6..a1e39280 100644
--- a/packages/shared/types/bookmarks.ts
+++ b/packages/shared/types/bookmarks.ts
@@ -195,3 +195,15 @@ export const zManipulatedTagSchema = z
message: "You must provide either a tagId or a tagName",
path: ["tagId", "tagName"],
});
+
+export const zSearchBookmarksCursor = z.discriminatedUnion("ver", [
+ z.object({
+ ver: z.literal(1),
+ offset: z.number(),
+ }),
+]);
+export const zSearchBookmarksRequestSchema = z.object({
+ text: z.string(),
+ limit: z.number().max(MAX_NUM_BOOKMARKS_PER_PAGE).optional(),
+ cursor: zSearchBookmarksCursor.nullish(),
+});
diff --git a/packages/shared/types/lists.ts b/packages/shared/types/lists.ts
index d2041907..bd6786b0 100644
--- a/packages/shared/types/lists.ts
+++ b/packages/shared/types/lists.ts
@@ -1,28 +1,76 @@
import { z } from "zod";
-export const zNewBookmarkListSchema = z.object({
- name: z
- .string()
- .min(1, "List name can't be empty")
- .max(40, "List name is at most 40 chars"),
- icon: z.string(),
- parentId: z.string().nullish(),
-});
+import { parseSearchQuery } from "../searchQueryParser";
+
+export const zNewBookmarkListSchema = z
+ .object({
+ name: z
+ .string()
+ .min(1, "List name can't be empty")
+ .max(40, "List name is at most 40 chars"),
+ icon: z.string(),
+ type: z.enum(["manual", "smart"]).optional().default("manual"),
+ query: z.string().min(1).optional(),
+ parentId: z.string().nullish(),
+ })
+ .refine((val) => val.type === "smart" || !val.query, {
+ message: "Manual lists cannot have a query",
+ path: ["query"],
+ })
+ .refine((val) => val.type === "manual" || val.query, {
+ message: "Smart lists must have a query",
+ path: ["query"],
+ })
+ .refine(
+ (val) => !val.query || parseSearchQuery(val.query).result === "full",
+ {
+ message: "Smart search query is not valid",
+ path: ["query"],
+ },
+ )
+ .refine((val) => !val.query || parseSearchQuery(val.query).text.length == 0, {
+ message:
+ "Smart lists cannot have unqualified terms (aka full text search terms) in the query",
+ path: ["query"],
+ });
export const zBookmarkListSchema = z.object({
id: z.string(),
name: z.string(),
icon: z.string(),
parentId: z.string().nullable(),
+ type: z.enum(["manual", "smart"]).default("manual"),
+ query: z.string().nullish(),
});
-export const zBookmarkListWithBookmarksSchema = zBookmarkListSchema.merge(
- z.object({
- bookmarks: z.array(z.string()),
- }),
-);
-
export type ZBookmarkList = z.infer<typeof zBookmarkListSchema>;
-export type ZBookmarkListWithBookmarks = z.infer<
- typeof zBookmarkListWithBookmarksSchema
->;
+
+export const zEditBookmarkListSchema = z.object({
+ listId: z.string(),
+ name: z
+ .string()
+ .min(1, "List name can't be empty")
+ .max(40, "List name is at most 40 chars")
+ .optional(),
+ icon: z.string().optional(),
+ parentId: z.string().nullish(),
+ query: z.string().min(1).optional(),
+});
+
+export const zEditBookmarkListSchemaWithValidation = zEditBookmarkListSchema
+ .refine((val) => val.parentId != val.listId, {
+ message: "List can't be its own parent",
+ path: ["parentId"],
+ })
+ .refine(
+ (val) => !val.query || parseSearchQuery(val.query).result === "full",
+ {
+ message: "Smart search query is not valid",
+ path: ["query"],
+ },
+ )
+ .refine((val) => !val.query || parseSearchQuery(val.query).text.length == 0, {
+ message:
+ "Smart lists cannot have unqualified terms (aka full text search terms) in the query",
+ path: ["query"],
+ });
diff --git a/packages/shared/types/search.ts b/packages/shared/types/search.ts
new file mode 100644
index 00000000..9d97fdd8
--- /dev/null
+++ b/packages/shared/types/search.ts
@@ -0,0 +1,91 @@
+import { z } from "zod";
+
+const zTagNameMatcher = z.object({
+ type: z.literal("tagName"),
+ tagName: z.string(),
+ inverse: z.boolean(),
+});
+
+const zListNameMatcher = z.object({
+ type: z.literal("listName"),
+ listName: z.string(),
+ inverse: z.boolean(),
+});
+
+const zArchivedMatcher = z.object({
+ type: z.literal("archived"),
+ archived: z.boolean(),
+});
+
+const urlMatcher = z.object({
+ type: z.literal("url"),
+ url: z.string(),
+ inverse: z.boolean(),
+});
+
+const zFavouritedMatcher = z.object({
+ type: z.literal("favourited"),
+ favourited: z.boolean(),
+});
+
+const zDateAfterMatcher = z.object({
+ type: z.literal("dateAfter"),
+ dateAfter: z.date(),
+ inverse: z.boolean(),
+});
+
+const zDateBeforeMatcher = z.object({
+ type: z.literal("dateBefore"),
+ dateBefore: z.date(),
+ inverse: z.boolean(),
+});
+
+const zIsTaggedMatcher = z.object({
+ type: z.literal("tagged"),
+ tagged: z.boolean(),
+});
+
+const zIsInListMatcher = z.object({
+ type: z.literal("inlist"),
+ inList: z.boolean(),
+});
+
+const zNonRecursiveMatcher = z.union([
+ zTagNameMatcher,
+ zListNameMatcher,
+ zArchivedMatcher,
+ urlMatcher,
+ zFavouritedMatcher,
+ zDateAfterMatcher,
+ zDateBeforeMatcher,
+ zIsTaggedMatcher,
+ zIsInListMatcher,
+]);
+
+type NonRecursiveMatcher = z.infer<typeof zNonRecursiveMatcher>;
+export type Matcher =
+ | NonRecursiveMatcher
+ | { type: "and"; matchers: Matcher[] }
+ | { type: "or"; matchers: Matcher[] };
+
+export const zMatcherSchema: z.ZodType<Matcher> = z.lazy(() => {
+ return z.discriminatedUnion("type", [
+ zTagNameMatcher,
+ zListNameMatcher,
+ zArchivedMatcher,
+ urlMatcher,
+ zFavouritedMatcher,
+ zDateAfterMatcher,
+ zDateBeforeMatcher,
+ zIsTaggedMatcher,
+ zIsInListMatcher,
+ z.object({
+ type: z.literal("and"),
+ matchers: z.array(zMatcherSchema),
+ }),
+ z.object({
+ type: z.literal("or"),
+ matchers: z.array(zMatcherSchema),
+ }),
+ ]);
+});
diff --git a/packages/shared/vitest.config.ts b/packages/shared/vitest.config.ts
new file mode 100644
index 00000000..41fd70c4
--- /dev/null
+++ b/packages/shared/vitest.config.ts
@@ -0,0 +1,14 @@
+/// <reference types="vitest" />
+
+import tsconfigPaths from "vite-tsconfig-paths";
+import { defineConfig } from "vitest/config";
+
+// https://vitejs.dev/config/
+export default defineConfig({
+ plugins: [tsconfigPaths()],
+ test: {
+ alias: {
+ "@/*": "./*",
+ },
+ },
+});