Xenova (HF staff) committed
Commit 1efb9e6
1 Parent(s): 4637621

Upload 23 files

.env.local.example ADDED
@@ -0,0 +1,2 @@
+ SUPABASE_URL=your-project-url
+ SUPABASE_ANON_KEY=your-anon-key
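
Note that `.env.local.example` only lists the variables read by the Next.js app itself. The helper script `scripts/update-database.mjs` added below additionally requires `SUPABASE_SECRET_KEY` (presumably the project's service-role key). A hypothetical complete set of local credentials, with placeholder values, might therefore look like:

```
SUPABASE_URL=https://your-project.supabase.co
SUPABASE_ANON_KEY=your-anon-key
# Only needed when running scripts/update-database.mjs; keep it server-side.
SUPABASE_SECRET_KEY=your-service-role-key
```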
.eslintrc.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "extends": "next/core-web-vitals"
+ }
.gitignore ADDED
@@ -0,0 +1,35 @@
+ # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+
+ # dependencies
+ /node_modules
+ /.pnp
+ .pnp.js
+
+ # testing
+ /coverage
+
+ # next.js
+ /.next/
+ /out/
+
+ # production
+ /build
+
+ # misc
+ .DS_Store
+ *.pem
+
+ # debug
+ npm-debug.log*
+ yarn-debug.log*
+ yarn-error.log*
+
+ # local env files
+ .env*.local
+
+ # vercel
+ .vercel
+
+ # typescript
+ *.tsbuildinfo
+ next-env.d.ts
Dockerfile ADDED
@@ -0,0 +1,69 @@
+ # syntax=docker/dockerfile:1.4
+
+ # Adapted from https://github.com/vercel/next.js/blob/e60a1e747c3f521fc24dfd9ee2989e13afeb0a9b/examples/with-docker/Dockerfile
+ # For more information, see https://nextjs.org/docs/pages/building-your-application/deploying#docker-image
+
+ FROM node:18 AS base
+
+ # Install dependencies only when needed
+ FROM base AS deps
+ WORKDIR /app
+
+ # Install dependencies based on the preferred package manager
+ COPY --link package.json yarn.lock* package-lock.json* pnpm-lock.yaml* ./
+ RUN \
+   if [ -f yarn.lock ]; then yarn --frozen-lockfile; \
+   elif [ -f package-lock.json ]; then npm ci; \
+   elif [ -f pnpm-lock.yaml ]; then yarn global add pnpm && pnpm i --frozen-lockfile; \
+   else echo "Lockfile not found." && exit 1; \
+   fi
+
+
+ # Rebuild the source code only when needed
+ FROM base AS builder
+ WORKDIR /app
+ COPY --from=deps --link /app/node_modules ./node_modules
+ COPY --link . .
+
+ # Next.js collects completely anonymous telemetry data about general usage.
+ # Learn more here: https://nextjs.org/telemetry
+ # Uncomment the following line in case you want to disable telemetry during the build.
+ # ENV NEXT_TELEMETRY_DISABLED 1
+
+ RUN npm run build
+
+ # If using yarn comment out above and use below instead
+ # RUN yarn build
+
+ # Production image, copy all the files and run next
+ FROM base AS runner
+ WORKDIR /app
+
+ ENV NODE_ENV production
+ # Uncomment the following line in case you want to disable telemetry during runtime.
+ # ENV NEXT_TELEMETRY_DISABLED 1
+
+ RUN \
+   addgroup --system --gid 1001 nodejs; \
+   adduser --system --uid 1001 nextjs
+
+ COPY --from=builder --link /app/public ./public
+
+ # Automatically leverage output traces to reduce image size
+ # https://nextjs.org/docs/advanced-features/output-file-tracing
+ COPY --from=builder --link --chown=1001:1001 /app/.next/standalone ./
+ COPY --from=builder --link --chown=1001:1001 /app/.next/static ./.next/static
+
+ USER nextjs
+
+ EXPOSE 3000
+
+ ENV PORT 3000
+ ENV HOSTNAME localhost
+
+ # Allow the running process to write model files to the cache folder.
+ # NOTE: In practice, you would probably want to pre-download the model files to avoid having to download them on-the-fly.
+ RUN mkdir -p /app/node_modules/@xenova/.cache/
+ RUN chmod 777 -R /app/node_modules/@xenova/
+
+ CMD ["node", "server.js"]
README.md CHANGED
@@ -1,10 +1,34 @@
- ---
- title: Semantic Image Search
- emoji: 📚
- colorFrom: green
- colorTo: indigo
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app).
+
+ ## Getting Started
+
+ First, run the development server:
+
+ ```bash
+ npm run dev
+ # or
+ yarn dev
+ # or
+ pnpm dev
+ ```
+
+ Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
+
+ You can start editing the page by modifying `app/page.js`. The page auto-updates as you edit the file.
+
+ This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.
+
+ ## Learn More
+
+ To learn more about Next.js, take a look at the following resources:
+
+ - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
+ - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
+
+ You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome!
+
+ ## Deploy on Vercel
+
+ The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.
+
+ Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details.
jsconfig.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "compilerOptions": {
+     "paths": {
+       "@/*": ["./src/*"]
+     }
+   }
+ }
next.config.js ADDED
@@ -0,0 +1,18 @@
+ /** @type {import('next').NextConfig} */
+ const nextConfig = {
+     // (Optional) Export as a standalone site
+     // See https://nextjs.org/docs/pages/api-reference/next-config-js/output#automatically-copying-traced-files
+     output: 'standalone', // Feel free to modify/remove this option
+
+     // Indicate that these packages should not be bundled by webpack
+     experimental: {
+         serverComponentsExternalPackages: ['sharp', 'onnxruntime-node'],
+     },
+
+     // Define which domains we are allowed to load images from
+     images: {
+         domains: ['images.unsplash.com'],
+     },
+ };
+
+ module.exports = nextConfig;
package.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "name": "semantic-image-search",
+   "version": "0.1.0",
+   "private": true,
+   "scripts": {
+     "dev": "next dev",
+     "build": "next build",
+     "start": "next start",
+     "lint": "next lint"
+   },
+   "dependencies": {
+     "@xenova/transformers": "^2.5.0",
+     "@supabase/supabase-js": "^2.31.0",
+     "autoprefixer": "10.4.14",
+     "blurhash": "^2.0.5",
+     "eslint": "8.45.0",
+     "eslint-config-next": "13.4.12",
+     "next": "13.4.12",
+     "postcss": "8.4.27",
+     "react": "18.2.0",
+     "react-dom": "18.2.0",
+     "tailwindcss": "3.3.3"
+   },
+   "overrides": {
+     "protobufjs": "^7.2.4"
+   }
+ }
postcss.config.js ADDED
@@ -0,0 +1,6 @@
+ module.exports = {
+   plugins: {
+     tailwindcss: {},
+     autoprefixer: {},
+   },
+ }
public/next.svg ADDED
public/vercel.svg ADDED
scripts/update-database.mjs ADDED
@@ -0,0 +1,67 @@
+ // Helper script to update the database with image embeddings
+
+ import { AutoProcessor, RawImage, CLIPVisionModelWithProjection } from '@xenova/transformers';
+ import { createClient } from '@supabase/supabase-js'
+
+ if (!process.env.SUPABASE_SECRET_KEY) {
+     throw new Error('Missing `SUPABASE_SECRET_KEY` environment variable.')
+ }
+
+ // Create a single supabase client for interacting with your database
+ const supabase = createClient(
+     process.env.SUPABASE_URL,
+     process.env.SUPABASE_SECRET_KEY,
+ )
+
+ let { data, error } = await supabase
+     .from('images')
+     .select('*')
+     .neq('ignore', true)
+     .is('image_embedding', null);
+
+ if (error) {
+     throw error;
+ }
+
+ // Load processor and vision model
+ const model_id = 'Xenova/clip-vit-base-patch16';
+ const processor = await AutoProcessor.from_pretrained(model_id);
+ const vision_model = await CLIPVisionModelWithProjection.from_pretrained(model_id, {
+     quantized: false,
+ });
+
+ for (const image_data of data) {
+     let image;
+     try {
+         image = await RawImage.read(image_data.photo_image_url);
+     } catch (e) {
+         // Unable to load image, so we ignore it
+         console.warn('Ignoring image due to error', e)
+         await supabase
+             .from('images')
+             .update({ ignore: true })
+             .eq('photo_id', image_data.photo_id)
+             .select()
+         continue;
+     }
+
+     // Read image and run processor
+     let image_inputs = await processor(image);
+
+     // Compute embeddings
+     const { image_embeds } = await vision_model(image_inputs);
+     const embed_as_list = image_embeds.tolist()[0];
+
+     // https://supabase.com/docs/guides/ai/vector-columns#storing-a-vector--embedding
+     const { data, error } = await supabase
+         .from('images')
+         .update({ image_embedding: embed_as_list })
+         .eq('photo_id', image_data.photo_id)
+         .select()
+
+     if (error) {
+         console.error('error', error)
+     } else {
+         console.log('success', image_data.photo_id)
+     }
+ }
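
For reference, this script is not wired up to any `package.json` script. It uses top-level `await`, so it needs a recent Node (the Dockerfile uses Node 18), and since plain `node` does not read `.env.local`, the Supabase credentials have to be supplied via the environment. A hypothetical invocation, not part of this commit:

```bash
# Illustrative only; SUPABASE_SECRET_KEY is presumably the service-role key.
SUPABASE_URL="https://your-project.supabase.co" \
SUPABASE_SECRET_KEY="your-service-role-key" \
node scripts/update-database.mjs
```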
src/app/app.js ADDED
@@ -0,0 +1,51 @@
+ import { AutoTokenizer, CLIPTextModelWithProjection } from "@xenova/transformers";
+ import { createClient } from '@supabase/supabase-js'
+
+ // Use the Singleton pattern to enable lazy construction of the pipeline.
+ // NOTE: We wrap the class in a function to prevent code duplication (see below).
+ const S = () => class ApplicationSingleton {
+     static model_id = 'Xenova/clip-vit-base-patch16';
+     static tokenizer = null;
+     static text_model = null;
+     static database = null;
+
+     static async getInstance() {
+         // Load tokenizer and text model
+         if (this.tokenizer === null) {
+             this.tokenizer = AutoTokenizer.from_pretrained(this.model_id);
+         }
+
+         if (this.text_model === null) {
+             this.text_model = CLIPTextModelWithProjection.from_pretrained(this.model_id, {
+                 quantized: false,
+             });
+         }
+
+         if (this.database === null) {
+             this.database = createClient(
+                 process.env.SUPABASE_URL,
+                 process.env.SUPABASE_ANON_KEY,
+             )
+         }
+
+         return Promise.all([
+             this.tokenizer,
+             this.text_model,
+             this.database,
+         ]);
+     }
+ }
+
+ let ApplicationSingleton;
+ if (process.env.NODE_ENV !== 'production') {
+     // When running in development mode, attach the pipeline to the
+     // global object so that it's preserved between hot reloads.
+     // For more information, see https://vercel.com/guides/nextjs-prisma-postgres
+     if (!global.ApplicationSingleton) {
+         global.ApplicationSingleton = S();
+     }
+     ApplicationSingleton = global.ApplicationSingleton;
+ } else {
+     ApplicationSingleton = S();
+ }
+ export default ApplicationSingleton;
src/app/components/ImageGrid.jsx ADDED
@@ -0,0 +1,52 @@
+ import Image from 'next/image'
+ import { blurHashToDataURL } from '../utils.js'
+
+ export function ImageGrid({ images, setCurrentImage }) {
+     return (
+         <div className="columns-2 gap-4 sm:columns-3 xl:columns-4 2xl:columns-5">
+             {images && images.map(({
+                 photo_id,
+                 photo_url,
+                 photo_image_url,
+                 photo_aspect_ratio,
+                 photo_width,
+                 photo_height,
+                 blur_hash,
+                 photo_description,
+                 ai_description,
+                 similarity,
+             }) => (
+                 <div
+                     key={photo_id}
+                     href={photo_url}
+                     className='after:content group cursor-pointer relative mb-4 block w-full after:pointer-events-none after:absolute after:inset-0 after:rounded-lg after:shadow-highlight'
+                     onClick={() => {
+                         setCurrentImage({
+                             photo_id,
+                             photo_url,
+                             photo_image_url,
+                             photo_aspect_ratio,
+                             photo_width,
+                             photo_height,
+                             blur_hash,
+                             photo_description,
+                             ai_description,
+                             similarity,
+                         });
+                     }}
+                 >
+                     <Image
+                         alt={photo_description || ai_description || ""}
+                         className="transform rounded-lg brightness-90 transition will-change-auto group-hover:brightness-110"
+                         style={{ transform: 'translate3d(0, 0, 0)' }}
+                         placeholder="blur"
+                         blurDataURL={blurHashToDataURL(blur_hash)}
+                         src={`${photo_image_url}?auto=format&fit=crop&w=480&q=80`}
+                         width={480}
+                         height={480 / photo_aspect_ratio}
+                         unoptimized={true}
+                     />
+                 </div>
+             ))}
+         </div>)
+ }
src/app/components/Modal.jsx ADDED
@@ -0,0 +1,60 @@
+ import { downloadImage } from '../utils.js'
+ import Image from 'next/image'
+
+ export function Modal({ currentImage, setCurrentImage }) {
+     return (
+         <div
+             className='fixed inset-0 z-30 backdrop-blur-2xl w-full h-full bg-black top-0 left-0 transition'
+             style={{
+                 backgroundColor: `rgba(0, 0, 0, ${currentImage ? 0.8 : 0})`,
+                 opacity: currentImage ? 1 : 0,
+                 pointerEvents: currentImage ? 'auto' : 'none',
+             }}
+         >
+             {currentImage && <>
+                 <Image
+                     alt={currentImage.photo_description || currentImage.ai_description || ""}
+                     className="transform rounded-lg transition will-change-auto"
+                     style={
+                         { transform: 'translate3d(0, 0, 0)', }
+                     }
+                     layout={'fill'}
+                     objectFit={'contain'}
+                     src={currentImage.photo_image_url}
+                     unoptimized={true}
+                 />
+                 <div
+                     className='absolute top-0 left-0 flex items-center gap-2 p-3 text-white'
+                 >
+                     <button
+                         onClick={() => setCurrentImage(null)}
+                         className="rounded-full bg-black/50 p-2 text-white/75 backdrop-blur-lg transition hover:bg-black/75 hover:text-white">
+                         <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth="1.5" stroke="currentColor" aria-hidden="true" className="h-5 w-5">
+                             <path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12"></path>
+                         </svg>
+                     </button>
+                 </div>
+                 <div className="absolute top-0 right-0 flex items-center gap-2 p-3 text-white">
+                     <a
+                         href={currentImage.photo_url}
+                         className="rounded-full bg-black/50 p-2 text-white/75 backdrop-blur-lg transition hover:bg-black/75 hover:text-white"
+                         target="_blank" title="View on Unsplash"
+                         rel="noreferrer">
+                         <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth="1.5" stroke="currentColor" aria-hidden="true" className="h-5 w-5">
+                             <path strokeLinecap="round" strokeLinejoin="round" d="M13.5 6H5.25A2.25 2.25 0 003 8.25v10.5A2.25 2.25 0 005.25 21h10.5A2.25 2.25 0 0018 18.75V10.5m-10.5 6L21 3m0 0h-5.25M21 3v5.25"></path>
+                         </svg>
+                     </a>
+                     <button
+                         onClick={() => downloadImage(currentImage.photo_image_url, `${currentImage.photo_id}.png`)}
+                         className="rounded-full bg-black/50 p-2 text-white/75 backdrop-blur-lg transition hover:bg-black/75 hover:text-white" title="Download">
+                         <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" strokeWidth="1.5" stroke="currentColor" aria-hidden="true" className="h-5 w-5">
+                             <path strokeLinecap="round" strokeLinejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5M16.5 12L12 16.5m0 0L7.5 12m4.5 4.5V3">
+                             </path>
+                         </svg>
+                     </button>
+                 </div>
+             </>
+             }
+
+         </div>)
+ }
src/app/components/SearchBar.jsx ADDED
@@ -0,0 +1,32 @@
+
+ export function SearchBar({ search }) {
+     return (<form
+         onSubmit={e => {
+             e.preventDefault();
+             const formData = new FormData(e.target);
+             const text = formData.get('text');
+             search(text);
+         }}
+         className='relative mb-2'
+     >
+         <div className="absolute inset-y-0 left-0 flex items-center pl-3 pointer-events-none">
+             <svg className="w-4 h-4 text-gray-500 dark:text-gray-400" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 20 20">
+                 <path stroke="currentColor" strokeLinecap="round" strokeLinejoin="round" strokeWidth="2" d="m19 19-4-4m0-7A7 7 0 1 1 1 8a7 7 0 0 1 14 0Z" />
+             </svg>
+         </div>
+         <input
+             type="search"
+             name="text"
+             id="default-search"
+             className="block w-full p-4 pl-10 text-sm text-gray-900 border border-gray-300 rounded-lg bg-gray-50 focus:ring-blue-500 focus:border-blue-500 dark:bg-gray-700 dark:border-gray-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500"
+             placeholder="Search for images..."
+             required
+         />
+         <button
+             type="submit"
+             className="text-white absolute right-2.5 bottom-2.5 bg-blue-700 hover:bg-blue-800 focus:ring-4 focus:outline-none focus:ring-blue-300 font-medium rounded-lg text-sm px-4 py-2 dark:bg-blue-600 dark:hover:bg-blue-700 dark:focus:ring-blue-800"
+         >
+             Search
+         </button>
+     </form>)
+ }
src/app/favicon.ico ADDED
src/app/globals.css ADDED
@@ -0,0 +1,19 @@
+ @tailwind base;
+ @tailwind components;
+ @tailwind utilities;
+
+ :root {
+   --foreground-rgb: 255, 255, 255;
+   --background-start-rgb: 0, 0, 0;
+   --background-end-rgb: 0, 0, 0;
+ }
+
+ body {
+   color: rgb(var(--foreground-rgb));
+   background: linear-gradient(
+       to bottom,
+       transparent,
+       rgb(var(--background-end-rgb))
+     )
+     rgb(var(--background-start-rgb));
+ }
src/app/layout.js ADDED
@@ -0,0 +1,17 @@
+ import './globals.css'
+ import { Inter } from 'next/font/google'
+
+ const inter = Inter({ subsets: ['latin'] })
+
+ export const metadata = {
+     title: 'Semantic Image Search',
+     description: 'Search for images using text (built w/ Transformers.js and Supabase)',
+ }
+
+ export default function RootLayout({ children }) {
+     return (
+         <html lang="en">
+             <body className={inter.className}>{children}</body>
+         </html>
+     )
+ }
src/app/page.js ADDED
@@ -0,0 +1,36 @@
+ 'use client'
+
+ import { useState } from 'react'
+ import { Modal } from './components/Modal';
+ import { SearchBar } from './components/SearchBar';
+ import { ImageGrid } from './components/ImageGrid';
+
+ export default function Home() {
+
+     // Application state
+     const [images, setImages] = useState(null);
+     const [currentImage, setCurrentImage] = useState(null);
+
+     const search = async (text) => {
+         if (!text) return;
+
+         const params = new URLSearchParams();
+         params.append('text', text);
+         params.append('threshold', 0.1);
+         params.append('limit', 100);
+
+         // Make a request to the /search route on the server.
+         const result = await fetch(`/search?${params.toString()}`);
+
+         const json = await result.json();
+         setImages(json);
+     };
+
+     return (
+         <main className="mx-auto max-w-[1960px] p-4 relative">
+             <Modal currentImage={currentImage} setCurrentImage={setCurrentImage} />
+             <SearchBar search={search} />
+             <ImageGrid images={images} setCurrentImage={setCurrentImage} />
+         </main>
+     )
+ }
src/app/search/route.js ADDED
@@ -0,0 +1,73 @@
+ // Create a custom request handler for the /search route.
+ // For more information, see https://nextjs.org/docs/app/building-your-application/routing/router-handlers
+
+ import { NextResponse } from 'next/server'
+ import ApplicationSingleton from '../app.js'
+
+ const parseInputs = (searchParams) => {
+     const text = searchParams.get('text');
+     if (!text) {
+         return {
+             error: 'Missing text parameter',
+         };
+     }
+     const threshold = searchParams.get('threshold');
+     const match_threshold = Number(threshold ?? 0.1);
+     if (isNaN(match_threshold) || match_threshold < 0 || match_threshold > 1) {
+         return {
+             error: `Invalid threshold parameter "${threshold}" (should be a number between 0 and 1)`,
+         };
+     }
+
+     const limit = searchParams.get('limit');
+     const match_count = Number(limit ?? 25);
+     if (isNaN(match_count) || !Number.isInteger(match_count) || match_count < 0 || match_count > 1000) {
+         return {
+             error: `Invalid limit parameter "${limit}" (should be an integer between 0 and 1000)`,
+         };
+     }
+
+     return { text, match_threshold, match_count }
+ }
+
+ // TODO: add caching
+
+ export async function GET(request) {
+     const parsedInputs = parseInputs(request.nextUrl.searchParams);
+     if (parsedInputs.error) {
+         return NextResponse.json({
+             error: parsedInputs.error,
+         }, { status: 400 });
+     }
+
+     // Valid inputs, so we can proceed
+     const { text, match_threshold, match_count } = parsedInputs;
+
+     // Get the tokenizer, model, and database singletons. When called for the first time,
+     // this will load the models and cache them for future use.
+     const [tokenizer, text_model, database] = await ApplicationSingleton.getInstance();
+
+     // Run tokenization
+     let text_inputs = tokenizer(text, { padding: true, truncation: true });
+
+     // Compute embeddings
+     const { text_embeds } = await text_model(text_inputs);
+     const query_embedding = text_embeds.tolist()[0];
+
+     // TODO add pagination?
+     let { data: images, error } = await database
+         .rpc('match_images', {
+             query_embedding,
+             match_threshold,
+             match_count,
+         });
+     if (error) {
+         console.warn('Error fetching images', error);
+         return NextResponse.json({
+             error: 'An error occurred while fetching images',
+         }, { status: 500 });
+     }
+
+
+     return NextResponse.json(images);
+ }
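
The `match_images` Postgres function invoked via `database.rpc(...)` above is not part of this upload; it lives in the Supabase project itself. For orientation only, a minimal sketch of such a function, modelled on the Supabase pgvector docs linked from `scripts/update-database.mjs`, might look like the following. The 512-dimension column, the cosine-distance operator, and the exact column types are assumptions, not the author's definition; the column names mirror the fields destructured in `ImageGrid.jsx`.

```sql
-- Hypothetical pgvector search function (not included in this commit).
-- Assumes images.image_embedding is a vector(512) column (CLIP ViT-B/16 projection size).
create or replace function match_images (
  query_embedding vector(512),
  match_threshold float,
  match_count int
)
returns table (
  photo_id text,
  photo_url text,
  photo_image_url text,
  photo_aspect_ratio float,
  photo_width int,
  photo_height int,
  blur_hash text,
  photo_description text,
  ai_description text,
  similarity float
)
language sql stable
as $$
  select
    images.photo_id,
    images.photo_url,
    images.photo_image_url,
    images.photo_aspect_ratio,
    images.photo_width,
    images.photo_height,
    images.blur_hash,
    images.photo_description,
    images.ai_description,
    1 - (images.image_embedding <=> query_embedding) as similarity
  from images
  where 1 - (images.image_embedding <=> query_embedding) > match_threshold
  order by similarity desc
  limit match_count;
$$;
```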
src/app/utils.js ADDED
@@ -0,0 +1,52 @@
+
+ import { decode } from "blurhash"
+
+ const SIZE = 32;
+
+ export function blurHashToDataURL(hash) {
+     if (!hash) return undefined
+
+     const pixels = decode(hash, SIZE, SIZE)
+
+     const canvas = document.createElement("canvas");
+     canvas.width = SIZE;
+     canvas.height = SIZE;
+
+     const ctx = canvas.getContext("2d");
+     const imageData = ctx.createImageData(SIZE, SIZE);
+     imageData.data.set(pixels);
+     ctx.putImageData(imageData, 0, 0);
+
+     return canvas.toDataURL();
+ }
+
+ function downloadData(url, filename) {
+
+     // Create an anchor element with the data URL as the href attribute
+     const downloadLink = document.createElement('a');
+     downloadLink.href = url;
+
+     // Set the download attribute to specify the desired filename for the downloaded image
+     downloadLink.download = filename;
+
+     // Trigger the download
+     downloadLink.click();
+
+     // Clean up: remove the anchor element from the DOM
+     downloadLink.remove();
+ }
+
+ export function downloadImage(url, filename) {
+     fetch(url, {
+         headers: new Headers({
+             Origin: location.origin,
+         }),
+         mode: 'cors',
+     })
+         .then((response) => response.blob())
+         .then((blob) => {
+             let blobUrl = window.URL.createObjectURL(blob)
+             downloadData(blobUrl, filename)
+         })
+         .catch((e) => console.error(e))
+ }
tailwind.config.js ADDED
@@ -0,0 +1,16 @@
+ /** @type {import('tailwindcss').Config} */
+ module.exports = {
+   content: [
+     './src/pages/**/*.{js,ts,jsx,tsx,mdx}',
+     './src/components/**/*.{js,ts,jsx,tsx,mdx}',
+     './src/app/**/*.{js,ts,jsx,tsx,mdx}',
+   ],
+   theme: {
+     extend: {
+       boxShadow: {
+         highlight: 'inset 0 0 0 1px rgba(255, 255, 255, 0.1)',
+       },
+     },
+   },
+   plugins: [],
+ }