roy214 committed (verified)
Commit 8678b6a · Parent: 8626d8e

Upload clip-vit-b32.ipynb

Files changed (1)
  1. clip-vit-b32.ipynb +1 -0
clip-vit-b32.ipynb ADDED
 
 
+ {"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.11.11","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":396802,"sourceType":"datasetVersion","datasetId":175990},{"sourceId":329006,"sourceType":"datasetVersion","datasetId":139630}],"dockerImageVersionId":31011,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"!pip install transformers datasets ftfy regex tqdm\n!pip install git+https://github.com/openai/CLIP.git\n!pip install faiss-cpu --quiet","metadata":{"_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","trusted":true,"scrolled":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import boto3\nfrom botocore.exceptions import NoCredentialsError\nimport os\nfrom tqdm import tqdm\nimport os\nimport json\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader, Subset\nimport torch\nfrom transformers import CLIPProcessor, CLIPModel\nfrom tqdm.auto import tqdm\nfrom torchvision import transforms\nimport pickle\nimport numpy as np\nimport faiss\nimport matplotlib.pyplot as plt","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"os.environ['AWS_ACCESS_KEY_ID'] = 'add your key here'\nos.environ['AWS_SECRET_ACCESS_KEY'] = 'add your key here'\n\n# Khởi tạo client S3\ns3 = boto3.client('s3')\n\ndef upload_to_s3(file_path, bucket_name, object_name=None):\n \"\"\"Upload file to AWS S3.\"\"\"\n if object_name is None:\n object_name = os.path.basename(file_path)\n \n try:\n # Tải file lên S3\n s3.upload_file(file_path, bucket_name, object_name)\n # print(f\"File {file_path} uploaded to {bucket_name}/{object_name}.\")\n return f\"s3://{bucket_name}/{object_name}\"\n except FileNotFoundError:\n print(f\"File {file_path} not found.\")\n except NoCredentialsError:\n print(\"Credentials not available.\")\n return None\n\nbucket_name = 'image-text-retrieval'","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def collate_fn(batch):\n texts = [item['text'] for item in batch]\n images = [item['image'] for item in batch]\n # Let CLIPProcessor handle resizing, normalization, padding\n inputs = processor(text=texts, images=images, return_tensors=\"pt\", padding=True)\n return inputs\n\nclass FashionDataset(Dataset):\n def __init__(self, img_dir, meta_dir):\n self.img_dir = img_dir\n self.meta_dir = meta_dir\n self.ids = [f.split('.')[0] for f in os.listdir(img_dir) if f.endswith(('.jpg','.png'))]\n\n def __len__(self):\n return len(self.ids)\n\n def __getitem__(self, idx):\n id_ = self.ids[idx]\n img_path = os.path.join(self.img_dir, f\"{id_}.jpg\")\n meta_path = os.path.join(self.meta_dir, f\"{id_}.json\")\n\n image_url = upload_to_s3(img_path, bucket_name)\n\n # In ra URL của ảnh đã tải lên\n if not image_url:\n print(f\"Image URL: {image_url}\")\n \n # Load image as PIL\n image = Image.open(img_path).convert('RGB')\n # Load metadata\n with open(meta_path, 'r') as f:\n meta = json.load(f)['data']\n\n # Compose caption\n parts = []\n if meta.get('productDisplayName'): parts.append(meta['productDisplayName'])\n if meta.get('brandName'): 
# %% Cell 5: fine-tune CLIP on the fashion captions
IMG_DIR = '/kaggle/input/fashion-product-images-dataset/fashion-dataset/images'
META_DIR = '/kaggle/input/fashion-product-images-dataset/fashion-dataset/styles'

# Load CLIP
model = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')
processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')

# Freeze most parameters except the projection layers
for name, param in model.named_parameters():
    if 'proj' not in name:
        param.requires_grad = False

# Dataset and DataLoader with the custom collate_fn
dataset = FashionDataset(IMG_DIR, META_DIR)
subset_dataset = Subset(dataset, list(range(1000)))
dataloader = DataLoader(subset_dataset, batch_size=16, shuffle=True, num_workers=0, collate_fn=collate_fn)

# Optimizer over the trainable parameters only
optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=5e-5)

# Device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
model.train()

epochs = 3
for epoch in range(epochs):
    loop = tqdm(dataloader, desc=f'Epoch {epoch+1}/{epochs}')
    for batch in loop:
        pixel_values = batch['pixel_values'].to(device)
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)

        outputs = model(input_ids=input_ids,
                        attention_mask=attention_mask,
                        pixel_values=pixel_values,
                        return_loss=True)
        loss = outputs.loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loop.set_postfix(loss=loss.item())

# Save the fine-tuned model and processor
model.save_pretrained('/kaggle/working/clip-finetuned-fashion')
processor.save_pretrained('/kaggle/working/clip-finetuned-fashion')

print("Fine-tuning complete.")

# %% Cell 6: build a FAISS index over image embeddings
os.environ["TOKENIZERS_PARALLELISM"] = "false"

class FashionDataset(Dataset):
    def __init__(self, img_dir, meta_dir):
        self.img_dir = img_dir
        self.meta_dir = meta_dir
        self.ids = [f.split('.')[0] for f in os.listdir(img_dir) if f.endswith(('.jpg', '.png'))]

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, idx):
        id_ = self.ids[idx]
        img = Image.open(os.path.join(self.img_dir, f"{id_}.jpg")).convert('RGB')
        meta = json.load(open(os.path.join(self.meta_dir, f"{id_}.json")))['data']
        parts = [meta[k] for k in ['productDisplayName', 'brandName', 'baseColour', 'season', 'usage', 'fashionType']
                 if meta.get(k) and meta[k] not in ('', 'NA')]
        text = ", ".join(parts)
        return {'id': id_, 'image': img, 'text': text}


def collate_fn(batch):
    ids = [item['id'] for item in batch]
    images = [item['image'] for item in batch]
    return ids, images


def build_faiss_index(model, processor, dataset, subset_indices, index_path, mapping_path,
                      batch_size=32, device='cpu'):
    subset = Subset(dataset, subset_indices)
    loader = DataLoader(subset, batch_size=batch_size, shuffle=False,
                        num_workers=2, collate_fn=collate_fn)

    model = model.to(device).eval()
    all_embs, id_map = [], []

    with torch.no_grad():
        for ids, images in tqdm(loader, desc="Extracting image embeddings"):
            inputs = processor(images=images, return_tensors='pt', padding=True).to(device)
            img_emb = model.get_image_features(pixel_values=inputs['pixel_values'])
            img_emb = img_emb / img_emb.norm(p=2, dim=-1, keepdim=True)
            emb_np = img_emb.cpu().numpy().astype('float32')
            all_embs.append(emb_np)
            id_map.extend(ids)

    embeddings = np.vstack(all_embs)
    dim = embeddings.shape[1]
    index = faiss.IndexFlatIP(dim)
    index.add(embeddings)

    faiss.write_index(index, index_path)
    with open(mapping_path, 'wb') as f:
        pickle.dump(id_map, f)
    print(f"✅ FAISS index saved to {index_path}")
    print(f"✅ ID mapping saved to {mapping_path}")


IMG_DIR = '/kaggle/input/fashion-product-images-dataset/fashion-dataset/images'
META_DIR = '/kaggle/input/fashion-product-images-dataset/fashion-dataset/styles'
dataset = FashionDataset(IMG_DIR, META_DIR)
# Choose a subset
subset_ids = list(range(1000))

# Note: this index is built with the base checkpoint, while the search cell below
# queries it with the fine-tuned model.
model = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')
processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')

build_faiss_index(
    model, processor, dataset, subset_ids,
    index_path='faiss_index.bin',
    mapping_path='id_map.json',  # the mapping is pickled, despite the .json extension
    batch_size=16,
    device='cuda'
)
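# %% Editor's sketch (not part of the original notebook): sanity-check the index just
# written. Because the embeddings are L2-normalised, IndexFlatIP scores are cosine
# similarities, so querying with an indexed vector should return a score of ~1.0.
# Paths assume this runs in the same working directory as the cell above.
import pickle
import faiss

index_check = faiss.read_index('faiss_index.bin')
with open('id_map.json', 'rb') as f:   # pickled list, despite the .json name
    id_map_check = pickle.load(f)

assert index_check.ntotal == len(id_map_check)
first_vec = index_check.reconstruct(0).reshape(1, -1)
D, I = index_check.search(first_vec, 1)
print(id_map_check[I[0][0]], D[0][0])  # expect the first indexed id with score ≈ 1.0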
embeddings\"):\n inputs = processor(images=images, return_tensors='pt', padding=True).to(device)\n img_emb = model.get_image_features(pixel_values=inputs['pixel_values'])\n img_emb = img_emb / img_emb.norm(p=2, dim=-1, keepdim=True)\n emb_np = img_emb.cpu().numpy().astype('float32')\n all_embs.append(emb_np)\n id_map.extend(ids)\n\n embeddings = np.vstack(all_embs)\n dim = embeddings.shape[1]\n index = faiss.IndexFlatIP(dim)\n index.add(embeddings)\n\n faiss.write_index(index, index_path)\n with open(mapping_path, 'wb') as f:\n pickle.dump(id_map, f)\n print(f\"✅ FAISS index saved to {index_path}\")\n print(f\"✅ ID mapping saved to {mapping_path}\") \n \nIMG_DIR = '/kaggle/input/fashion-product-images-dataset/fashion-dataset/images'\nMETA_DIR = '/kaggle/input/fashion-product-images-dataset/fashion-dataset/styles'\ndataset = FashionDataset(IMG_DIR, META_DIR)\n# choose a subset\nsubset_ids = list(range(1000))\n\nmodel = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')\nprocessor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')\n\nbuild_faiss_index(\n model, processor, dataset, subset_ids,\n index_path='faiss_index.bin',\n mapping_path='id_map.json',\n batch_size=16,\n device='cuda'\n)","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"MODEL_PATH = '/kaggle/working/clip-finetuned-fashion'\nINDEX_PATH = '/kaggle/working/faiss_index.bin'\nMAPPING_PATH = '/kaggle/working/id_map.json'\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nmodel = CLIPModel.from_pretrained(MODEL_PATH).to(device).eval()\nprocessor = CLIPProcessor.from_pretrained(MODEL_PATH)\n\nindex = faiss.read_index(INDEX_PATH)\n\nwith open(MAPPING_PATH, 'rb') as f:\n id_map = pickle.load(f)\n\ndef search_faiss(model, processor, index, id_map, prompt, top_k=5, device='cpu'):\n inputs = processor(text=[prompt], return_tensors='pt', padding=True).to(device)\n with torch.no_grad():\n txt_emb = model.get_text_features(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'])\n txt_emb = txt_emb / txt_emb.norm(p=2, dim=-1, keepdim=True)\n q = txt_emb.cpu().numpy().astype('float32')\n\n D, I = index.search(q, top_k)\n return [(id_map[i], float(D[0][j])) for j, i in enumerate(I[0])]\n\nresults = search_faiss(\n model, processor,\n index, id_map,\n prompt=\"Dress Women Apparel Red\",\n top_k=5,\n device=device\n)\n\n\ndef show_image_by_id(img_id, img_dir, label=None):\n img_path = os.path.join(img_dir, f\"{img_id}.jpg\")\n if not os.path.exists(img_path):\n print(f\"❌ Image {img_id} not found in {img_dir}\")\n return\n image = Image.open(img_path).convert(\"RGB\")\n plt.imshow(image)\n plt.axis(\"off\")\n title = f\"Image ID: {img_id}\"\n if label:\n title += f\"\\nLabel: {label}\"\n plt.title(title)\n plt.show()\n\n\nIMG_DIR = '/kaggle/input/fashion-product-images-dataset/fashion-dataset/images'\nfor img_id, score in results:\n show_image_by_id(img_id, IMG_DIR, score)","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"from huggingface_hub import login\nlogin()","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"from huggingface_hub import HfApi\n\napi = HfApi()\nyour_username = 'roy214'\napi.create_repo(repo_id=f\"{your_username}/clip-finetuned-fashion\", repo_type=\"model\", exist_ok=True)","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"from huggingface_hub import upload_folder\n\nupload_folder(\n 
repo_id=f\"{your_username}/clip-finetuned-fashion\",\n folder_path=\"/kaggle/working/clip-finetuned-fashion\", # model + processor folder\n repo_type=\"model\"\n)\n\n# Push faiss_index.bin và id_map.json\nfrom huggingface_hub import upload_file\n\nupload_file(\n path_or_fileobj=\"/kaggle/working/faiss_index.bin\",\n path_in_repo=\"faiss_index.bin\",\n repo_id=f\"{your_username}/clip-finetuned-fashion\",\n repo_type=\"model\"\n)\n\nupload_file(\n path_or_fileobj=\"/kaggle/working/id_map.json\",\n path_in_repo=\"id_map.json\",\n repo_id=f\"{your_username}/clip-finetuned-fashion\",\n repo_type=\"model\"\n)\n","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"from transformers import CLIPModel, CLIPProcessor\nfrom huggingface_hub import hf_hub_download\nimport faiss\nimport pickle\nimport torch\n\nyour_username = 'roy214'\n\n# Load model từ Hugging Face Hub\nmodel = CLIPModel.from_pretrained(f\"{your_username}/clip-finetuned-fashion\").to(\"cpu\").eval()\nprocessor = CLIPProcessor.from_pretrained(f\"{your_username}/clip-finetuned-fashion\")\n\n# Load FAISS index và id_map\nindex_path = hf_hub_download(repo_id=f\"{your_username}/clip-finetuned-fashion\", filename=\"faiss_index.bin\")\nmapping_path = hf_hub_download(repo_id=f\"{your_username}/clip-finetuned-fashion\", filename=\"id_map.json\")\n\nindex = faiss.read_index(index_path)\nwith open(mapping_path, \"rb\") as f:\n id_map = pickle.load(f)","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import faiss\nimport pickle\nimport torch\nfrom transformers import CLIPModel, CLIPProcessor\n\ndef search_faiss(model, processor, index, id_map, prompt, top_k=5, device='cpu'):\n inputs = processor(text=[prompt], return_tensors='pt', padding=True).to(device)\n with torch.no_grad():\n txt_emb = model.get_text_features(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'])\n txt_emb = txt_emb / txt_emb.norm(p=2, dim=-1, keepdim=True)\n q = txt_emb.cpu().numpy().astype('float32')\n\n D, I = index.search(q, top_k)\n return [(id_map[i], float(D[0][j])) for j, i in enumerate(I[0])]\n\nresults = search_faiss(\n model, processor,\n index, id_map,\n prompt=\"Dress Women Apparel Red\",\n top_k=5\n)\n\ndef show_image_by_id(img_id, img_dir, label=None):\n img_path = os.path.join(img_dir, f\"{img_id}.jpg\")\n if not os.path.exists(img_path):\n print(f\"Image {img_id} not found in {img_dir}\")\n return\n image = Image.open(img_path).convert(\"RGB\")\n plt.imshow(image)\n plt.axis(\"off\")\n title = f\"Image ID: {img_id}\"\n if label:\n title += f\"\\nLabel: {label}\"\n plt.title(title)\n plt.show()\n\n\nIMG_DIR = '/kaggle/input/fashion-product-images-dataset/fashion-dataset/images'\nfor img_id, score in results:\n show_image_by_id(img_id, IMG_DIR, score)","metadata":{"trusted":true},"outputs":[],"execution_count":null}]}