{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports and HTTP(S) proxy setup for Hugging Face hub access.\n",
    "import os\n",
    "# NOTE(review): hardcoded local proxy — remove or parameterize when running outside this environment.\n",
    "os.environ['https_proxy']='127.0.0.1:17890'\n",
    "os.environ['http_proxy']='127.0.0.1:17890'\n",
    "from datasets import load_dataset,Dataset,concatenate_datasets\n",
    "import pandas as pd\n",
    "import sys,os\n",
    "import datasets\n",
    "from tqdm import tqdm\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset,load_from_disk\n",
    "# Load the two source Arrow datasets from absolute cluster paths.\n",
    "# NOTE(review): hardcoded paths — only valid on this machine.\n",
    "ds_0=load_from_disk(\"/data02/users/lz/code/UICoder/datasets/chunk[08-14]-arrow\")\n",
    "ds_1=load_from_disk(\"/data02/starmage/datasets/cc/arrows_5-7\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import GPT2TokenizerFast\n",
    "# GPT-2 tokenizer, used below to count tokens of the CSS/HTML halves of each sample.\n",
    "tokenizer = GPT2TokenizerFast.from_pretrained(\"openai-community/gpt2\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer([\"af,fdfad\", \"fdffaaa4564654\"], max_length=4096, truncation=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "\n",
    "def split_html_css(text):\n",
    "    \"\"\"Split an HTML document into [css_content, clean_html].\n",
    "\n",
    "    Returns the content of the FIRST <style> block (stripped) and the HTML\n",
    "    with ALL <style> blocks removed. Whitespace runs are collapsed first.\n",
    "    \"\"\"\n",
    "    # Collapse every whitespace run to a single space.\n",
    "    # fix: was f'\\\\s+' — an f-string with no placeholders whose '\\\\s' is an\n",
    "    # invalid escape sequence; a raw string is the correct regex literal.\n",
    "    html_content = re.sub(r'\\s+', ' ', text)\n",
    "\n",
    "    # Regex matching a <style ...> tag and everything up to its closing tag.\n",
    "    style_tag_pattern = r'<style[^>]*>([\\s\\S]*?)<\\/style>'\n",
    "\n",
    "    # re.DOTALL lets '.' match newlines; re.IGNORECASE matches <STYLE> etc.\n",
    "    style_content = re.search(style_tag_pattern, html_content, re.DOTALL | re.IGNORECASE)\n",
    "\n",
    "    # CSS is taken from the first <style> block only; any later blocks are\n",
    "    # dropped from the returned CSS but still removed from the HTML below.\n",
    "    if style_content:\n",
    "        css_content = style_content.group(1).strip()\n",
    "    else:\n",
    "        css_content = ''\n",
    "\n",
    "    # Strip every <style> tag (and its content) from the HTML.\n",
    "    clean_html = re.sub(style_tag_pattern, '', html_content, flags=re.DOTALL | re.IGNORECASE)\n",
    "\n",
    "    return [css_content, clean_html]\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Export ds_3 to fixed-size parquet shards with zero-padded sequential filenames.\n",
    "# NOTE(review): ds_3 is not defined in any visible cell (hidden kernel state) —\n",
    "# define it before running this cell.\n",
    "step = 512*3\n",
    "start_index = 0\n",
    "# Each shard holds up to `step` rows; the last shard may be smaller.\n",
    "for i, j in enumerate(tqdm(range(0, len(ds_3), step))):\n",
    "    ds_3.select(range(j, min(j+step, len(ds_3)))).to_parquet(f\"/data02/starmage/datasets/cc/all-parquets/{(start_index+i):05}.parquet\",batch_size=step)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import importlib\n",
    "import scripts.data_cc_pipeline.post_process as post_process\n",
    "importlib.reload(post_process)\n",
    "from scripts.data_cc_pipeline.post_process import detect_lang\n",
    "[i[0][\"label\"] for i in detect_lang([\"abc\", \"bcd\"], \"cuda\")]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pathlib import Path\n",
    "from datasets import concatenate_datasets\n",
    "from tqdm import tqdm\n",
    "\n",
    "# Collect every *.parquet under the scored-chunks directory.\n",
    "all_parquets = []\n",
    "for root, dirs, files in os.walk(\"/data02/users/lz/code/UICoder/datasets/c4-wash/scored-chunks/chunk15-format-scored-parquet\"):\n",
    "    root = Path(root)\n",
    "    for file in files:\n",
    "        if file.endswith('.parquet'):\n",
    "            all_parquets.append(root/file)\n",
    "\n",
    "# Load each parquet, tag its rows with the score encoded in the filename\n",
    "# (e.g. \"3.parquet\" -> score 3), then concatenate into one Dataset.\n",
    "ds_items = []\n",
    "for file in tqdm(all_parquets):\n",
    "    try:\n",
    "        df_item = pd.read_parquet(file)\n",
    "        ds_item = Dataset.from_pandas(df_item)\n",
    "        score = int(file.name.split('.')[0])\n",
    "        print(score)\n",
    "        ds_item = ds_item.add_column(\"score\", [score]*len(ds_item))\n",
    "        ds_items.append(ds_item)\n",
    "    except Exception as e:\n",
    "        # fix: message read \"file to read\"; it reports a read failure\n",
    "        print(f\"failed to read {file}, {e}\")\n",
    "\n",
    "ds = concatenate_datasets(ds_items)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds.save_to_disk(\"/data02/starmage/datasets/cc/arrows_15_processed_1\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_1=ds.cast_column(\"image\", datasets.Image(decode=True))\n",
    "ds_2 = concatenate_datasets([ds_0,ds_1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds.save_to_disk(\"/data02/starmage/datasets/cc/arrows_5-7\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "p1=\"/data03/starmage/projects/UICoder/outputs/tmp111\"\n",
    "ds1=ds_0.select(range(0,10000))\n",
    "ds1.save_to_disk(p1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds1[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds1[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from scripts.data_cc_pipeline.post_process import detect_lang\n",
    "\n",
    "# Batched .map() worker: adds image size, detected language, and GPT-2 token\n",
    "# counts of the [css, html] halves produced by split_html_css.\n",
    "def func(items): \n",
    "    try:\n",
    "        # assumes items['image'] holds PIL images (PIL .size -> (w, h)) — TODO confirm\n",
    "        items[\"scale\"]=[item.size for item in items['image']]    \n",
    "        items[\"lang\"]=[i[0][\"label\"] for i in detect_lang(items[\"text\"], device=\"cuda:7\")]\n",
    "        contents = [  split_html_css(text) for text in items[\"text\"]]\n",
    "        # tokens per sample = [len(css_tokens), len(html_tokens)], truncated at 10240\n",
    "        items[\"tokens\"] = [ list(map(len,tokenizer(con,  max_length=10240, truncation=True)[\"input_ids\"]))  for con in contents]        \n",
    "        return items    \n",
    "    except Exception as e:\n",
    "        # NOTE(review): returning None from a map worker will make datasets.map fail;\n",
    "        # errors are only printed, never re-raised — verify this is intended.\n",
    "        print(e)\n",
    "        return None\n",
    "ds_tmp = ds_tmp.map(func, num_proc=32, batched=True, batch_size=128)\n",
    "ds_tmp.save_to_disk(p1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_tmp[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "aaa = [[11,1,2],[3,4]]\n",
    "list(map(len, aaa))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds=load_from_disk(\"/data02/starmage/datasets/cc/arrows_5-7_processed\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "d2 = Dataset.from_parquet(\"/data02/users/lz/code/UICoder/datasets/c4-wash/scored-chunks/chunk15-format-scored-parquet/2.parquet\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds.select(range(0,100)).save_to_disk(\"/data03/starmage/projects/UICoder/outputs/tmp123\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds1= datasets.load_from_disk(\"/data02/starmage/datasets/cc/arrows_15_processed\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import base64\n",
    "import hashlib\n",
    "from PIL import Image\n",
    "from io import BytesIO\n",
    "\n",
    "def image_to_hash(image_pil: Image.Image) -> str:\n",
    "    \"\"\"Return the hex SHA-256 digest of the image's PNG encoding.\"\"\"\n",
    "    # Serialize the PIL image to PNG bytes in memory.\n",
    "    byte_arr = BytesIO()\n",
    "    image_pil.save(byte_arr, format='PNG')\n",
    "    # Hash the PNG bytes with SHA-256 (previous comment said Base64 — it was wrong).\n",
    "    hash_object = hashlib.sha256(byte_arr.getvalue())  \n",
    "    return hash_object.hexdigest()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Image-level dedup: hash each image, then keep only the first row per hash.\n",
    "def dudup(items, ids):\n",
    "    # Batched map worker (with_indices=True): emits each row's index and image hash.\n",
    "    hash_values = [image_to_hash(im) for im in items[\"image\"]] \n",
    "    return {\"id\":ids, \"hash\":hash_values}\n",
    "\n",
    "\n",
    "ds1 = ds1.map(dudup, num_proc=64, batched=True, batch_size=256, with_indices=True  )\n",
    "df = ds1.select_columns(['id', 'hash']).to_pandas()\n",
    "# ~duplicated('hash') keeps the first occurrence of each hash value.\n",
    "no_dup_idx =  df[~df.duplicated('hash')][\"id\"].to_list()\n",
    "print(f\"duplicated count {len(ds1)-len(no_dup_idx)}\")\n",
    "ds_dedup=ds1.select(no_dup_idx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_dedup=ds_dedup.select_columns( ['image', 'bbox', 'text', 'score', 'scale', 'lang', 'tokens', 'hash'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_dedup.save_to_disk(\"/data02/starmage/datasets/cc/arrows_15_processed_nodup\", num_proc=32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds0= datasets.load_from_disk(\"/data02/starmage/datasets/cc/arrows_15_processed_nodup\")\n",
    "ds1= datasets.load_from_disk(\"/data02/starmage/datasets/cc/arrows_5-7_processed_nodup\")\n",
    "df0=ds0.select_columns([\"score\", \"tokens\", \"scale\", \"lang\", 'hash']).to_pandas()\n",
    "df1=ds1.select_columns([\"score\", \"tokens\", \"scale\", \"lang\", 'hash']).to_pandas()\n",
    "ds=concatenate_datasets([ds0, ds1])\n",
    "df = ds.select_columns([\"score\", \"tokens\", \"scale\", \"lang\", 'hash']).to_pandas()\n",
    "#df = df[~df.duplicated('hash')]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df[df.duplicated('hash')].iloc[0].index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "id = random.choice(range(len(ds1)))\n",
    "ds1[id]['hash'] == df.iloc[id+len(ds0)][\"hash\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "id = random.choice(range(len(df1)))\n",
    "\n",
    "df1.loc[id]['hash']== image_to_hash(ds_train[id]['image'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "hashes1 = pd.read_csv(\"/data02/starmage/datasets/cc/8-15_hash.csv\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "hashes1.duplicated('hash').sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_nodup_ids(df_to):   \n",
    "    df3= pd.concat([df_to[\"hash\"], hashes1])\n",
    "    ids_nodup= df_to[~df3.duplicated('hash', keep='last')[:len(df_to)]].index\n",
    "    print(len(df_to)-len(ids_nodup))\n",
    "    return ids_nodup"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ids_nodup_0 = get_nodup_ids(df0)\n",
    "ds0_nodup = ds0.select(ids_nodup_0)\n",
    "ids_nodup_1 = get_nodup_ids(df1)\n",
    "ds1_nodup = ds1.select(ids_nodup_1)\n",
    "ds_to_sample = concatenate_datasets([ds0_nodup, ds1_nodup])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def sample_bench(ds_to_sample:datasets.Dataset, n =256*7):\n",
    "    df_to_sample = ds_to_sample.select_columns(['score', 'scale', 'lang', 'tokens', 'hash']).to_pandas()\n",
    "    df_to_sample[\"len1\"]=df_to_sample[\"tokens\"].apply(lambda x : x[0]+x[1])\n",
    "    def map_values(row):\n",
    "        if row <= 2048:\n",
    "            return 'short'\n",
    "        elif 2048 < row <= 4096:\n",
    "            return 'mid'\n",
    "        else:\n",
    "            return 'long'\n",
    "    df_to_sample[\"split\"] = df_to_sample[\"len1\"].apply(map_values)\n",
    "    df_to_sample=df_to_sample[df_to_sample['score'] >=4]\n",
    "\n",
    "    def sample_bench(df_all, m, values=[\"split\"]):    \n",
    "        df_sampled = df_all.groupby(values, group_keys=False).apply(lambda x: x.sample(min(m, len(x))))\n",
    "        return df_sampled\n",
    "\n",
    "    df_samples = sample_bench(df_to_sample, n)\n",
    "    ids = df_samples.index\n",
    "    ds_sampled = ds_to_sample.select(ids)\n",
    "    ds_sampled = ds_sampled.add_column('split', df_samples['split'].to_list())\n",
    "    ds_odd = ds_to_sample.select(list(set(range(len(ds_to_sample)))-set(ids)))\n",
    "    return ds_sampled, ds_odd\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_sampled, ds_odd = sample_bench(ds_to_sample)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_odd.save_to_disk(\"/data02/starmage/datasets/cc/train_arrows_5-8_15\", num_proc= 16)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "df_all = ds_sampled.select_columns(['split']).to_pandas()\n",
    "id_short = list(df_all[df_all['split']=='short'].index)\n",
    "id_mid = list(df_all[df_all['split']=='mid'].index)\n",
    "id_long = list(df_all[df_all['split']=='long'].index)\n",
    "\n",
    "columns = ['image', 'bbox', 'text', 'scale', 'lang', 'tokens', 'hash']\n",
    "ds_short = ds_sampled.select(id_short).select_columns(columns)\n",
    "ds_mid = ds_sampled.select(id_mid).select_columns(columns)\n",
    "ds_long = ds_sampled.select(id_long).select_columns(columns)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "len(id_short),len(id_mid),len(ds_long)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import clip\n",
    "import torch\n",
    "device='cuda:7'\n",
    "# CLIP ViT-B/32 used for visual-similarity dedup of the benchmark split.\n",
    "CLIP_MODEL, CLIP_PREPROCESS = clip.load(\"ViT-B/32\", device=device)\n",
    "def encode_imgs(ims,device=device):\n",
    "    # Encode a list of PIL images into CLIP feature vectors (returned on CPU).\n",
    "    with torch.no_grad():\n",
    "        img_tmps = torch.stack([CLIP_PREPROCESS(im) for im in ims]).to(device)\n",
    "        img_feas = CLIP_MODEL.encode_image(img_tmps).cpu()\n",
    "    return img_feas   \n",
    "def img_sim(im1_h, im2_h):\n",
    "    # Cosine similarity between two 1-D feature vectors, as a Python float.\n",
    "    return torch.nn.functional.cosine_similarity(im1_h, im2_h, dim=0).item()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import DataLoader\n",
    "# Build a visually-deduplicated benchmark split:\n",
    "# 1) CLIP-encode every image; 2) find each item's nearest-neighbour cosine\n",
    "# similarity; 3) keep up to 256+60 items whose max similarity < 0.95,\n",
    "# lowest-similarity first; 4) write the result to parquet.\n",
    "def dedup_bench(ds, save_path):\n",
    "    all_clips=[]\n",
    "    def col_fn(samples):\n",
    "        # Collate: just gather the PIL images from the batch.\n",
    "        items=[s[\"image\"] for s in samples]\n",
    "        return items\n",
    "    dl = DataLoader(ds, batch_size=32, collate_fn=col_fn, num_workers=1, shuffle=False)\n",
    "    for batch in tqdm(dl):\n",
    "        feats=encode_imgs(batch)\n",
    "        all_clips.extend(list(feats))\n",
    "    df_clip = pd.DataFrame({'clip':all_clips})\n",
    "    # NOTE(review): O(n^2) pairwise loop — for larger n, stack the features and\n",
    "    # compute one normalized matrix product instead.\n",
    "    max_sims=[[],[]]\n",
    "    for i in tqdm(range(len(df_clip))):\n",
    "        max_sim=-1\n",
    "        max_j=None\n",
    "        for j in range(len(df_clip)):\n",
    "            if j==i:\n",
    "                continue\n",
    "\n",
    "            sim = img_sim(df_clip.iloc[i]['clip'], df_clip.iloc[j]['clip'])\n",
    "            if sim > max_sim:            \n",
    "                max_sim = sim\n",
    "                max_j = j\n",
    "        max_sims[0].append(max_sim)\n",
    "        max_sims[1].append(max_j)\n",
    "    df_sims= pd.DataFrame({'sim':max_sims[0], 'id':max_sims[1]})\n",
    "    # Drop near-duplicates (nearest neighbour >= 0.95) and prefer the most distinct items.\n",
    "    df_sims_less=df_sims[df_sims.sim<0.95].sort_values(['sim'])\n",
    "    df_select = df_sims_less.iloc[:min(256+60, len(df_sims_less))]\n",
    "    ds_nodup = ds.select(df_select.index)\n",
    "    ds_nodup.to_parquet(save_path)\n",
    "    return ds_nodup"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "len(ds_short)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "short_good = dedup_bench(ds_short, '/data02/starmage/datasets/cc/bench/short.parquet')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "mid_good = dedup_bench(ds_mid, '/data02/starmage/datasets/cc/bench/mid.parquet')\n",
    "long_good = dedup_bench(ds_long, '/data02/starmage/datasets/cc/bench/long.parquet')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds11= datasets.load_from_disk('/data02/starmage/datasets/cc/train_arrows_5-8_15')\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds11[0]['tokens']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_short.to_parquet(\"/data02/starmage/datasets/cc/bench_256+40/short.parquet\")\n",
    "ds_mid.to_parquet(\"/data02/starmage/datasets/cc/bench_256+40/mid.parquet\")\n",
    "ds_long.to_parquet(\"/data02/starmage/datasets/cc/bench_256+40/long.parquet\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_sampled.to_pandas().duplicated('hash').sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_train_0 = datasets.load_from_disk(\"/data02/starmage/datasets/cc/arrows_8-14_processed\").select_columns(['score', 'scale', 'lang', 'tokens'])\n",
    "ds_train_1 = datasets.load_from_disk(\"/data02/starmage/datasets/cc/train_arrows_5-8_15\").select_columns(['score', 'scale', 'lang', 'tokens'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_train_0 = datasets.load_from_disk(\"/data02/starmage/datasets/cc/arrows_8-14_processed\")\n",
    "ds_train_1 = datasets.load_from_disk(\"/data02/starmage/datasets/cc/train_arrows_5-8_15\")\n",
    "hashes_train_0 = pd.read_csv(\"/data02/starmage/datasets/cc/8-14_hash.csv\")\n",
    "no_dup_ids = hashes_train_0[~hashes_train_0.duplicated('hash')].index\n",
    "ds_train_0_nodup = ds_train_0.select(no_dup_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "len(ds_total)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_total = concatenate_datasets([ds_train_1, ds_train_0_nodup])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from git import Repo\n",
    "import time\n",
    "from tqdm import tqdm\n",
    "import traceback\n",
    "\n",
    "# Export ds_total as parquet chunks into a git repo (push loop currently disabled).\n",
    "os.environ['https_proxy']='127.0.0.1:17890'\n",
    "os.environ['http_proxy']='127.0.0.1:17890'\n",
    "repo_path=\"/data03/starmage/datasets/cc/final\"\n",
    "repo = Repo(repo_path)\n",
    "def git_push(repo:Repo, file_path, msg):\n",
    "    # Add, commit, and push one file; returns False (after logging) on any failure.\n",
    "    try:\n",
    "        repo.git.add(file_path)\n",
    "        repo.git.commit('-m', msg)\n",
    "        repo.git.push()\n",
    "        return True\n",
    "    except Exception as e:\n",
    "        print('Push failed:', e)\n",
    "        print(traceback.format_exc())\n",
    "        return False\n",
    "chunk_size = 1536\n",
    "# start_chunk allows resuming a partially-completed export.\n",
    "start_chunk = 0\n",
    "for id,start in tqdm(enumerate(range(chunk_size*start_chunk, len(ds_total), chunk_size))):\n",
    "    fpath=f\"{repo_path}/data/{id+start_chunk:05d}.parquet\"\n",
    "    ds_total.select(range(start,min(start+chunk_size, len(ds_total)))).to_parquet(fpath)\n",
    "    #while not git_push(repo, fpath, f\"commit chunk {id+start_chunk}.\"):\n",
    "        #time.sleep(5)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "len(ds_total)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "repo_path=\"/data02/starmage/datasets/cc/tmp\"\n",
    "repo = Repo(repo_path)\n",
    "def is_file_committed(repo, file_path):\n",
    "    committed = False    \n",
    "    loginfo = repo.git.log('--', file_path)\n",
    "    print(loginfo)\n",
    "    if loginfo:  # if loginfo is not empty, the file has been committed\n",
    "        committed = True\n",
    "    return committed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "is_file_committed(repo, \"/data02/starmage/datasets/cc/tmp/data/0002.parquet\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "for k in  ds_total[0].keys():\n",
    "    print(k, type(ds_total[0][k]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_stat=pd.concat([ds_train_0_nodup.to_pandas(), ds_train_1.to_pandas()])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from PIL import Image\n",
    "im = Image.open('/data03/starmage/projects/UICoder/data/bad/0.png')\n",
    "type(im)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_tmp = datasets.Dataset.from_parquet('/data02/starmage/datasets/cc/bench_256+40/short.parquet')\n",
    "im1 = ds_tmp[154]['image']\n",
    "im2 = ds_tmp[172]['image']\n",
    "im1.size,im2.size"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(sys.getrecursionlimit())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "feas = encode_imgs([ds_tmp[0]['image'],ds_tmp[1]['image']])\n",
    "feas[0].shape\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_clip = datasets.Dataset.from_dict({'clip':feas})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('/data03/starmage/projects/UICoder/outputs/eval_vision2ui-design2code_20240515022540/efbda2a06816ea51c22928270aaece6e/prediction.html', 'r') as f:\n",
    "    str1 = f.read()\n",
    "len(str1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "csv_dirs=[\"/data03/starmage/projects/UICoder/outputs/eval_gpt4v\",\n",
    "          \"/data03/starmage/projects/UICoder/outputs/eval_cogvlm\",         \n",
    "         \"/data03/starmage/projects/UICoder/outputs/eval_ws\",\n",
    "         #\"/data03/starmage/projects/UICoder/outputs/eval_stage2\",\n",
    "         \"/data03/starmage/projects/UICoder/outputs/eval_d2c\",\n",
    "         \"/data03/starmage/projects/UICoder/outputs/eval_stage0\",\n",
    "         \"/data03/starmage/projects/UICoder/outputs/eval_stag0ws\",\n",
    "         ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os,sys\n",
    "import pandas as pd\n",
    "\n",
    "\n",
    "def format_decimal(num):\n",
    "    formatted = \"{:.2f}\".format(num)\n",
    "    if formatted.startswith(\"0\"):\n",
    "        return formatted[1:]\n",
    "    else:\n",
    "        return formatted\n",
    "\n",
    "\n",
    "def format_csv(path):\n",
    "    \"\"\"Read a metrics CSV, repairing the malformed single-data-line variant.\n",
    "\n",
    "    Some result files have all records fused onto one line after the header;\n",
    "    records are re-split on the '/data03' path prefix that starts each row,\n",
    "    written to a temporary tmp.csv, and re-parsed. Column names are stripped\n",
    "    of surrounding whitespace in all cases.\n",
    "    \"\"\"\n",
    "    with open(path) as f:\n",
    "        data= f.read()\n",
    "    splits = data.split('\\n')\n",
    "    # Well-formed file (more than header + one fused line): parse directly.\n",
    "    if len(splits) != 2:\n",
    "        df_tmp=pd.read_csv(path)\n",
    "        df_tmp.columns = [c.strip() for c in df_tmp.columns]\n",
    "        return df_tmp\n",
    "    head, tail=data.split('\\n')\n",
    "    \n",
    "    with open('tmp.csv', 'w') as f:\n",
    "        f.write(f\"{head}\\n\")\n",
    "    # Each record contains two '/data03' paths (origin, pred); the next record\n",
    "    # starts at the third occurrence.\n",
    "    key = '/data03'\n",
    "    start = 0\n",
    "    while start<len(tail):\n",
    "        pos1 = tail.find(key, start)\n",
    "        if pos1==-1:\n",
    "            break\n",
    "        pos2 = tail.find(key, pos1+1)\n",
    "        if pos2==-1:\n",
    "            break\n",
    "        pos3=tail.find(key, pos2+1)\n",
    "        # fix: when pos3 == -1 (last record), tail[pos1:pos3] dropped the final\n",
    "        # character and relied on find() with a negative start to end the loop;\n",
    "        # take the rest of the string and terminate explicitly instead.\n",
    "        if pos3 == -1:\n",
    "            line = tail[pos1:]\n",
    "            start = len(tail)\n",
    "        else:\n",
    "            line = tail[pos1:pos3]\n",
    "            start = pos3\n",
    "        with open('tmp.csv', 'a+') as f:\n",
    "            f.write(f\"{line}\\n\")\n",
    "    df_tmp = pd.read_csv('tmp.csv')\n",
    "    df_tmp.columns = [c.strip() for c in df_tmp.columns]\n",
    "    return df_tmp\n",
    "\n",
    "rows=[]\n",
    "column_names=[]\n",
    "for dir in csv_dirs:\n",
    "    for root,dirs,files in os.walk(dir):\n",
    "        if len(files) > 0 and files[0] != 'metrics_result.csv':\n",
    "            continue\n",
    "        files = sorted(files, key=lambda x : x.split('_')[0])\n",
    "        for file in files:\n",
    "            if file == 'metrics_result.csv':\n",
    "                row=[]\n",
    "                model_name = root.split('/')[-2].split('_')[-1]\n",
    "                dataset_name = 'v2u_'+root.split('/')[-1].split('_')[-2]\n",
    "                full_path = os.path.join(root, file)\n",
    "                #df = pd.read_csv(full_path)\n",
    "                print(full_path)\n",
    "                df=format_csv(full_path)\n",
    "                df['visual_score']=df[['block_match', 'text_match', 'position_match', 'text_color_match', 'clip_score']].mean(axis=1)\n",
    "                row.extend([model_name, dataset_name])\n",
    "                columns = []\n",
    "                for c in df.columns:\n",
    "                    if c not in [\"origin\",\"pred\"]:\n",
    "                        columns.append(c)\n",
    "                        value=f\"{df[c].mean():.2f} (±{df[c].std():.2f})\"\n",
    "                        # $0.01_{\\pm.03}$\n",
    "                        #s1 = format_decimal(df[c].std())                \n",
    "                        #value=f'${df[c].mean():.2f}_{{±{s1}}}$'\n",
    "                        row.append(value)                           \n",
    "                rows.append(row) \n",
    "                if len(column_names) ==0:\n",
    "                    column_names = ['model', 'dataset']+columns\n",
    "                \n",
    "                "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>model</th>\n",
       "      <th>dataset</th>\n",
       "      <th>bleu</th>\n",
       "      <th>rouge</th>\n",
       "      <th>tree_bleu</th>\n",
       "      <th>tree_rouge_1</th>\n",
       "      <th>mse_value</th>\n",
       "      <th>ssim_value</th>\n",
       "      <th>clip_sim</th>\n",
       "      <th>block_match</th>\n",
       "      <th>text_match</th>\n",
       "      <th>position_match</th>\n",
       "      <th>text_color_match</th>\n",
       "      <th>clip_score</th>\n",
       "      <th>visual_score</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>cogvlm</td>\n",
       "      <td>v2u_long</td>\n",
       "      <td>0.22 (±0.28)</td>\n",
       "      <td>0.26 (±0.28)</td>\n",
       "      <td>0.04 (±0.11)</td>\n",
       "      <td>0.01 (±0.01)</td>\n",
       "      <td>0.36 (±0.30)</td>\n",
       "      <td>0.60 (±0.13)</td>\n",
       "      <td>0.65 (±0.10)</td>\n",
       "      <td>0.22 (±0.32)</td>\n",
       "      <td>0.43 (±0.45)</td>\n",
       "      <td>0.33 (±0.36)</td>\n",
       "      <td>0.32 (±0.38)</td>\n",
       "      <td>0.66 (±0.26)</td>\n",
       "      <td>0.39 (±0.30)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>ws</td>\n",
       "      <td>v2u_long</td>\n",
       "      <td>0.11 (±0.16)</td>\n",
       "      <td>0.13 (±0.17)</td>\n",
       "      <td>0.21 (±0.22)</td>\n",
       "      <td>0.03 (±0.03)</td>\n",
       "      <td>0.39 (±0.27)</td>\n",
       "      <td>0.61 (±0.15)</td>\n",
       "      <td>0.64 (±0.11)</td>\n",
       "      <td>0.13 (±0.22)</td>\n",
       "      <td>0.59 (±0.37)</td>\n",
       "      <td>0.50 (±0.33)</td>\n",
       "      <td>0.54 (±0.36)</td>\n",
       "      <td>0.65 (±0.30)</td>\n",
       "      <td>0.48 (±0.27)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16</th>\n",
       "      <td>stag0ws</td>\n",
       "      <td>v2u_long</td>\n",
       "      <td>0.15 (±0.17)</td>\n",
       "      <td>0.17 (±0.17)</td>\n",
       "      <td>0.15 (±0.15)</td>\n",
       "      <td>0.04 (±0.04)</td>\n",
       "      <td>0.44 (±0.33)</td>\n",
       "      <td>0.59 (±0.15)</td>\n",
       "      <td>0.65 (±0.11)</td>\n",
       "      <td>0.14 (±0.23)</td>\n",
       "      <td>0.47 (±0.42)</td>\n",
       "      <td>0.40 (±0.36)</td>\n",
       "      <td>0.32 (±0.32)</td>\n",
       "      <td>0.55 (±0.35)</td>\n",
       "      <td>0.38 (±0.29)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>d2c</td>\n",
       "      <td>v2u_long</td>\n",
       "      <td>0.33 (±0.29)</td>\n",
       "      <td>0.41 (±0.26)</td>\n",
       "      <td>0.43 (±0.23)</td>\n",
       "      <td>0.06 (±0.03)</td>\n",
       "      <td>0.37 (±0.28)</td>\n",
       "      <td>0.61 (±0.11)</td>\n",
       "      <td>0.68 (±0.10)</td>\n",
       "      <td>0.49 (±0.36)</td>\n",
       "      <td>0.77 (±0.35)</td>\n",
       "      <td>0.58 (±0.28)</td>\n",
       "      <td>0.55 (±0.28)</td>\n",
       "      <td>0.66 (±0.29)</td>\n",
       "      <td>0.61 (±0.28)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>gpt4v</td>\n",
       "      <td>v2u_long</td>\n",
       "      <td>0.49 (±0.29)</td>\n",
       "      <td>0.58 (±0.23)</td>\n",
       "      <td>0.35 (±0.18)</td>\n",
       "      <td>0.10 (±0.05)</td>\n",
       "      <td>0.38 (±0.28)</td>\n",
       "      <td>0.57 (±0.11)</td>\n",
       "      <td>0.67 (±0.10)</td>\n",
       "      <td>0.60 (±0.40)</td>\n",
       "      <td>0.74 (±0.41)</td>\n",
       "      <td>0.62 (±0.36)</td>\n",
       "      <td>0.53 (±0.32)</td>\n",
       "      <td>0.60 (±0.34)</td>\n",
       "      <td>0.62 (±0.35)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>stage0</td>\n",
       "      <td>v2u_long</td>\n",
       "      <td>0.40 (±0.26)</td>\n",
       "      <td>0.41 (±0.24)</td>\n",
       "      <td>0.39 (±0.17)</td>\n",
       "      <td>0.15 (±0.07)</td>\n",
       "      <td>0.47 (±0.44)</td>\n",
       "      <td>0.58 (±0.16)</td>\n",
       "      <td>0.69 (±0.12)</td>\n",
       "      <td>0.46 (±0.34)</td>\n",
       "      <td>0.80 (±0.25)</td>\n",
       "      <td>0.66 (±0.23)</td>\n",
       "      <td>0.62 (±0.26)</td>\n",
       "      <td>0.73 (±0.22)</td>\n",
       "      <td>0.65 (±0.21)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>cogvlm</td>\n",
       "      <td>v2u_mid</td>\n",
       "      <td>0.27 (±0.31)</td>\n",
       "      <td>0.31 (±0.30)</td>\n",
       "      <td>0.04 (±0.11)</td>\n",
       "      <td>0.01 (±0.02)</td>\n",
       "      <td>0.32 (±0.30)</td>\n",
       "      <td>0.58 (±0.14)</td>\n",
       "      <td>0.66 (±0.10)</td>\n",
       "      <td>0.19 (±0.29)</td>\n",
       "      <td>0.45 (±0.46)</td>\n",
       "      <td>0.35 (±0.37)</td>\n",
       "      <td>0.36 (±0.41)</td>\n",
       "      <td>0.64 (±0.30)</td>\n",
       "      <td>0.40 (±0.31)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>ws</td>\n",
       "      <td>v2u_mid</td>\n",
       "      <td>0.10 (±0.14)</td>\n",
       "      <td>0.11 (±0.14)</td>\n",
       "      <td>0.22 (±0.25)</td>\n",
       "      <td>0.03 (±0.04)</td>\n",
       "      <td>0.37 (±0.38)</td>\n",
       "      <td>0.59 (±0.16)</td>\n",
       "      <td>0.67 (±0.11)</td>\n",
       "      <td>0.13 (±0.20)</td>\n",
       "      <td>0.61 (±0.32)</td>\n",
       "      <td>0.56 (±0.32)</td>\n",
       "      <td>0.59 (±0.35)</td>\n",
       "      <td>0.73 (±0.25)</td>\n",
       "      <td>0.52 (±0.23)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>17</th>\n",
       "      <td>stag0ws</td>\n",
       "      <td>v2u_mid</td>\n",
       "      <td>0.16 (±0.17)</td>\n",
       "      <td>0.17 (±0.18)</td>\n",
       "      <td>0.11 (±0.14)</td>\n",
       "      <td>0.04 (±0.04)</td>\n",
       "      <td>0.34 (±0.28)</td>\n",
       "      <td>0.56 (±0.16)</td>\n",
       "      <td>0.67 (±0.11)</td>\n",
       "      <td>0.10 (±0.18)</td>\n",
       "      <td>0.45 (±0.42)</td>\n",
       "      <td>0.40 (±0.38)</td>\n",
       "      <td>0.33 (±0.35)</td>\n",
       "      <td>0.51 (±0.38)</td>\n",
       "      <td>0.36 (±0.30)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10</th>\n",
       "      <td>d2c</td>\n",
       "      <td>v2u_mid</td>\n",
       "      <td>0.45 (±0.31)</td>\n",
       "      <td>0.51 (±0.28)</td>\n",
       "      <td>0.31 (±0.26)</td>\n",
       "      <td>0.05 (±0.05)</td>\n",
       "      <td>0.35 (±0.28)</td>\n",
       "      <td>0.56 (±0.14)</td>\n",
       "      <td>0.70 (±0.10)</td>\n",
       "      <td>0.55 (±0.35)</td>\n",
       "      <td>0.84 (±0.28)</td>\n",
       "      <td>0.66 (±0.24)</td>\n",
       "      <td>0.62 (±0.28)</td>\n",
       "      <td>0.75 (±0.23)</td>\n",
       "      <td>0.69 (±0.23)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>gpt4v</td>\n",
       "      <td>v2u_mid</td>\n",
       "      <td>0.55 (±0.32)</td>\n",
       "      <td>0.62 (±0.28)</td>\n",
       "      <td>0.28 (±0.18)</td>\n",
       "      <td>0.11 (±0.06)</td>\n",
       "      <td>0.34 (±0.29)</td>\n",
       "      <td>0.55 (±0.12)</td>\n",
       "      <td>0.71 (±0.10)</td>\n",
       "      <td>0.61 (±0.38)</td>\n",
       "      <td>0.78 (±0.38)</td>\n",
       "      <td>0.65 (±0.32)</td>\n",
       "      <td>0.58 (±0.33)</td>\n",
       "      <td>0.66 (±0.33)</td>\n",
       "      <td>0.65 (±0.33)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14</th>\n",
       "      <td>stage0</td>\n",
       "      <td>v2u_mid</td>\n",
       "      <td>0.46 (±0.27)</td>\n",
       "      <td>0.47 (±0.26)</td>\n",
       "      <td>0.39 (±0.19)</td>\n",
       "      <td>0.22 (±0.11)</td>\n",
       "      <td>0.39 (±0.37)</td>\n",
       "      <td>0.56 (±0.16)</td>\n",
       "      <td>0.71 (±0.10)</td>\n",
       "      <td>0.46 (±0.34)</td>\n",
       "      <td>0.83 (±0.22)</td>\n",
       "      <td>0.72 (±0.22)</td>\n",
       "      <td>0.70 (±0.26)</td>\n",
       "      <td>0.77 (±0.21)</td>\n",
       "      <td>0.69 (±0.19)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>cogvlm</td>\n",
       "      <td>v2u_short</td>\n",
       "      <td>0.33 (±0.33)</td>\n",
       "      <td>0.40 (±0.33)</td>\n",
       "      <td>0.03 (±0.08)</td>\n",
       "      <td>0.01 (±0.03)</td>\n",
       "      <td>0.35 (±0.38)</td>\n",
       "      <td>0.59 (±0.15)</td>\n",
       "      <td>0.68 (±0.11)</td>\n",
       "      <td>0.25 (±0.33)</td>\n",
       "      <td>0.54 (±0.45)</td>\n",
       "      <td>0.41 (±0.35)</td>\n",
       "      <td>0.45 (±0.42)</td>\n",
       "      <td>0.67 (±0.29)</td>\n",
       "      <td>0.46 (±0.31)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>ws</td>\n",
       "      <td>v2u_short</td>\n",
       "      <td>0.17 (±0.19)</td>\n",
       "      <td>0.20 (±0.20)</td>\n",
       "      <td>0.08 (±0.13)</td>\n",
       "      <td>0.03 (±0.04)</td>\n",
       "      <td>0.32 (±0.36)</td>\n",
       "      <td>0.62 (±0.17)</td>\n",
       "      <td>0.69 (±0.12)</td>\n",
       "      <td>0.19 (±0.26)</td>\n",
       "      <td>0.65 (±0.33)</td>\n",
       "      <td>0.60 (±0.32)</td>\n",
       "      <td>0.63 (±0.36)</td>\n",
       "      <td>0.78 (±0.20)</td>\n",
       "      <td>0.57 (±0.24)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>d2c</td>\n",
       "      <td>v2u_short</td>\n",
       "      <td>0.50 (±0.32)</td>\n",
       "      <td>0.56 (±0.29)</td>\n",
       "      <td>0.14 (±0.20)</td>\n",
       "      <td>0.04 (±0.05)</td>\n",
       "      <td>0.39 (±0.36)</td>\n",
       "      <td>0.58 (±0.15)</td>\n",
       "      <td>0.68 (±0.10)</td>\n",
       "      <td>0.65 (±0.31)</td>\n",
       "      <td>0.92 (±0.16)</td>\n",
       "      <td>0.72 (±0.16)</td>\n",
       "      <td>0.68 (±0.24)</td>\n",
       "      <td>0.80 (±0.10)</td>\n",
       "      <td>0.75 (±0.14)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>15</th>\n",
       "      <td>stag0ws</td>\n",
       "      <td>v2u_short</td>\n",
       "      <td>0.23 (±0.23)</td>\n",
       "      <td>0.26 (±0.22)</td>\n",
       "      <td>0.09 (±0.10)</td>\n",
       "      <td>0.06 (±0.06)</td>\n",
       "      <td>0.37 (±0.33)</td>\n",
       "      <td>0.58 (±0.17)</td>\n",
       "      <td>0.68 (±0.11)</td>\n",
       "      <td>0.17 (±0.26)</td>\n",
       "      <td>0.50 (±0.42)</td>\n",
       "      <td>0.44 (±0.38)</td>\n",
       "      <td>0.41 (±0.40)</td>\n",
       "      <td>0.57 (±0.36)</td>\n",
       "      <td>0.42 (±0.32)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>gpt4v</td>\n",
       "      <td>v2u_short</td>\n",
       "      <td>0.69 (±0.28)</td>\n",
       "      <td>0.74 (±0.24)</td>\n",
       "      <td>0.21 (±0.16)</td>\n",
       "      <td>0.12 (±0.07)</td>\n",
       "      <td>0.30 (±0.25)</td>\n",
       "      <td>0.61 (±0.14)</td>\n",
       "      <td>0.74 (±0.10)</td>\n",
       "      <td>0.64 (±0.39)</td>\n",
       "      <td>0.79 (±0.37)</td>\n",
       "      <td>0.67 (±0.32)</td>\n",
       "      <td>0.59 (±0.34)</td>\n",
       "      <td>0.67 (±0.31)</td>\n",
       "      <td>0.68 (±0.32)</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13</th>\n",
       "      <td>stage0</td>\n",
       "      <td>v2u_short</td>\n",
       "      <td>0.58 (±0.26)</td>\n",
       "      <td>0.61 (±0.24)</td>\n",
       "      <td>0.33 (±0.24)</td>\n",
       "      <td>0.30 (±0.22)</td>\n",
       "      <td>0.41 (±0.44)</td>\n",
       "      <td>0.59 (±0.17)</td>\n",
       "      <td>0.73 (±0.11)</td>\n",
       "      <td>0.57 (±0.38)</td>\n",
       "      <td>0.81 (±0.31)</td>\n",
       "      <td>0.70 (±0.28)</td>\n",
       "      <td>0.66 (±0.32)</td>\n",
       "      <td>0.73 (±0.27)</td>\n",
       "      <td>0.70 (±0.27)</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      model    dataset          bleu         rouge     tree_bleu  \\\n",
       "3    cogvlm   v2u_long  0.22 (±0.28)  0.26 (±0.28)  0.04 (±0.11)   \n",
       "6        ws   v2u_long  0.11 (±0.16)  0.13 (±0.17)  0.21 (±0.22)   \n",
       "16  stag0ws   v2u_long  0.15 (±0.17)  0.17 (±0.17)  0.15 (±0.15)   \n",
       "9       d2c   v2u_long  0.33 (±0.29)  0.41 (±0.26)  0.43 (±0.23)   \n",
       "1     gpt4v   v2u_long  0.49 (±0.29)  0.58 (±0.23)  0.35 (±0.18)   \n",
       "12   stage0   v2u_long  0.40 (±0.26)  0.41 (±0.24)  0.39 (±0.17)   \n",
       "5    cogvlm    v2u_mid  0.27 (±0.31)  0.31 (±0.30)  0.04 (±0.11)   \n",
       "7        ws    v2u_mid  0.10 (±0.14)  0.11 (±0.14)  0.22 (±0.25)   \n",
       "17  stag0ws    v2u_mid  0.16 (±0.17)  0.17 (±0.18)  0.11 (±0.14)   \n",
       "10      d2c    v2u_mid  0.45 (±0.31)  0.51 (±0.28)  0.31 (±0.26)   \n",
       "2     gpt4v    v2u_mid  0.55 (±0.32)  0.62 (±0.28)  0.28 (±0.18)   \n",
       "14   stage0    v2u_mid  0.46 (±0.27)  0.47 (±0.26)  0.39 (±0.19)   \n",
       "4    cogvlm  v2u_short  0.33 (±0.33)  0.40 (±0.33)  0.03 (±0.08)   \n",
       "8        ws  v2u_short  0.17 (±0.19)  0.20 (±0.20)  0.08 (±0.13)   \n",
       "11      d2c  v2u_short  0.50 (±0.32)  0.56 (±0.29)  0.14 (±0.20)   \n",
       "15  stag0ws  v2u_short  0.23 (±0.23)  0.26 (±0.22)  0.09 (±0.10)   \n",
       "0     gpt4v  v2u_short  0.69 (±0.28)  0.74 (±0.24)  0.21 (±0.16)   \n",
       "13   stage0  v2u_short  0.58 (±0.26)  0.61 (±0.24)  0.33 (±0.24)   \n",
       "\n",
       "    tree_rouge_1     mse_value    ssim_value      clip_sim   block_match  \\\n",
       "3   0.01 (±0.01)  0.36 (±0.30)  0.60 (±0.13)  0.65 (±0.10)  0.22 (±0.32)   \n",
       "6   0.03 (±0.03)  0.39 (±0.27)  0.61 (±0.15)  0.64 (±0.11)  0.13 (±0.22)   \n",
       "16  0.04 (±0.04)  0.44 (±0.33)  0.59 (±0.15)  0.65 (±0.11)  0.14 (±0.23)   \n",
       "9   0.06 (±0.03)  0.37 (±0.28)  0.61 (±0.11)  0.68 (±0.10)  0.49 (±0.36)   \n",
       "1   0.10 (±0.05)  0.38 (±0.28)  0.57 (±0.11)  0.67 (±0.10)  0.60 (±0.40)   \n",
       "12  0.15 (±0.07)  0.47 (±0.44)  0.58 (±0.16)  0.69 (±0.12)  0.46 (±0.34)   \n",
       "5   0.01 (±0.02)  0.32 (±0.30)  0.58 (±0.14)  0.66 (±0.10)  0.19 (±0.29)   \n",
       "7   0.03 (±0.04)  0.37 (±0.38)  0.59 (±0.16)  0.67 (±0.11)  0.13 (±0.20)   \n",
       "17  0.04 (±0.04)  0.34 (±0.28)  0.56 (±0.16)  0.67 (±0.11)  0.10 (±0.18)   \n",
       "10  0.05 (±0.05)  0.35 (±0.28)  0.56 (±0.14)  0.70 (±0.10)  0.55 (±0.35)   \n",
       "2   0.11 (±0.06)  0.34 (±0.29)  0.55 (±0.12)  0.71 (±0.10)  0.61 (±0.38)   \n",
       "14  0.22 (±0.11)  0.39 (±0.37)  0.56 (±0.16)  0.71 (±0.10)  0.46 (±0.34)   \n",
       "4   0.01 (±0.03)  0.35 (±0.38)  0.59 (±0.15)  0.68 (±0.11)  0.25 (±0.33)   \n",
       "8   0.03 (±0.04)  0.32 (±0.36)  0.62 (±0.17)  0.69 (±0.12)  0.19 (±0.26)   \n",
       "11  0.04 (±0.05)  0.39 (±0.36)  0.58 (±0.15)  0.68 (±0.10)  0.65 (±0.31)   \n",
       "15  0.06 (±0.06)  0.37 (±0.33)  0.58 (±0.17)  0.68 (±0.11)  0.17 (±0.26)   \n",
       "0   0.12 (±0.07)  0.30 (±0.25)  0.61 (±0.14)  0.74 (±0.10)  0.64 (±0.39)   \n",
       "13  0.30 (±0.22)  0.41 (±0.44)  0.59 (±0.17)  0.73 (±0.11)  0.57 (±0.38)   \n",
       "\n",
       "      text_match position_match text_color_match    clip_score  visual_score  \n",
       "3   0.43 (±0.45)   0.33 (±0.36)     0.32 (±0.38)  0.66 (±0.26)  0.39 (±0.30)  \n",
       "6   0.59 (±0.37)   0.50 (±0.33)     0.54 (±0.36)  0.65 (±0.30)  0.48 (±0.27)  \n",
       "16  0.47 (±0.42)   0.40 (±0.36)     0.32 (±0.32)  0.55 (±0.35)  0.38 (±0.29)  \n",
       "9   0.77 (±0.35)   0.58 (±0.28)     0.55 (±0.28)  0.66 (±0.29)  0.61 (±0.28)  \n",
       "1   0.74 (±0.41)   0.62 (±0.36)     0.53 (±0.32)  0.60 (±0.34)  0.62 (±0.35)  \n",
       "12  0.80 (±0.25)   0.66 (±0.23)     0.62 (±0.26)  0.73 (±0.22)  0.65 (±0.21)  \n",
       "5   0.45 (±0.46)   0.35 (±0.37)     0.36 (±0.41)  0.64 (±0.30)  0.40 (±0.31)  \n",
       "7   0.61 (±0.32)   0.56 (±0.32)     0.59 (±0.35)  0.73 (±0.25)  0.52 (±0.23)  \n",
       "17  0.45 (±0.42)   0.40 (±0.38)     0.33 (±0.35)  0.51 (±0.38)  0.36 (±0.30)  \n",
       "10  0.84 (±0.28)   0.66 (±0.24)     0.62 (±0.28)  0.75 (±0.23)  0.69 (±0.23)  \n",
       "2   0.78 (±0.38)   0.65 (±0.32)     0.58 (±0.33)  0.66 (±0.33)  0.65 (±0.33)  \n",
       "14  0.83 (±0.22)   0.72 (±0.22)     0.70 (±0.26)  0.77 (±0.21)  0.69 (±0.19)  \n",
       "4   0.54 (±0.45)   0.41 (±0.35)     0.45 (±0.42)  0.67 (±0.29)  0.46 (±0.31)  \n",
       "8   0.65 (±0.33)   0.60 (±0.32)     0.63 (±0.36)  0.78 (±0.20)  0.57 (±0.24)  \n",
       "11  0.92 (±0.16)   0.72 (±0.16)     0.68 (±0.24)  0.80 (±0.10)  0.75 (±0.14)  \n",
       "15  0.50 (±0.42)   0.44 (±0.38)     0.41 (±0.40)  0.57 (±0.36)  0.42 (±0.32)  \n",
       "0   0.79 (±0.37)   0.67 (±0.32)     0.59 (±0.34)  0.67 (±0.31)  0.68 (±0.32)  \n",
       "13  0.81 (±0.31)   0.70 (±0.28)     0.66 (±0.32)  0.73 (±0.27)  0.70 (±0.27)  "
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Assemble the evaluation results table and rank rows for comparison.\n",
     "# `rows` / `column_names` come from the evaluation loop in an earlier cell.\n",
     "df_res = pd.DataFrame(rows, columns=column_names)\n",
     "# Group by dataset first, then order by structural/visual match metrics.\n",
     "sort_keys = ['dataset', 'tree_rouge_1','block_match', 'position_match', 'text_match', 'text_color_match', 'clip_score']\n",
     "df1 = df_res.sort_values(sort_keys)\n",
     "df1.to_csv('res.csv')  # NOTE(review): relative path — lands in the notebook's CWD\n",
     "df1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Narrow view: one structural metric (tree_rouge_1) vs the perceptual ones\n",
     "# (clip_sim, visual_score), sorted per dataset for side-by-side reading.\n",
     "keys_1=['dataset','tree_rouge_1',  'clip_sim','visual_score']\n",
     "df2=df1[['dataset','model','tree_rouge_1',  'clip_sim','visual_score']].sort_values(keys_1)\n",
     "#df2=df2[df2['model'] != 'stage2'].reset_index(drop=True)\n",
     "df2.to_csv('/data03/starmage/projects/UICoder/outputs/dataset_track_res.csv')  # NOTE(review): hardcoded absolute path — breaks on other machines\n",
     "df2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys,os\n",
    "from pathlib import Path\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib.image as mpimg\n",
    "import random\n",
    "\n",
    "all_imgs =[]\n",
    "for root,dirs,files in os.walk('/data03/starmage/projects/UICoder/outputs/eval_stage2'):\n",
    "    root = Path(root)\n",
    "    for file in files:\n",
    "        if file.endswith('answer.png'):\n",
    "            all_imgs.append([root / file, root / 'prediction.png'])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "idx = random.choice(range(len(all_imgs)))\n",
    "fig, axs = plt.subplots(1, 2, figsize=(2*8, 2*8))   \n",
    "img0 = mpimg.imread(all_imgs[idx][0]) \n",
    "img1 = mpimg.imread(all_imgs[idx][1]) \n",
    "axs[0].imshow(img0)\n",
    "axs[1].imshow(img1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "os.system(f'cp {str(all_imgs[idx][0])} tmp/{idx}_ref.png')\n",
    "os.system(f'cp {str(all_imgs[idx][1])} tmp/{idx}_pred.png')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "import datasets\n",
     "# Spot-check a single shard of the final exported dataset.\n",
     "# NOTE(review): hardcoded absolute path — verify it exists on this host.\n",
     "ds = datasets.Dataset.from_parquet(\"/data03/starmage/datasets/cc/final/data/02057.parquet\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "ds[0]['image']"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "uicoder",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
