import os
import requests
from bs4 import BeautifulSoup
from docx import Document
from PIL import Image
from io import BytesIO
import re


def extract_ss_value(script_content):
    """Pull the CDN resource path out of an inline ``var ss = '...'`` script.

    Example input: ``var ss = '10.0001/0001.0001.0254.0001.0001';`` — the
    dotted tail after ``10.0001/`` is converted to a slash-separated path
    and the prefix is re-attached.

    Returns the rewritten path string, or None when no assignment matches.
    """
    found = re.search(r"var ss = '10.0001/([\d\./]+)';", script_content)
    if found is None:
        return None
    tail = found.group(1).replace('.', '/')
    return "10.0001/" + tail

def fetch_html(url, timeout=30):
    """Download *url* and return the response body as text.

    Parameters:
        url: page URL to fetch.
        timeout: seconds before the request is aborted. The original call
            passed no timeout, so a stalled server could hang the whole
            scrape forever; 30s keeps previous success behavior intact.

    Raises:
        requests.HTTPError: on a non-2xx status code (via raise_for_status).
    """
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()
    return response.text

def parse_html(html_content):
    """Extract the main cave image URL and description tag from a page.

    Returns a pair of single-element lists ``([image_url], [description_tag])``
    so the result feeds directly into save_to_docx/save_image/save_text,
    which all iterate parallel lists. Elements are None when the expected
    markup is absent — the downstream savers already skip None entries,
    whereas the previous code raised AttributeError on a missing container.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    container = soup.find('div', class_='singe_desc')
    if container is None:
        # Layout changed or the fetch returned an unexpected page.
        return [None], [None]
    img_tag = container.find('img')
    image = img_tag['src'] if img_tag is not None else None
    description = container.find('span')
    return [image], [description]

def parse_html_modals(html_content):
    """Extract detail images, descriptions and titles from the page modals.

    Each Bootstrap ``div.modal.fade`` holds one detail view: an <h1> title,
    a <p> description, and an inline script assigning the CDN resource id
    to ``var ss``. Modals missing any required part are skipped with a
    warning so a single malformed modal does not abort the page.

    Returns three parallel lists: (image_urls, description_tags, title_tags).
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    modals = soup.find_all('div', class_='modal fade')
    imgs, descs, titles = [], [], []
    for modal in modals:
        title = modal.find('h1', class_='modal-title')
        if title is None:
            print("Warning: title is None, skip")
            continue
        print(f"Parsing: title ({title.text.strip()}) caught")

        script = modal.find('script', string=re.compile(r"var ss = '"))
        if script is None:
            print("Warning: script is None, skip")
            continue
        ss_value = extract_ss_value(script.string)
        if ss_value is None:
            # Script found but the ss assignment didn't match the expected
            # pattern; concatenating None below would raise TypeError.
            print("Warning: ss value not found in script, skip")
            continue
        # Fixed preview-tile path the CDN serves for every resource.
        img_name = "/01/1/index.tiles/preview.jpg"
        img = "https://cdn.e-dunhuang.com/SOURCES/" + ss_value + img_name

        desc = modal.find('p')
        if desc is None:
            print("Warning: desc is None, skip")
            continue
        titles.append(title)
        imgs.append(img)
        descs.append(desc)
    return imgs, descs, titles

def save_to_docx(images, descriptions, output_files):
    """Write each (image, description) pair into its own .docx file.

    Parameters:
        images: list of image URLs (entries may be None).
        descriptions: list of HTML tag objects exposing ``.text``
            (entries may be None).
        output_files: list of destination paths, parallel to the above.

    Pairs containing a None are skipped with a warning instead of aborting
    the batch. Existing output files are overwritten.

    Raises:
        requests.HTTPError: when an image download returns a non-2xx status.
    """
    for img_url, desc, output_file in zip(images, descriptions, output_files):
        if img_url is None or desc is None or output_file is None:
            print(f"Warning: img({img_url}), desc or output_file({output_file}) is None, skip")
            continue
        doc = Document()

        # Protocol-relative URLs ("//cdn...") need an explicit scheme.
        if img_url.startswith("//"):
            img_url = "https:" + img_url
        print(" Download img from", img_url)
        response = requests.get(img_url, timeout=60)
        # Fail loudly instead of embedding an HTML error page as an "image".
        response.raise_for_status()
        img_data = BytesIO(response.content)
        doc.add_picture(img_data)

        text_content = desc.text.strip()
        print(" Text content:", text_content[:10], "...")
        doc.add_paragraph(text_content)

        if os.path.exists(output_file):
            # doc.save overwrites in place; the prior os.remove was redundant.
            print("Warning: output file already exists, will be overwritten")
        doc.save(output_file)
        print(f" Image and description have been saved to {output_file}")


def save_image(images, output_files):
    """Download each image URL and save it via Pillow to its paired path.

    Parameters:
        images: list of image URLs (entries may be None).
        output_files: list of destination paths, parallel to *images*.

    Pairs containing a None are skipped with a warning. Pillow converts
    the downloaded image to the format implied by the output extension.

    Raises:
        requests.HTTPError: when a download returns a non-2xx status.
    """
    for img_url, output_file in zip(images, output_files):
        if img_url is None or output_file is None:
            print(
                f"Warning: img({img_url}) or output_file({output_file}) is None, skip")
            continue

        # Protocol-relative URLs need an explicit scheme.
        if img_url.startswith("//"):
            img_url = "https:" + img_url
        response = requests.get(img_url, timeout=60)
        # Without this, a 404 HTML page would be handed to PIL and fail
        # later with a confusing "cannot identify image file" error.
        response.raise_for_status()
        img_data = BytesIO(response.content)
        img = Image.open(img_data)
        img.save(output_file)
        print(f" Image has been saved to {output_file}")


def save_text(descriptions, output_files):
    """Write the stripped text of each description tag to its output file.

    *descriptions* and *output_files* are parallel lists; any pair that
    contains a None is skipped with a warning rather than raising. Files
    are written as UTF-8 and overwritten if they already exist.
    """
    for item, path in zip(descriptions, output_files):
        if item is None or path is None:
            print(f"Warning: desc or output_file({path}) is None, skip")
            continue
        stripped = item.text.strip()
        with open(path, 'w', encoding='utf-8') as handle:
            handle.write(stripped)
        print(f" Description has been saved to {path}")


def build_dunhuang(url: str, output_dir: str):
    """Scrape one cave page and save its images/descriptions to *output_dir*.

    Workflow:
      1. Fetch the page; save the main image + description as
         .docx/.png/.txt named after the zero-padded cave number.
      2. Parse every detail modal; save each detail the same three ways,
         named after the modal title with spaces normalised to underscores.

    Parameters:
        url: cave page URL; the cave number is the text after the final
            '.' in the URL (e.g. '0254').
        output_dir: directory to create (if needed) and write files into.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    html_content = fetch_html(url)
    images, descriptions = parse_html(html_content)
    # Cave number = text after the last '.' of the URL.
    idx = url.split('.')[-1]
    # e.g. idx '0023' -> "莫高窟第023窟" (zero-padded to 3 digits).
    title = "莫高窟第"+str(int(idx)).zfill(3)+"窟"
    output_files = [os.path.join(output_dir, f"{title}.docx")]
    save_to_docx(images, descriptions, output_files)
    img_output_files = [os.path.join(output_dir, f"{title}.png")]
    save_image(images, img_output_files)
    txt_output_files = [os.path.join(output_dir, f"{title}.txt")]
    save_text(descriptions, txt_output_files)
    print(f"Main image and description is done")
    
    images, descriptions, titles = parse_html_modals(html_content)
    # Filter bad spaces in modal titles to build safe filenames: the first
    # replace targets a non-ASCII space character, then double spaces are
    # collapsed and remaining spaces become underscores.
    # NOTE(review): the replace arguments contain visually identical space
    # characters — presumably NBSP/ideographic space vs ASCII; confirm
    # against the live page before editing these literals.
    output_files = [os.path.join(
        output_dir, f"{output_file.text.strip().replace(' ', ' ').replace('  ', ' ').replace(' ', '_')}.docx") for output_file in titles]
    save_to_docx(images, descriptions, output_files)
    img_output_files = [os.path.join(
        output_dir, f"{output_file.text.strip().replace(' ', ' ').replace('  ', ' ').replace(' ', '_')}.png") for output_file in titles]
    save_image(images, img_output_files)
    txt_output_files = [os.path.join(
        output_dir, f"{output_file.text.strip().replace(' ', ' ').replace('  ', ' ').replace(' ', '_')}.txt") for output_file in titles]
    save_text(descriptions, txt_output_files)
    print(f"Details images and descriptions is done")
    print(f">>> {idx} is done: data has been saved to {output_dir}\n")


def main():
    """Entry point: scrape a fixed list of cave ids into per-cave folders."""
    # Renamed from 'id', which shadowed the builtin id().
    cave_ids = ['0023', '0254']
    for idx in cave_ids:
        url = f"https://www.e-dunhuang.com/cave/10.0001/0001.0001.{idx}"
        output_dir = f"c:\\users\\mingqihu\\dunhuang-data\\{idx}"
        build_dunhuang(url, output_dir)

if __name__ == "__main__":
    main()
