import requests
import os
from datetime import datetime
from bs4 import BeautifulSoup
import zipfile

# Search endpoint on cospa.com; the product barcode is appended as the
# final "words=" query value.
COSPA_COM = "https://www.cospa.com/index.php?stock=0&stock=1&act=itemlist&searchwords=1&brand=www.cospa.com&words="
# Detail-page URL prefix on nijigencospa.com; a numeric item id is appended.
NIGIGEN_COM = "https://nijigencospa.com/detail/id/"
# Local directory roots that downloaded images are saved under.
COSPA_DIR = 'download/cospa/'
BCONLINE_DIR = 'download/bc-online/'

def download_image(urlArr, save_dir):
    """Download each image URL in *urlArr* into *save_dir*.

    Parameters:
        urlArr:   iterable of image URLs; the last path segment of each
                  URL is used as the local file name.
        save_dir: existing directory the files are written into.

    Returns:
        dict with 'img_path' (full saved paths) and 'filename' (bare file
        names) for the downloads that succeeded; failed downloads are
        skipped with a console message.
    """
    img_path = []
    filenameArr = []
    for url in urlArr:
        filename = url.rsplit('/', 1)[-1]
        response = requests.get(url)
        if response.status_code == 200:
            save_path = os.path.join(save_dir, filename)
            with open(save_path, "wb") as f:
                f.write(response.content)
            # Bug fix: these lists were never populated, so callers always
            # received empty lists in the returned dict.
            img_path.append(save_path)
            filenameArr.append(filename)
            print("图片下载成功:", filename)
        else:
            print("图片下载失败")
    return {'img_path': img_path, 'filename': filenameArr}

def get_cospa_result(data):
    """Look up a product by barcode on cospa.com, follow the matching
    nijigencospa.com detail page, and download every thumbnail image.

    Parameters:
        data: dict containing a 'barcode' key.

    Returns:
        dict with the barcode, the list of image URLs, and today's date
        string (used as the download folder name).

    Exits the process if either HTTP request does not return 200.
    """
    barcode = data['barcode']

    search_url = COSPA_COM + barcode
    print(search_url)
    response = requests.get(search_url)
    if response.status_code != 200:
        exit()

    # The first direct anchor inside the result item links to the detail
    # page; its trailing path segment is the numeric item id.
    page = BeautifulSoup(response.text, 'html.parser')
    item_box = page.find('div', {'class': 'itembox'})
    link = item_box.find('a', recursive=False)
    nid = link['href'].split("/")[-1]

    detail_url = NIGIGEN_COM + nid
    print(detail_url)
    response = requests.get(detail_url)
    if response.status_code != 200:
        exit()

    page = BeautifulSoup(response.text, 'html.parser')
    thumbs = page.find_all('div', {'class': 'sp-thumbnail'})
    img_srcs = [img['src'] for thumb in thumbs for img in thumb.find_all('img')]

    # Save under download/cospa/<Y-M-D>/<barcode>/ — month and day are
    # deliberately not zero-padded, matching the existing folder scheme.
    now = datetime.now()
    today = str(now.year) + '-' + str(now.month) + '-' + str(now.day)
    save_dir = os.path.join(COSPA_DIR, today, barcode)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    download_image(img_srcs, save_dir)
    return {"barcode": barcode, "images": img_srcs, "date": today}

# Pack an entire folder tree into a single zip archive.
def create_zip(zip_filename, folder_to_zip):
    """Create *zip_filename* containing every file under *folder_to_zip*.

    Archive entries are stored relative to *folder_to_zip*, so the zip
    does not embed the absolute directory prefix.
    """
    with zipfile.ZipFile(zip_filename, 'w') as archive:
        # Walk the whole tree; directories themselves are not added,
        # only the files inside them.
        for root, _dirs, names in os.walk(folder_to_zip):
            for name in names:
                full_path = os.path.join(root, name)
                arcname = os.path.relpath(full_path, folder_to_zip)
                archive.write(full_path, arcname)
    print(f"{zip_filename} 创建成功!")


def get_bconline_result(url):
    """Scrape the main-carousel product images from a bc-online detail
    page and download them.

    Parameters:
        url: product detail-page URL; its last path segment is used as
             the product code and the download sub-folder name.

    Returns:
        dict with the product code (as 'barcode'), the list of image
        URLs, and today's date string.

    Exits the process if the HTTP request does not return 200.
    """
    code = url.rsplit("/", 1)[-1]
    print(url, code)
    response = requests.get(url)
    if response.status_code != 200:
        exit()

    # Carousel images carry a cache-busting "?t..." suffix; strip it so
    # the bare image URL is downloaded.
    page = BeautifulSoup(response.text, 'html.parser')
    carousel = page.find(id='fs_productCarouselMainImage')
    img_srcs = [img['src'].split("?t")[0] for img in carousel.find_all('img')]

    # Save under download/bc-online/<Y-M-D>/<code>/ — month and day are
    # deliberately not zero-padded, matching the existing folder scheme.
    now = datetime.now()
    today = str(now.year) + '-' + str(now.month) + '-' + str(now.day)
    save_dir = os.path.join(BCONLINE_DIR, today, code)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    download_image(img_srcs, save_dir)
    return {"barcode": code, "images": img_srcs, "date": today}
        