#!/usr/bin/python3
# author:cl
# 2023年06月27日
# Email:1037654919@qq.com
import time
import shutil
from lxml import html, etree
from bs4 import BeautifulSoup
import requests
import os
import re
import json

# Local proxy (pigcha client); all HTTP/HTTPS requests in this script are
# routed through it.
proxies = {'http': '127.0.0.1:15732',
           'https': '127.0.0.1:15732'}

# Browser-like default headers. Used directly by parse_gallery(); the other
# request helpers define their own local copies.
headers = {
    "authority": "www.carbodydesign.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "referer": "https://www.carbodydesign.com/page/2/?s=car",
    "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.41"
}


def save_summary(path, file, jsondata):
    """Serialize *jsondata* as JSON into ``path + file``.

    Args:
        path: Directory prefix (expected to end with '/').
        file: File name appended to *path*.
        jsondata: Any JSON-serializable object.
    """
    # Explicit encoding avoids depending on the platform's locale default.
    # The original also called f.close() after the with-block — a no-op,
    # since the context manager already closed the file.
    with open(path + file, 'w', encoding='utf-8') as f:
        json.dump(jsondata, f)


def save_txt(txt_filename, data, PWD):
    """Write the label string *data* to ``PWD + txt_filename``.

    Args:
        txt_filename: Target file name (e.g. '<image-stem>.txt').
        data: Text content, typically "title,description,tag1,tag2,...".
        PWD: Directory prefix (expected to end with '/').
    """
    # utf-8 keeps non-ASCII titles/descriptions intact on any platform.
    # Dropped the redundant f.close() that followed the with-block.
    with open(PWD + txt_filename, 'w', encoding='utf-8') as f:
        f.write(data)


def save_image(name, url, PWD='car_body/car/'):
    """Download *url* and store it as ``PWD + name``; skip if already present.

    Args:
        name: Target file name (the image's basename, so reruns can detect
            already-downloaded files).
        url: Absolute image URL.
        PWD: Output directory prefix (expected to end with '/').

    Any download error is caught and printed; the function never raises.
    """
    print('save image:', url)
    filename = PWD + name
    if os.path.isfile(filename):  # already crawled — keep reruns incremental
        print("文件存在：", filename)
        return
    try:
        response = requests.get(url, proxies=proxies, stream=True, timeout=30)
        # Without this check an HTML error page would be saved as an "image".
        response.raise_for_status()
        with open(filename, 'wb') as fd:
            # stream=True + iter_content writes in chunks instead of holding
            # the whole image in memory (the original read .content, which
            # defeated streaming).
            for chunk in response.iter_content(chunk_size=65536):
                fd.write(chunk)
        # Removed requests.session().close(): it created a brand-new Session
        # and closed it — completely unrelated to the GET above.
    except Exception as e:
        print(e)


# images栏目部分 翻页
def get_page(tag, pageNo):
    headers = {
        "authority": "www.carbodydesign.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "referer": "https://www.carbodydesign.com/page/2/?s=car",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.41"
    }
    url = f"https://www.carbodydesign.com/{tag}/page/{pageNo}/"
    # urltt= 'https://www.carbodydesign.com/design-sketch-board/page/204/'
    response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
    print('response.url', response.url)
    requests.session().close()
    if response.text:
        return response.content.decode()


# 关键词爬取
def get_kw_image(kw, pageNo):
    headers = {
        "authority": "www.carbodydesign.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "referer": "https://www.carbodydesign.com/page/2/?s=car",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.41"
    }
    url = f"https://www.carbodydesign.com/page/{pageNo}/?s={kw}"
    # urltt= 'https://www.carbodydesign.com/design-sketch-board/page/204/'
    response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
    print('response.url', response.url)
    requests.session().close()
    if response.text:
        # soups = BeautifulSoup(response.content.decode(), 'lxml')
        # datas = soups.find_all('div', class_='cbd-column item-post-grid')
        # print('len(datas):', len(datas))
        # print(datas[0])
        return response.content.decode()


def create_wenjianjia(basefile):
    """Create directory *basefile*; print a notice if it already exists.

    Uses EAFP instead of the original exists()/mkdir() pair, which had a
    time-of-check/time-of-use race.
    """
    try:
        os.mkdir(basefile)
    except FileExistsError:
        print('文件夹已经存在')


def parse_url(url, image_url, kw):
    """Crawl one article page and save its images plus per-image label files.

    Each downloaded image gets a sibling .txt file containing
    "title,description,tag1,tag2,...", and the article folder finally gets a
    summary.json listing everything saved. An article whose folder already
    holds more than one file is assumed complete and skipped. If nothing was
    downloaded, the folder is removed again.

    Args:
        url: Full article URL on carbodydesign.com (must end with '/').
        image_url: Site-relative URL of the listing thumbnail; used as the
            single-image fallback when the article exposes no gallery.
        kw: Keyword; selects the output directory car_body/<kw>/.

    Side effects:
        Writes files under car_body/<kw>/<slug>/ and appends *url* to the
        module-level ``urllist`` on success.
    """
    # url  = 'https://www.carbodydesign.com/2022/05/win-an-iaad-scholarship-with-the-pininfarina-concept-car/'
    print('begin:', url)
    # The article slug is the second-to-last path segment (URL ends in '/').
    name = url.split('/')[-2]
    # print(url)
    file = f'car_body/{kw}/{name}/'
    if os.path.exists(file):
        name = os.listdir(file)
        # More than one file means at least one image+label pair was saved.
        if len(name) > 1:
            print('文件夹已经存在,文件已经爬取')
            return
    else:
        os.mkdir(file)
    # Browser-like headers; intentionally shadows the module-level `headers`.
    headers = {
        "authority": "www.carbodydesign.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "cache-control": "max-age=0",
        "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Microsoft Edge\";v=\"114\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58"
    }
    # Cookies captured from a browser session. NOTE(review): defined but never
    # passed to requests.get below, so they currently have no effect.
    cookies = {
        "_ga": "GA1.1.915657177.1687862302",
        "_cc_id": "6b986decaf7cd340dc6120074a861af9",
        "__utmz": "74397312.1688030161.2.2.utmcsr=bing|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided)",
        "panoramaId_expiry": "1688116562434",
        "panoramaId": "b30650d9c6441682c7d40cfb6384a9fb927a0046fc0a801d67738930e2d9d019",
        "panoramaIdType": "panoDevice",
        "PHPSESSID": "cf5a71fe3851500b3e79664b2d810d41",
        "__utma": "74397312.915657177.1687862302.1688030161.1688108192.3",
        "__utmc": "74397312",
        "__gads": "ID=b1609b542052067e-22f0b629adb400c0:T=1687862302:RT=1688109872:S=ALNI_MZzrKgKdp9hDgFPE-kUgPWNMc53ew",
        "__gpi": "UID=00000c724da4dc41:T=1687862302:RT=1688109872:S=ALNI_MY44JFLMqa6VgoUQ6R-q0cAZiGSrA",
        "__utmb": "74397312.19.10.1688108192",
        "_ga_JTW9GYLG4B": "GS1.1.1688108192.3.1.1688110046.0.0.0"
    }

    try:
        response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        # NOTE(review): requests.session() creates a brand-new Session; closing
        # it is a no-op with respect to the GET above.
        requests.session().close()
        soups = BeautifulSoup(response.text, 'lxml')
        tag_list = []
        description = ''
        title = ''
        flag = 1
        images = []
        # Title, tags and description are each best-effort: failures are
        # logged and the crawl continues with empty values.
        try:
            title = soups.find('div', id='article-top-box').find('h1').text
            print(title)
        except Exception as e:
            print('no found title', e)
        try:
            datas = soups.find('div', class_='cbd-panel-body')
            tags = datas.find_all('a', rel='tag')
            for tag in tags:
                tag_list.append(tag.text)
            # print(tag_list)
        except Exception as e:
            print('no found tags', e)
        try:
            description = soups.find('div', id='article-top-box').find('div', class_='article-intro').text
            print(description)
        except Exception as e:
            print('no found description', e)
        try:
            # Preferred source: the article's inline thumbnail gallery.
            # A missing 'post-thumbnails-box' raises here and drops us into
            # the bare-except single-image fallback below.
            IMAGE_GALLERY = soups.find('div', class_='post-thumbnails-box').find_all('img')
            # print(IMAGE_GALLERY)
            if len(IMAGE_GALLERY)>2:
                for IMAGE in IMAGE_GALLERY:
                    # Only lazy-loaded entries carry the real image URL.
                    if "data-lazy-src" in str(IMAGE):
                        # print(i.get("data-lazy-src"))
                        url4 = 'https://www.carbodydesign.com' + IMAGE.get("data-lazy-src")
                        # Strip the WordPress thumbnail size suffix (e.g.
                        # "-300x200") to request the full-size image.
                        pattern = re.compile(r'-\d*?x\d*')
                        if pattern.search(url4):
                            tmp = pattern.search(url4).group()
                            url4 = url4.replace(str(tmp), '')
                        image_filename = url4.split('/')[-1]
                        txt_filename = image_filename.split('.')[0] + '.txt'
                        # Label format: title,description,tag1,tag2,...
                        txt_data = str(title) + ',' + str(description)
                        for tag in tag_list:
                            txt_data += ',' + str(tag)
                        save_image(image_filename, url4, PWD=file)
                        save_txt(txt_filename, txt_data, PWD=file)
                        images.append(
                            {
                                "img_file": image_filename,
                                "label_file": txt_filename,
                                "tag": tag_list,
                                "title": title,
                                "description": description
                            })
            else:
                # Few or no inline thumbnails: try the article's dedicated
                # /gallery/ page instead.
                newurl = 'https://www.carbodydesign.com/' + 'gallery/' + url.split('https://www.carbodydesign.com/')[-1]
                # for i  in range(1,len(IMAGE_GALLERY)+1):
                url3 = newurl
                url3_bak = newurl + '{}/'.format(1)  # NOTE(review): unused
                # print(url3)
                data = parse_gallery(url3)
                if data:
                    for i in data:
                        if "data-lazy-src" in str(i):
                            # print(i.get("data-lazy-src"))
                            url4 = 'https://www.carbodydesign.com' + i.get("data-lazy-src")
                            # Same size-suffix stripping as above.
                            pattern = re.compile(r'-\d*?x\d*')
                            if pattern.search(url4):
                                tmp = pattern.search(url4).group()
                                url4 = url4.replace(str(tmp), '')
                            # print('url4',url4)
                            image_filename = url4.split('/')[-1]
                            txt_filename = image_filename.split('.')[0] + '.txt'
                            # print('filename',filename)

                            txt_data = str(title) + ',' + str(description)
                            for tag in tag_list:
                                txt_data += ',' + str(tag)
                            save_image(image_filename, url4, PWD=file)
                            save_txt(txt_filename, txt_data, PWD=file)
                            images.append(
                                {
                                    "img_file": image_filename,
                                    "label_file": txt_filename,
                                    "tag": tag_list,
                                    "title": title,
                                    "description": description
                                })

        except:
            # No gallery markup at all ("only one image"): fall back to the
            # single listing thumbnail passed in as image_url.
            print('只有一张图片')
            flag = 0
            try:
                url4 = 'https://www.carbodydesign.com' + image_url
                print(url4)
                pattern = re.compile(r'-\d*?x\d*')
                if pattern.search(url4):
                    # print(pattern.search(url).group())
                    tmp = pattern.search(url4).group()
                    url4 = url4.replace(str(tmp), '')
                print(url4)
                image_filename = url4.split('/')[-1]
                txt_filename = image_filename.split('.')[0] + '.txt'
                save_image(image_filename, url4, PWD=file)

                # Single-image pages use a different DOM layout, so title and
                # description are re-extracted from alternative elements.
                try:
                    title = soups.find('div', class_='cbd-panel floating-panel').find('h1').get_text()
                    print(title)
                except Exception as e:
                    print('no found title', e)
                try:
                    description = soups.find('div', class_='description-box').find('p').text
                    print(description)
                except Exception as e:
                    print('no found description', e)
                txt_data = str(title) + ',' + str(description)
                for tag in tag_list:
                    txt_data += ',' + str(tag)
                save_txt(txt_filename, txt_data, PWD=file)
                images.append(
                    {
                        "img_file": image_filename,
                        "label_file": txt_filename,
                        "tag": tag_list,
                        "title": title,
                        "description": description
                    })
            except Exception as e:
                print('保存单张图片失败', e)

        # Folder-level index of everything saved for this article.
        jsondata = {"page_url": url, "images": images}
        save_summary(file, 'summary.json', jsondata)
        if len(images) == 0:
            # Nothing was downloaded: remove the now-useless folder.
            print('没有下载成功的图片,删除创建的文件夹')
            if os.path.exists(file):
                shutil.rmtree(file)
        else:
            # Record success so later runs can skip this article.
            urllist.append(url)
    except Exception as e:
        print('get url data fialed,删除创建的文件夹', e)
        if os.path.exists(file):
            shutil.rmtree(file)


def parse_gallery(url):
    """Fetch an article's /gallery/ page and return its thumbnail <img> tags.

    Args:
        url: Gallery page URL, e.g.
            https://www.carbodydesign.com/gallery/<year>/<month>/<slug>/

    Returns:
        A list of bs4 Tag objects for the thumbnails, or None when the
        request or parsing fails.
    """
    try:
        print('begin:', url)
        response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        soups = BeautifulSoup(response.text, 'lxml')
        datas = soups.find("div", class_="gallery-thumbnails")
        return datas.find_all('img')
    except Exception as e:
        # Narrowed from a bare except (which also swallowed SystemExit /
        # KeyboardInterrupt) and now reports the actual cause.
        print('no got gallery img', e)
        return None
    # print(img)


# Article URLs whose images were downloaded successfully; appended to
# car_body/<kw>/urllist.txt at the end of main() so later runs can skip them.
urllist = []


def main():
    """Crawl keyword search results and download all article images.

    For each keyword, walks result pages 11..N (N from ``page_info``),
    extracts article links and thumbnail URLs, and delegates the per-article
    work to parse_url(). URLs already present in the global ``done_list``
    are skipped. Successfully crawled article URLs (collected in the global
    ``urllist``) are appended to car_body/<kw>/urllist.txt at the end.
    """
    kws = [
        'vehicle'
    ]
    # Known number of result pages per keyword (observed manually on the site).
    page_info = {'concept car': 216, 'futuristic car': 33,
                 'car': 300, 'vehicle': 164,
                 'auto': 187, 'automobile': 30,
                 'small car': 43, 'truck': 14,
                 'Electric Concept car': 50,
                 }

    for kw in kws:
        path = f'car_body/{kw}/'
        # makedirs(exist_ok=True) replaces the racy exists()/mkdir() pair and
        # also creates the 'car_body' parent if it is missing.
        os.makedirs(path, exist_ok=True)
        # Upper page bound now comes from page_info (it was hard-coded to 164,
        # which is exactly page_info['vehicle']); unknown keywords fall back
        # to 164 to preserve the old behavior.
        for page in range(11, page_info.get(kw, 164)):
            time.sleep(10)  # throttle: be polite to the server
            print('begin:page', kw, page)
            try:
                response = get_kw_image(kw=kw, pageNo=page)
                soups = BeautifulSoup(response, 'lxml')
                datas = soups.find_all('div', class_='cbd-column item-post-grid')
                print('len(datas):', len(datas))
                if len(datas) == 0:
                    break  # ran past the last page for this keyword
                for data in datas:
                    try:
                        url2 = data.find('a')['href']
                        image_url = data.find('a').find('img')['data-lazy-src']
                        print('image_url:', image_url)
                        if url2 in done_list:  # crawled in a previous run
                            continue
                        parse_url(url2, image_url, kw)
                    except Exception as E:
                        print(E)
            except Exception as E:
                print(E)
    # NOTE(review): this relies on `kw` leaking out of the loop above, so with
    # multiple keywords every URL lands in the LAST keyword's urllist.txt.
    with open(f'car_body/{kw}/urllist.txt', 'a') as f:
        for line in urllist:
            f.write(line + '\n')


if __name__ == '__main__':
    # url = 'https://www.carbodydesign.com/tutorial/71090/concept-car-digital-sketching-tutorial/'
    # parse_url(url,kw = 'concept car')
    # url = 'https://www.carbodydesign.com/gallery/2022/05/win-an-iaad-scholarship-with-the-pininfarina-concept-car/1/'
    # parse_gallery(url)
    # Seed done_list with article URLs recorded by a previous run so main()
    # skips them. NOTE(review): hard-coded to the 'concept car' folder even
    # though main() currently crawls 'vehicle'; crashes if the file is absent.
    done_list =[]
    with open('car_body/concept car/urllist.txt') as f:
        for line in f:
            done_list.append(line.strip('\n'))
    f.close()
    print('len(donelist):', len(done_list))
    main()
