#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :down_images.py
# @Time      :2023/7/5
# @Author    :CL
# @email     :1037654919@qq.com

import time
import shutil
from lxml import html, etree
from bs4 import BeautifulSoup
import requests
import os
import re
import json
from multiprocessing import Pool
from utils import MongoDBUtil, get_html, mongo_manager
# Shared DB helper instance and the seed collection consumed by down_images().
# NOTE(review): this rebinding shadows the imported MongoDBUtil class — any
# later attempt to construct a second instance would fail; consider renaming.
MongoDBUtil = MongoDBUtil()
car_body_url = mongo_manager("car_body_url", db="car_images")


# Local proxy endpoint ("pigcha") routed through for every outbound request.
# NOTE(review): assumes a proxy is listening on 127.0.0.1:15732 — confirm.
proxies = {'http': '127.0.0.1:15732',
           'https': '127.0.0.1:15732'}

# Browser-like default headers for carbodydesign.com requests.
headers = {
    "authority": "www.carbodydesign.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "referer": "https://www.carbodydesign.com/page/2/?s=car",
    "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.41"
}


def save_summary(path, file, jsondata):
    """Serialize *jsondata* as JSON into ``path + file``.

    Args:
        path: Directory prefix; expected to end with a path separator.
        file: File name appended to *path*.
        jsondata: Any JSON-serializable object (here: the per-article summary).
    """
    # json.dump streams straight into the handle; the context manager closes
    # the file, so the old redundant f.close() after the with-block is gone.
    # ensure_ascii=False keeps non-ASCII titles/descriptions readable.
    with open(path + file, 'w', encoding='utf-8') as f:
        json.dump(jsondata, f, ensure_ascii=False)


def save_txt(txt_filename, data, PWD):
    """Write the label text *data* to ``PWD + txt_filename``.

    Args:
        txt_filename: Target file name (e.g. ``image-stem.txt``).
        data: Comma-joined title/description/tags string.
        PWD: Directory prefix; expected to end with a path separator.
    """
    # Explicit UTF-8: titles and descriptions may contain non-ASCII text,
    # which would break under a non-UTF-8 locale default encoding.
    # The context manager closes the file; the old trailing f.close() was
    # a redundant no-op on an already-closed handle.
    with open(PWD + txt_filename, 'w', encoding='utf-8') as f:
        f.write(data)


def save_image(name, url, PWD):
    """Download *url* and store it as ``PWD + name``.

    Returns:
        1 on success, 0 on failure, and ``None`` when the file already
        exists (skipped — callers treat any non-zero result as "proceed").
    """
    print('save image:', url)
    filename = PWD + name
    if os.path.isfile(filename):  # already fetched on a previous run
        print("文件存在：", filename)
        return
    try:
        response = requests.get(url, proxies=proxies, stream=True, timeout=30)
        # Fail on HTTP errors instead of saving an HTML error page as a .jpg.
        response.raise_for_status()
        with open(filename, 'wb') as fd:
            # Actually stream in chunks; the old `.content` read defeated
            # stream=True by buffering the whole body in memory.
            for chunk in response.iter_content(chunk_size=8192):
                fd.write(chunk)
        response.close()
        return 1
    except Exception as e:
        print(url, '保存图片失败', e)
        # Remove a partial download so the exists-check above does not
        # silently skip this image on the next run.
        if os.path.isfile(filename):
            os.remove(filename)
        return 0


# images栏目部分 翻页
def get_page(tag, pageNo):
    headers = {
        "authority": "www.carbodydesign.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "referer": "https://www.carbodydesign.com/page/2/?s=car",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.41"
    }
    url = f"https://www.carbodydesign.com/{tag}/page/{pageNo}/"
    # urltt= 'https://www.carbodydesign.com/design-sketch-board/page/204/'
    response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
    print('response.url', response.url)
    requests.session().close()
    if response.text:
        return response.content.decode()


# 关键词爬取
def get_kw_image(kw, pageNo):
    headers = {
        "authority": "www.carbodydesign.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "referer": "https://www.carbodydesign.com/page/2/?s=car",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.41"
    }
    url = f"https://www.carbodydesign.com/page/{pageNo}/?s={kw}"
    # urltt= 'https://www.carbodydesign.com/design-sketch-board/page/204/'
    response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
    print('response.url', response.url)
    requests.session().close()
    if response.text:
        # soups = BeautifulSoup(response.content.decode(), 'lxml')
        # datas = soups.find_all('div', class_='cbd-column item-post-grid')
        # print('len(datas):', len(datas))
        # print(datas[0])
        return response.content.decode()


def create_wenjianjia(basefile):
    """Create directory *basefile* if it does not exist yet.

    Args:
        basefile: Directory path to create.
    """
    if os.path.exists(basefile):
        print('文件夹已经存在')
    else:
        # makedirs with exist_ok=True closes the check/create race when
        # several pool workers target the same folder, and also creates
        # missing parent directories (os.mkdir would raise).
        os.makedirs(basefile, exist_ok=True)


def down_images(seed):
    """Download every image referenced by one seed document.

    Creates ``car_body_mondb/<kw>/<article-slug>/``, saves each image plus a
    companion ``.txt`` label file (title, description, tags joined by commas)
    and a ``summary.json``, then writes ``status`` ('success'/'unsuccess')
    and ``lens`` (number of saved images) back to the ``car_body_url``
    collection. Runs as a multiprocessing.Pool worker, hence the fresh
    MongoDB handle opened per call instead of sharing the module-level one.
    """
    car_body_url = mongo_manager("car_body_url", db="car_images")
    # Sample seed document:
#     seed  = {
#   "_id": "https://www.carbodydesign.com/2016/10/ied-and-pininfarina-announce-new-concept-car-for-geneva-2017/",
#   "kw": "concept car",
#   "page": 2,
#   "url2": "https://www.carbodydesign.com/2016/10/ied-and-pininfarina-announce-new-concept-car-for-geneva-2017/",
#   "image_url": "/media/2016/10/IED-Students-and-the-Pininfarina-Cambiano-Concept-01-440x330.jpg"
# }
    url = seed['_id']
    image_url = seed['image_url']
    kw = seed['kw']
    print('begin 访问网页:', url)
    # The article slug (second-to-last URL segment) names the content folder.
    name = url.split('/')[-2]
    # Create the per-keyword directory.
    file = f'car_body_mondb/{kw}/'
    if os.path.exists(file):
        pass
    else:
        os.mkdir(file)
    # Create the per-article content folder; if it already holds more than
    # one file, the article was crawled before: mark success and skip.
    file = f'car_body_mondb/{kw}/{name}/'
    if os.path.exists(file):
        name = os.listdir(file)  # NOTE: reuses `name` as the listing
        if len(name) > 1:
            print('文件夹已经存在,文件已经爬取')
            seed['status'] = 'success'
            car_body_url.updateOne({'_id': seed['_id']}, seed)
            car_body_url.close()
            return
    else:
        os.mkdir(file)
    headers = {
        "authority": "www.carbodydesign.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "cache-control": "max-age=0",
        "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Microsoft Edge\";v=\"114\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58"
    }

    try:
        response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        # NOTE(review): this opens and closes a brand-new Session — it does
        # not close the connection used by the request above.
        requests.session().close()

        soups = BeautifulSoup(response.text, 'lxml')
        tag_list = []
        description = ''
        title = ''
        images = []
        # Title / tags / description are each best-effort: a missing element
        # only logs and leaves the default empty value.
        try:
            title = soups.find('div', id='article-top-box').find('h1').text
            # print(title)
        except Exception as e:
            print('no found title', e)
        try:
            datas = soups.find('div', class_='cbd-panel-body')
            tags = datas.find_all('a', rel='tag')
            for tag in tags:
                tag_list.append(tag.text)
            # print(tag_list)
        except Exception as e:
            print('no found tags', e)
        try:
            description = soups.find('div', id='article-top-box').find('div', class_='article-intro').text
            # print(description)
        except Exception as e:
            print('no found description', e)

        try:
            # Try to locate a gallery; if this lookup raises, the article has
            # only a single image (handled in the outer except below).
            IMAGE_GALLERY = soups.find('div', class_='post-thumbnails-box').find_all('img')
            try:
                # The gallery lives under /gallery/<same article path>.
                newurl = 'https://www.carbodydesign.com/' + 'gallery/' + url.split('https://www.carbodydesign.com/')[-1]
                url3 = newurl
                data = parse_gallery(url3)
                if data:
                    for i in data:
                        if "data-lazy-src" in str(i):
                            # print(i.get("data-lazy-src"))
                            url4 = 'https://www.carbodydesign.com' + i.get("data-lazy-src")
                            # Strip the thumbnail size suffix (e.g. "-440x330")
                            # to request the full-resolution image.
                            pattern = re.compile(r'-\d*?x\d*')
                            if pattern.search(url4):
                                tmp = pattern.search(url4).group()
                                url4 = url4.replace(str(tmp), '')
                            # print('url4',url4)
                            image_filename = url4.split('/')[-1]
                            # NOTE(review): splits on '.jpg' here but on '.'
                            # in the other branches — non-.jpg names keep
                            # their extension in the stem here.
                            txt_filename = image_filename.split('.jpg')[0] + '.txt'
                            # print('filename',filename)
                            txt_data = str(title) + ',' + str(description)
                            for tag in tag_list:
                                txt_data += ',' + str(tag)

                            result = save_image(image_filename, url4, PWD=file)
                            if result ==0:
                                continue
                            save_txt(txt_filename, txt_data, PWD=file)
                            images.append(
                                {
                                    "img_file": image_filename,
                                    "label_file": txt_filename,
                                    "tag": tag_list,
                                    "title": title,
                                    "description": description
                                })
                # Fallback: walk the numbered gallery pages one by one.
                else:
                    for i in range(1,len(IMAGE_GALLERY)):
                        url3_bak = newurl + '{}/'.format(i)
                        try:
                            response = requests.get(url3_bak, headers=headers, proxies=proxies, timeout=10)
                            soups =BeautifulSoup(response.text,'lxml')
                            data =soups.find('div',id = 'single-image').find('img')
                            if data["data-lazy-src"]:
                                url4 = 'https://www.carbodydesign.com' + data.get("data-lazy-src")
                                # Same thumbnail-suffix stripping as above.
                                pattern = re.compile(r'-\d*?x\d*')
                                if pattern.search(url4):
                                    tmp = pattern.search(url4).group()
                                    url4 = url4.replace(str(tmp), '')
                                # print('url4',url4)
                                image_filename = url4.split('/')[-1]
                                txt_filename = image_filename.split('.')[0] + '.txt'
                                # print('filename',filename)

                                txt_data = str(title) + ',' + str(description)
                                for tag in tag_list:
                                    txt_data += ',' + str(tag)
                                result= save_image(image_filename, url4, PWD=file)
                                if result == 0:
                                    continue
                                save_txt(txt_filename, txt_data, PWD=file)
                                images.append(
                                    {
                                        "img_file": image_filename,
                                        "label_file": txt_filename,
                                        "tag": tag_list,
                                        "title": title,
                                        "description": description
                                    })
                        except Exception as e:
                            print('备用链接',url3_bak,e)

            except Exception as e:
                print(e)

        except:
            # No gallery container found: single-image article.
            print(url,'只有一张图片')
            try:
                url4 = 'https://www.carbodydesign.com' + image_url
                pattern = re.compile(r'-\d*?x\d*')
                if pattern.search(url4):
                    # print(pattern.search(url).group())
                    tmp = pattern.search(url4).group()
                    url4 = url4.replace(str(tmp), '')
                print(url4)
                # Image file name.
                image_filename = url4.split('/')[-1]
                # Label (.txt) file name.
                txt_filename = image_filename.split('.')[0] + '.txt'


                # Single-image pages use a different layout for title/description.
                try:
                    title = soups.find('div', class_='cbd-panel floating-panel').find('h1').get_text()
                    print(title)
                except Exception as e:
                    print('no found title', e)
                try:
                    description = soups.find('div', class_='description-box').find('p').text
                    print(description)
                except Exception as e:
                    print('no found description', e)
                result = save_image(image_filename, url4, PWD=file)   # save the image
                if result==1:
                    txt_data = str(title) + ',' + str(description)
                    for tag in tag_list:
                        txt_data += ',' + str(tag)
                    save_txt(txt_filename, txt_data, PWD=file)   # save the label text
                    images.append(
                        {
                            "img_file": image_filename,
                            "label_file": txt_filename,
                            "tag": tag_list,
                            "title": title,
                            "description": description
                        })
            except Exception as e:
                print('保存单张图片失败', e)
        jsondata = {"page_url": url, "images": images}
        save_summary(file, 'summary.json', jsondata)       # persist the article summary
        seed['status'] = 'success'
        seed['lens'] = len(images)
        if len(images) == 0:
            # Nothing downloaded: remove the folder so a retry starts clean.
            print('没有下载成功的图片,删除创建的文件夹')
            if os.path.exists(file):
                shutil.rmtree(file)
            seed['status'] = 'unsuccess'
    except Exception as e:
        # Request/parse failed outright: clean up and mark for retry.
        print('get url data fialed,删除创建的文件夹', e)
        if os.path.exists(file):
            shutil.rmtree(file)
        seed['status'] = 'unsuccess'
    car_body_url.updateOne({'_id': seed['_id']}, seed)
    car_body_url.close()

def test(seed):
    """Diagnostic dry-run variant of down_images().

    Mirrors the folder setup and page fetch of down_images(), resolves the
    gallery URL and calls parse_gallery(), but skips the actual image
    downloads (the per-image loop body is stubbed out). Mutates
    ``seed['status']`` in memory only — nothing is written back to MongoDB.
    """
    url = seed['_id']
    image_url = seed['image_url']
    kw = seed['kw']
    print('begin 访问网页:', url)
    name = url.split('/')[-2]
    # Create the per-keyword directory.
    file = f'car_body_mondb/{kw}/'
    if os.path.exists(file):
        print(file,'文件夹已经存在')
        pass
    else:
        os.mkdir(file)
        print(file, '文件夹创建成功')
    # Create the per-article folder; more than one file inside means the
    # article was already crawled.
    file = f'car_body_mondb/{kw}/{name}/'
    if os.path.exists(file):
        name = os.listdir(file)
        if len(name) > 1:
            print('文件夹已经存在,文件已经爬取')
            return
    else:
        os.mkdir(file)
        print(file, '文件夹创建成功')
    headers = {
        "authority": "www.carbodydesign.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "cache-control": "max-age=0",
        "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Microsoft Edge\";v=\"114\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\"",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58"
    }

    try:
        response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        # NOTE(review): opens and closes a fresh Session — a no-op for the
        # request above.
        requests.session().close()
        soups = BeautifulSoup(response.text, 'lxml')
        tag_list = []
        description = ''
        title = ''
        images = []
        # Best-effort metadata extraction, same as down_images().
        try:
            title = soups.find('div', id='article-top-box').find('h1').text
            # print(title)
        except Exception as e:
            print('no found title', e)
        try:
            datas = soups.find('div', class_='cbd-panel-body')
            tags = datas.find_all('a', rel='tag')
            for tag in tags:
                tag_list.append(tag.text)
            # print(tag_list)
        except Exception as e:
            print('no found tags', e)
        try:
            description = soups.find('div', id='article-top-box').find('div', class_='article-intro').text
            # print(description)
        except Exception as e:
            print('no found description', e)

        try:
            IMAGE_GALLERY = soups.find('div', class_='post-thumbnails-box').find_all('img')
            # print(IMAGE_GALLERY)
            try:
                newurl = 'https://www.carbodydesign.com/' + 'gallery/' + url.split('https://www.carbodydesign.com/')[-1]
                # for i  in range(1,len(IMAGE_GALLERY)+1):
                url3 = newurl
                url3_bak = newurl + '{}/'.format(1)
                # print(url3)
                data = parse_gallery(url3)
                if data:
                    # print(data)
                    # Download loop intentionally stubbed out for this dry run.
                    for i in data:
                        pass
                        # if "data-lazy-src" in str(i):
                        #     # print(i.get("data-lazy-src"))
                        #     url4 = 'https://www.carbodydesign.com' + i.get("data-lazy-src")
                        #     pattern = re.compile(r'-\d*?x\d*')
                        #     if pattern.search(url4):
                        #         tmp = pattern.search(url4).group()
                        #         url4 = url4.replace(str(tmp), '')
                        #     print('url4',url4)

                else:
                    print('url:',url,data)
                seed['status'] = 'success'
            except Exception as e:
                print(e)
                seed['status'] = 'unsuccess'

        except:
            # No gallery container: single-image article.
            print('只有一张图片',url)

    except Exception as e:
        print('get url data fialed,删除创建的文件夹', e)
        if os.path.exists(file):
            shutil.rmtree(file)


def parse_gallery(url):
    """Fetch a gallery page and return its ``<img>`` elements.

    Tries the ``gallery-thumbnails`` container first, then falls back to the
    ``item-box-ext`` layout. Retries up to 5 times with a 5-second back-off.

    Args:
        url: Gallery URL (``.../gallery/<article path>/``).

    Returns:
        A list of bs4 ``<img>`` tags, or ``None`` when every attempt fails.
    """
    for attempt in range(5):
        try:
            print('begin:', url)
            response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
            soups = BeautifulSoup(response.text, 'lxml')
            try:
                datas = soups.find("div", class_="gallery-thumbnails")
                img = datas.find_all('img')
                return img
            except AttributeError:
                # find() returned None — no thumbnail grid on this layout;
                # fall back to the item-box layout.
                datas = soups.find_all("div", class_="item-box-ext")
                img = []
                for data in datas:
                    img.append(data.find('div', class_='item-image-box').find('img'))
                return img
        except Exception as e:
            # Narrowed from a bare `except:` so Ctrl-C / SystemExit are no
            # longer swallowed; log the failure before backing off.
            print(url, 'retry', attempt + 1, e)
            time.sleep(5)
    print(url, 'no got gallery img')
    return None

if __name__ == '__main__':

    # Gather every seed still marked unsuccessful, then fan the downloads
    # out across a pool of worker processes.
    pool = Pool(processes=10)
    seeds = car_body_url.findAll({"status": 'unsuccess'})
    pending = list(seeds)
    print(len(pending))
    car_body_url.close()

    # run
    pool.map(down_images, pending)
    pool.close()  # no further tasks will be submitted
    pool.join()   # wait until every worker has finished

    # Alternative entry points kept for reference:
    # - dry run:  pool.map(test, pending) with the same close/join sequence.
    # - reset DB: iterate car_body_url.findAll({"status": 'success'}),
    #   set each seed['status'] = None and updateOne({'_id': seed['_id']}, seed).