#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :formtrends_util.py
# @Time      :2023/6/29 
# @Author    :CL
# @email     :1037654919@qq.com

import time
import os
import requests
import time
import shutil
import json
from utils import MongoDBUtil, get_html, mongo_manager
from bs4 import BeautifulSoup
from pprint import pprint
from multiprocessing import Pool

# NOTE(review): this rebinds the imported MongoDBUtil *class* name to an
# instance; the instance is only referenced from commented-out code below.
MongoDBUtil = MongoDBUtil()

# Module-level collection handle. Worker functions (save_data, mongodb_seed)
# open their own handles instead — presumably because this one is not safe
# to share across multiprocessing Pool workers; TODO confirm.
formtrends_url = mongo_manager("formtrends_url", db="car_images")
# Local HTTP/HTTPS proxy (pigcha)
proxies = {'http': '127.0.0.1:15732',
           'https': '127.0.0.1:15732'}

# Default browser-like request headers shared by all page fetches.
headers = {
    "Referer": "https://www.formtrends.com/rolls-royce-returns-to-coachbuilding-roots-with-new-boat-tail/",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58",
    "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Microsoft Edge\";v=\"114\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Linux\""
}


def get_main_page(url):
    """Fetch a listing page with up to 5 retries.

    Returns the ``requests.Response`` on HTTP 200, or ``None`` after all
    attempts fail.

    Fixes vs. original: the local ``headers`` dict duplicating the identical
    module-level one is dropped; the no-op ``requests.session().close()``
    (which created a brand-new session and closed it, doing nothing for the
    request just made) is removed; the back-off sleep now also runs after an
    exception, not only after a non-200 response.
    """
    for _ in range(5):
        try:
            response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
            if response.status_code == 200:
                return response
        except Exception as e:
            print(e)
        time.sleep(5)  # back off before the next attempt
    return None


def get_page(url):
    """Fetch an article page with up to 5 retries.

    Returns the ``requests.Response`` on HTTP 200, or ``None`` after all
    attempts fail.

    Fixes vs. original: removed the no-op ``requests.session().close()``;
    the back-off sleep now also applies to the exception path, which
    previously retried immediately.
    """
    print('begin get：', url)
    for _ in range(5):
        try:
            response = requests.get(url, headers=headers, proxies=proxies, timeout=5)
            if response.status_code == 200:
                return response
        except Exception as e:
            print(e)
        time.sleep(3)  # back off before the next attempt
    return None


def _clean_img_src(src):
    """Strip wp.com CDN mirror prefixes and the query string (resize
    parameters) from an image URL, leaving the original asset URL."""
    for cdn in ('i0.wp.com/', 'i1.wp.com/', 'i2.wp.com/'):
        src = src.replace(cdn, '')
    return src.split('?')[0]


def parse_data_page(response):
    """Parse one article page into ``{'title', 'tags', 'desc', 'imgs'}``.

    Every section is extracted best-effort: when an element is missing a
    notice is printed and the field keeps its empty default. Bare
    ``except:`` clauses were narrowed to ``except Exception`` so
    KeyboardInterrupt/SystemExit still propagate, and the CDN-prefix
    stripping (previously copy-pasted three times) now goes through
    ``_clean_img_src``.
    """
    soups = BeautifulSoup(response.text, 'lxml')
    title = ''
    tags = []
    imgs = []
    desc = ''
    # Title, e.g. <div class="title-wrap"><h1 class="entry-title ...">...</h1>
    try:
        for tt in soups.find_all('div', class_='title-wrap'):
            try:
                title = tt.find('h1').get_text()
                print(title)
                break
            except Exception:
                pass
    except Exception as e:
        print('no find title ', e)
    # First paragraph of the article body becomes the description.
    try:
        for p in soups.find('div', class_='tipi-cols clearfix sticky--wrap').find_all('p'):
            try:
                desc = p.get_text()
                break
            except Exception:
                pass
    except Exception as e:
        print('no find desc ', e)
    # Tag links from the entry footer.
    try:
        tag_links = soups.find('footer', class_='entry-footer').find(
            'div', class_='post-tags footer-block-links clearfix').find(
            'div', class_='block-elements').find_all('a')
        for a in tag_links:
            try:
                tags.append(a.get_text())
            except Exception:
                pass
    except Exception as e:
        print('no find tags ', e)
    # Hero (cover) image.
    try:
        img = soups.find('div', class_='hero').find('img')
        imgs.append(_clean_img_src(str(img['src'])))
    except Exception as e:
        print('no find img ', e)
    # Inline <figure> images in the article body.
    try:
        for figure in soups.find('div', class_='tipi-cols clearfix sticky--wrap').find_all('figure'):
            try:
                imgs.append(_clean_img_src(str(figure.find('img')['src'])))
            except Exception:
                pass
    except Exception as e:
        print('no find figures ', e)
    # Fallback: gallery rows. NOTE(review): the original author observed
    # these images failing to download; the cause is still uninvestigated.
    if len(imgs) <= 1:
        try:
            for gallery in soups.find_all('div', class_='gallery-row'):
                for galleryimg in gallery.find_all('img'):
                    try:
                        imgs.append(_clean_img_src(str(galleryimg['src'])))
                    except Exception:
                        pass
        except Exception as e:
            print('no find imgs ', e)
    return {'title': title, 'tags': tags, 'desc': desc, 'imgs': imgs}


def parse_main_page(response):
    """Extract the article URLs listed on a category/search results page."""
    soup = BeautifulSoup(response.text, 'lxml')
    listing = soup.find('div', class_='block block-61 tipi-flex preview-review-bot')
    article_urls = []
    for article in listing.find_all('article'):
        try:
            anchor = article.find('a', class_='mask-img')
            article_urls.append(anchor['href'])
        except Exception as e:
            print(e)
    return article_urls


# print(response.text)
def down_image(file, url):
    """Download *url* into *file*, retrying up to 3 times.

    Returns 1 on success, 0 when every attempt fails.

    Fixes vs. original: the body is written to disk only on HTTP 200 — the
    original saved error pages (404/5xx) and reported success; the no-op
    ``requests.session().close()`` is gone; the back-off sleep now runs on
    any failed attempt, not only on exceptions.
    """
    print("开始下载：", url)
    for _ in range(3):
        try:
            response = requests.get(url, proxies=proxies, timeout=10)
            if response.status_code == 200:
                with open(file, 'wb') as fd:
                    fd.write(response.content)
                return 1
        except Exception:
            pass
        time.sleep(5)  # back off before the next attempt
    print("下载失败了", url)
    return 0


def save_txt(PWD, txt_filename, data):
    """Write *data* to ``PWD + txt_filename`` as UTF-8 text.

    Fixes vs. original: dropped the redundant ``f.close()`` after the
    ``with`` block (the context manager already closed the file) and pinned
    the encoding to UTF-8 so labels with non-ASCII text are written the
    same way on every platform.
    """
    with open(PWD + txt_filename, 'w', encoding='utf-8') as f:
        f.write(data)


def save_summary(path, file, jsondata):
    """Serialize *jsondata* as JSON into ``path + file``.

    Fixes vs. original: dropped the redundant ``f.close()`` after the
    ``with`` block and pinned the file encoding to UTF-8; ``json.dump``
    streams straight to the file instead of building an intermediate
    string.
    """
    with open(path + file, 'w', encoding='utf-8') as f:
        json.dump(jsondata, f)


def save_data(insert_data):
    """Persist one parsed article: download its images, write a ``.txt``
    label per image plus a ``summary.json``, then record the outcome
    ('success' / 'unsuccess') in the MongoDB collection.

    ``insert_data`` must carry ``url``, ``imgs``, ``title``, ``desc``,
    ``tags`` and optionally ``key_word`` (defaults to 'galleries').

    Fixes vs. original: ``dict.get`` replaces a bare ``try/except`` around
    the missing-key case; ``os.makedirs`` replaces ``os.mkdir`` so the
    parent ``formtrends/{key_word}/`` directory is created when it does not
    exist yet (e.g. when seeded from mongodb_seed()); the label text, which
    is identical for every image on the page, is built once instead of per
    image.
    """
    formtrends_url = mongo_manager("formtrends_url", db="car_images")
    url = insert_data['url']
    key_word = insert_data.get('key_word', 'galleries')
    pathname = str(url).rsplit('/', 2)[-2]
    path = f'formtrends/{key_word}/{pathname}/'
    images = []
    if os.path.exists(path):
        name = os.listdir(path)
        if len(name) > 2 and 'summary.json' in name:
            # Already crawled: just refresh the DB record.
            print('文件夹已经存在,文件已经爬取，更新数据库')
            insert_data['status'] = 'success'
            formtrends_url.updateOne({'_id': insert_data['_id']}, insert_data)
            formtrends_url.close()
            return
        # Partial crawl: wipe and redo.
        print('删除文件夹，重跑数据', path)
        shutil.rmtree(path)
        os.makedirs(path)
    else:
        os.makedirs(path)

    # The label text is the same for every image on this page.
    txt_data = str(insert_data['title']) + ',' + str(insert_data['desc'])
    for tag in insert_data['tags']:
        txt_data += ',' + str(tag)

    for img_src in insert_data['imgs']:
        filename = str(img_src).rsplit('/', 1)[-1]
        if down_image(file=str(path) + str(filename), url=img_src) == 0:
            continue
        txt_filename = filename.rsplit('.', 1)[0] + '.txt'
        save_txt(PWD=path, txt_filename=txt_filename, data=txt_data)
        images.append({
            "img_file": filename,
            "label_file": txt_filename,
            "tag": insert_data['tags'],
            "title": insert_data['title'],
            "description": insert_data['desc']
        })

    if images:
        jsondata = {"page_url": url, "images": images}
        save_summary(path=path, file='summary.json', jsondata=jsondata)
        insert_data['status'] = 'success'
        insert_data['lens'] = len(images)
    else:
        # Nothing downloaded: remove the empty directory and mark as failed.
        insert_data['status'] = 'unsuccess'
        shutil.rmtree(path)
    formtrends_url.updateOne({'_id': insert_data['_id']}, insert_data)
    formtrends_url.close()


#
def get_galleries():
    """Crawl https://www.formtrends.com/category/galleries/page/{n}/
    listing pages and save every article found on them."""
    maxpage = 65
    out_dir = 'formtrends/galleries/'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    for page in range(1, maxpage + 1):
        print('begin page:', page)
        listing_url = 'https://www.formtrends.com/category/galleries/page/{}/'.format(page)
        listing_response = get_main_page(listing_url)
        if not listing_response:
            continue
        article_urls = parse_main_page(listing_response)
        print(page, article_urls)
        for article_url in article_urls:
            article_response = get_page(article_url)
            if article_response:
                record = parse_data_page(article_response)
                record['page'] = page
                record['url'] = article_url
                record['_id'] = article_url
                record['key_word'] = 'galleries'
                print(record['imgs'])
                save_data(record)

def main(page_info: dict):
    """Crawl category pages of the form
    https://www.formtrends.com/category/cars/{key_word}/page/{page}/
    for every ``key_word -> maxpage`` entry in *page_info*."""
    for key_word, maxpage in page_info.items():
        out_dir = f'formtrends/{key_word}/'
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        for page in range(1, maxpage + 1):
            print('begin page:', page)
            listing_url = f'https://www.formtrends.com/category/cars/{key_word}/page/{page}/'
            listing_response = get_main_page(listing_url)
            if not listing_response:
                continue
            article_urls = parse_main_page(listing_response)
            print(page, article_urls)
            for article_url in article_urls:
                article_response = get_page(article_url)
                if article_response:
                    record = parse_data_page(article_response)
                    record['page'] = page
                    record['key_word'] = key_word
                    record['url'] = article_url
                    record['_id'] = article_url
                    save_data(record)

def kw_data(key_word):
    """Crawl search-result pages of the form
    https://www.formtrends.com/page/{page}/?s={key_word} and save every
    article found.

    Fixes vs. original: when the first request fails after all retries,
    ``get_main_page`` returns ``None`` and the original crashed with
    ``AttributeError`` on ``response.text`` — now it logs and returns.
    The dead ``datas = dict()`` / ``datas = {}`` pre-assignments (both
    immediately overwritten) were removed.
    """
    path = f'formtrends/{key_word}/'
    if not os.path.exists(path):
        os.mkdir(path)
    baseurl = f'https://www.formtrends.com/page/1/?s={key_word}'
    response = get_main_page(baseurl)
    if response is None:
        print('failed to fetch first results page:', key_word)
        return
    soups = BeautifulSoup(response.text, 'lxml')
    results = soups.find('div', class_='results-count').get_text()
    # First token of the results-count text is the total hit count;
    # the site lists 6 results per page.
    maxpage = int(str(results).split()[0])
    print('maxpage:', key_word, maxpage)
    for page in range(1, int(maxpage / 6) + 1):
        print('begin page:', key_word, page)
        url = f'https://www.formtrends.com/page/{page}/?s={key_word}'
        response = get_main_page(url)
        if not response:
            continue
        urls = parse_main_page(response)
        print(page, urls)
        for url2 in urls:
            article_response = get_page(url2)
            if article_response:
                datas = parse_data_page(article_response)
                datas['page'] = page
                datas['key_word'] = key_word
                datas['url'] = url2
                datas['_id'] = url2
                save_data(datas)


def mongodb_seed():
    """Pull records still marked 'unsuccess' from MongoDB and re-crawl
    them with a pool of 10 worker processes."""
    seed_collection = mongo_manager("formtrends_url", db="car_images")
    seed_list = list(seed_collection.findAll({'status': 'unsuccess'}))
    print('len(seeds):', len(seed_list))
    seed_collection.close()

    worker_pool = Pool(processes=10)
    worker_pool.map(get_data, seed_list)
    worker_pool.close()  # stop accepting new work
    worker_pool.join()


def get_data(seed: dict):
    """Re-crawl a single seed record taken from MongoDB and save it."""
    record = seed
    page_url = record['url']
    response = get_page(page_url)
    if not response:
        return
    record.update(parse_data_page(response))
    record['url'] = page_url
    record['_id'] = page_url
    print("datas['imgs']:", record['imgs'])
    save_data(record)


def test():
    """Manual smoke test: crawl one search-results page and print each
    parsed article without writing to disk or MongoDB."""
    key_word = 'concept car'
    max_page = 42  # unused; kept from the original for reference
    page = 1
    search_url = f'https://www.formtrends.com/page/{page}/?s={key_word}'
    response = get_main_page(search_url)
    if not response:
        return
    urls = parse_main_page(response)
    print(page, urls)
    for article_url in urls:
        article_response = get_page(article_url)
        if article_response:
            record = parse_data_page(article_response)
            record['page'] = page
            record['key_word'] = key_word
            record['url'] = article_url
            record['_id'] = article_url
            print(record)


if __name__ == "__main__":
    # Make sure the root output directory exists before workers start.
    root_dir = 'formtrends/'
    if not os.path.exists(root_dir):
        os.mkdir(root_dir)

    page_info = {'concept-cars': 42}
    # Keyword list: formtrends prefers standard spellings — concatenated
    # forms such as "conceptcar" return zero search results.
    kws = ['concept car', 'futuristic car', 'car',
           'sports car', 'super car',
           'Automotive',
           'fantastic car', 'amazing car',
           'vehicle car', 'vehicle',
           'wonderful car', 'beautiful car',
           'car design']

    # Other entry points, run manually as needed:
    #   get_galleries()  -> /category/galleries/page/{n}/
    #   main(page_info)  -> /category/cars/{key_word}/page/{n}/
    #   mongodb_seed()   -> retry records marked 'unsuccess'
    #   test()           -> one-page smoke test

    # Crawl /page/{n}/?s={kw} for every keyword, one process per keyword.
    pool = Pool(processes=len(kws))
    pool.map(kw_data, kws)
    pool.close()  # stop accepting new work
    pool.join()
