import csv
import glob
import os
import platform
import re
import shutil
import subprocess
import traceback
import webbrowser
from urllib.parse import quote

import PySimpleGUI as sg
import translators as ts
from appdirs import AppDirs
from requests_html import HTMLSession, HTML, requests

sg.ChangeLookAndFeel('DarkAmber')  # GUI color theme for all windows

__version__ = '0.0.3'

# Markup factor applied to supplier prices in the public ("外部") CSV export.
PRICE_SCALE = 1.4

# Attribute names considered internal-only; stripped from the public CSV.
PRIVATE_PAIR_NAMES = [
    '建议零售价', '主图来源', '货号', '主要销售地区', '主面料产地是否进口',
    '是否外贸', '是否跨境出口专供货源', '上市年份/季节', '主要下游平台',
    '上市年份季节', '加盟分销门槛'
]

# Attribute names with no use even internally; dropped from the internal CSV.
NO_USE_PAIR_NAMES = [
    '货源类别', '产地', '库存类型', '是否库存', '品牌', '主面料产地', '是否外贸', 
    '主要下游平台', '主要销售地区', '是否跨境出口专供货源', '商品编码', '加盟分销门槛',
    '主图来源', '是否支持分销', '有可授权的自有品牌',
]

# First double-quoted substring, e.g. url("...") inside a CSS style attribute.
re_pattern = re.compile(r'"(.*?)"', re.S)

# The "original" image URL inside a data-imgs JSON-like attribute.
re_pattern2 = re.compile(r'"original":"(.*?)"', re.S)

# 1688 keyword-search URL template; {keyword} must already be percent-encoded
# (gb2312 — see encode_chinese_characters), {page} is 1-based.
main_search_url = 'https://s.1688.com/selloffer/offer_search.htm?keywords={keyword}&n=y&netType=1%2C11&beginPage={page}'

# Desktop Chrome user-agent string sent with every request and download.
browser_user_agent = r"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36"

# Shipping price (CNY) keyed by package weight (kg); appended to the public CSV.
PRICE_FOR_WEIGHT = {
    "0.5": 130,
    "1": 150,
    "1.5": 180,
    "2": 220,
    "2.5": 240,
    "3": 280,
    "3.5": 280,
    "4": 300,
    "4.5": 330,
    "5": 360,
    "5.5": 400,
    "6": 420,
    "6.5": 450,
    "7": 500,
    "8": 540,
    "8.5": 580,
    "9": 600,
    "9.5": 640,
    "10": 680 
}


def check_if_install_pyppeteer_chromium():
    """Ensure pyppeteer's bundled chromium exists in the user data dir.

    When the per-user ``pyppeteer`` directory is missing, tries to copy a
    ``pyppeteer`` folder shipped next to the program into place so that
    ``response.html.render()`` can work without a network download.
    Prints progress/failure messages; never raises.
    """
    appdir = AppDirs().user_data_dir
    cwd = os.getcwd()
    target = os.path.join(appdir, 'pyppeteer')
    if not os.path.exists(target):
        print("检测到当前无法还未安装chromium，正在尝试安装")
        chromium_path = os.path.join(cwd, 'pyppeteer')
        if os.path.exists(chromium_path):
            try:
                shutil.copytree(chromium_path, target)
            except (OSError, shutil.Error):
                # Was a bare except; narrowed so Ctrl-C still interrupts.
                print("复制依赖失败")
        else:
            print("找不到可用的chromium，无法安装依赖")


def encode_chinese_characters(chinese_characters: str, encoding='gb2312'):
    """Percent-encode a (Chinese) string for use in a URL.

    Defaults to gb2312, the encoding 1688's search endpoint expects.
    """
    encoded = quote(chinese_characters, encoding=encoding)
    return encoded


def translate_string(string_to_translate: str):
    """Translate the given string into English.

    Best-effort: tries Google Translate first, falls back to Bing, and
    returns None when neither service is reachable.
    """
    # Bare excepts kept on purpose: any failure (network, missing service,
    # rate limit) must fall through to the next provider, never crash.
    try:
        return ts.google(string_to_translate, if_use_cn_host=True)
    except:
        pass
    try:
        return ts.bing(string_to_translate, if_use_cn_host=True)
    except:
        # english_string = ts.baidu(string_to_translate)
        return None


def read_html(html_path: str) -> HTML:
    """Load a saved 1688 detail page from disk as an ``HTML`` object.

    Saved pages are GBK-family bytes; after decoding with gb18030 the
    charset declarations are rewritten to UTF-8 so downstream parsing
    sees a consistent encoding.
    """
    with open(html_path, 'rb') as page_file:
        page_text = page_file.read().decode('gb18030')
    for old_charset, new_charset in (('GBK', 'UTF-8'), ('gbk', 'utf-8')):
        page_text = page_text.replace(old_charset, new_charset)
    return HTML(html=page_text)


class Spider(object):

    @staticmethod
    def request(url: str, render: bool=False, headers: dict=None, allow_redirects: bool=True):
        session = HTMLSession()
        response = session.get(url, headers=headers, allow_redirects=allow_redirects)
        if response.status_code == 200:
            if render is True:
                response.html.render()
            return response
        else:
            return None

    @staticmethod
    def parse(response, parse_func=None):
        try:
            parse_result = parse_func(response)
            return parse_result
        except:
            print("parse response wrong!")
            traceback.print_exc()

    @staticmethod
    def parse_html(html: HTML, parse_func=None):
        try:
            parse_result = parse_func(html)
            return parse_result
        except:
            print("parse_html wrong!")
            traceback.print_exc()

    def run(
        self, url: str, parse_func, render: bool=False,
        headers: dict=None, allow_redirects: bool=True
    ):
        try:
            response = self.request(url, render, headers, allow_redirects)
            if response is not None:
                parse_result = self.parse(response, parse_func)
                return parse_result
            return []
        except:
            return []


def parse_main_search_page(response):
    """Extract thumbnail-level info for every non-ad offer card on a
    rendered 1688 search-results page.

    Returns a list of dicts with keys: thumbnail_img_url, title,
    detail_page_url, repurchase_rate, price, sale_count.
    """
    results = []
    for card in response.html.find('#sm-offer-list .common-offer-card'):
        if 'ad-item' in card.attrs['class']:
            continue  # skip sponsored cards
        rate_elements = card.find('.mojar-element-offerTag .shop-repurchase-rate')
        rate = rate_elements[0].text if rate_elements != [] else ''
        style_attr = card.find('.img-container .img')[0].attrs['style']
        results.append({
            'thumbnail_img_url': re_pattern.search(style_attr).groups()[0],
            'title': card.find('.mojar-element-title .title')[0].text.replace('\n', ' '),
            'detail_page_url': card.find('.mojar-element-title a')[0].attrs['href'],
            'repurchase_rate': rate,
            'price': card.find('.mojar-element-price .price')[0].text,
            'sale_count': card.find('.sale .count')[0].text,
        })
    return results


def parse_detail_page(html: HTML=None):
    """Parse a saved 1688 product detail page.

    Args:
        html: the detail page as a requests_html ``HTML`` object.

    Returns:
        A dict with keys: name, video_url (src string, or [] when the page
        has no video element), sorts_img_urls, chima_dict (size/style ->
        price & stock), package_weight, per_unit_weight,
        detail_description_pairs (attribute name/value tuples),
        description_img_urls.
    """
    sorts_img_urls = []
    chima_dict = {}
    package_weight = ""
    per_unit_weight = ""
    description_img_urls = []
    detail_description_pairs = []    
    good_name = html.find('#mod-detail-title .d-title', first=True).text
    # NOTE: stays [] (not None) when no video element exists; callers check
    # truthiness/None accordingly.
    video_url = html.find('.video-content video')
    if video_url != []:
        video_url = video_url[0].attrs['src']
    img_elements = html.find('#dt-tab .tab-content-container li')
    for img_element in img_elements:
        raw_img_url = img_element.attrs.get('data-imgs')
        if raw_img_url is not None:
            sorts_img_urls.append(re_pattern2.search(raw_img_url).groups()[0])
    # min_sale_amount = html.find('.d-content .amount', first=True).text
    chima_infos = html.find('.table-sku tr')
    for chima_info in chima_infos:
        chima_name = chima_info.find('.name', first=True).text
        chima_price = chima_info.find('.price', first=True).text
        chima_amount_available = chima_info.find('.count', first=True).text
        chima_dict[chima_name] = {
            'price': chima_price,
            'amount_available': chima_amount_available
        }
    # Cross-border block: first span is the package weight, second the
    # per-unit weight.
    kuajing_infos = html.find('.detail-other-attr-content dd span')
    if len(kuajing_infos) >= 2:
        package_weight = kuajing_infos[0].text
        per_unit_weight = kuajing_infos[1].text
    elif len(kuajing_infos) == 1:
        # Bug fix: a single span used to raise IndexError on [1].
        package_weight = kuajing_infos[0].text
    detail_contents = html.find('#mod-detail-attributes .obj-content tr')
    for detail_content in detail_contents:
        temp_list = detail_content.text.split('\n')
        # Bug fix: iterate up to len-1 so a row with an odd number of
        # cells no longer raises IndexError on temp_list[i + 1].
        for i in range(0, len(temp_list) - 1, 2):
            detail_description_pairs.append((temp_list[i], temp_list[i + 1]))
    description_img_elements = html.find('#mod-detail-description #de-description-detail img')
    for description_img_element in description_img_elements:
        description_img_url = description_img_element.attrs.get('src')
        if description_img_url is not None:
            description_img_urls.append(description_img_url)
    detail_info = {
        'name': good_name,
        'video_url': video_url,
        'sorts_img_urls': sorts_img_urls,
        'chima_dict': chima_dict,
        # 'min_sale_amount': min_sale_amount,
        'package_weight': package_weight,
        'per_unit_weight': per_unit_weight,
        'detail_description_pairs': detail_description_pairs,
        'description_img_urls': description_img_urls,
    }
    return detail_info


def get_thumb_goods_info(spider: Spider, keyword: str, page: int):
    """Crawl one page of 1688 search results for *keyword*.

    Returns the list of thumbnail-level offer dicts, or [] on failure.
    """
    search_url = main_search_url.format(
        keyword=encode_chinese_characters(keyword), page=page
    )
    return spider.run(
        search_url,
        parse_func=parse_main_search_page,
        render=True,
        headers={"user-agent": browser_user_agent},
        allow_redirects=True,
    )


def save_detail_info_to_csv(good_hub_folder: str, detail_info: dict, thumb_info: dict, download_video: bool, download_sort_imgs: bool, is_translate: bool):
    """Write the internal ("内部") CSV report for one good and download its
    media assets.

    Args:
        good_hub_folder: user-chosen hub folder; output lands under
            ``parsed_results/<good name>``.
        detail_info: dict produced by ``parse_detail_page``.
        thumb_info: thumbnail-level dict from the search-results page.
        download_video: also download the product video when available.
        download_sort_imgs: also download the per-style images.
        is_translate: append machine-translated (zh -> en) rows.

    Returns:
        True when the CSV was written successfully, False otherwise.
    """
    detail_saved_folder = os.path.join(good_hub_folder, 'parsed_results', detail_info['name'])
    os.makedirs(detail_saved_folder, exist_ok=True)
    # Best-effort video download; failure never aborts the CSV export.
    if download_video is True and detail_info.get('video_url') is not None:
        try:
            print("开始下载商品介绍视频")
            r = requests.get(detail_info['video_url'], headers={"user-agent": browser_user_agent})
            with open(os.path.join(detail_saved_folder, '介绍视频.mp4'), 'wb') as f:
                f.write(r.content)
            print("视频下载完成")
        except Exception:
            print("视频下载失败")

    # Best-effort per-style image downloads, one file per style.
    if download_sort_imgs is True and detail_info.get('sorts_img_urls') is not None:
        print("开始下载商品款式图片")
        for index, sorts_img_url in enumerate(detail_info['sorts_img_urls']):
            try:
                res = requests.get(sorts_img_url, headers={"user-agent": browser_user_agent})
                with open(os.path.join(detail_saved_folder, f'{index}.jpg'), 'wb') as f:
                    f.write(res.content)
                print(f"商品款式图片{index}下载完成")
            except Exception:
                print(f"商品款式图片{index}下载失败")

    detail_csv_path = os.path.join(detail_saved_folder, f"(内部){detail_info['name']}.csv")
    detail_resource_folder = os.path.join(detail_saved_folder, "详情图片")
    os.makedirs(detail_resource_folder, exist_ok=True)
    try:
        # utf-8-sig so Excel on Windows opens the Chinese headers correctly.
        with open(detail_csv_path, 'w', newline='', encoding='utf-8-sig') as fw:
            csv_writer = csv.writer(fw)
            csv_writer.writerow(['商品名称', '复购率', '成交量','商品详情网址', '视频网址', '跨境包裹重量', '单位重量'])
            csv_writer.writerow([
                detail_info['name'], thumb_info['repurchase_rate'], thumb_info['sale_count'],
                thumb_info['detail_page_url'], detail_info['video_url'], 
                detail_info['package_weight'], detail_info['per_unit_weight']
            ])
            english_name = translate_string(detail_info['name'])
            csv_writer.writerow([english_name if english_name is not None else ' '])
            csv_writer.writerow(['商品展示图片链接'])
            csv_writer.writerow(detail_info['sorts_img_urls'])
            csv_writer.writerow([])
            csv_writer.writerow(['尺码/款式价格对照表', '价格', '库存'])
            for key, value in detail_info['chima_dict'].items():
                csv_writer.writerow([key, value['price'], value['amount_available']])
            csv_writer.writerow([])
            csv_writer.writerow(['商品参数详情'])
            # Drop attribute pairs that carry no useful information.
            pair_name_list, pair_value_list = [], []
            for pair_name, pair_value in detail_info['detail_description_pairs']:
                if pair_name in NO_USE_PAIR_NAMES:
                    continue
                pair_name_list.append(pair_name)
                pair_value_list.append(pair_value)
            # Emit attributes nine columns at a time: header row, value row,
            # and (optionally) a machine-translated row.
            for i in range(0, len(pair_name_list), 9):
                csv_writer.writerow(pair_name_list[i:i+9])
                csv_writer.writerow(pair_value_list[i:i+9])
                if is_translate:
                    print("正在尝试中译英...")
                    trans_res = translate_string('+'.join(pair_value_list[i:i+9]))
                    csv_writer.writerow(['无法使用翻译服务,请检查您的网络是否通畅'] if trans_res is None else trans_res.split('+'))
                    csv_writer.writerow([])

            # Copy the description images (saved next to the HTML page by the
            # browser) into the result folder. String concatenation is used
            # deliberately: the relative path starts with '/', which
            # os.path.join would treat as absolute and discard the hub folder.
            # (The original also built an unused abs_description_img_paths
            # list via that broken join; it has been removed.)
            for description_img_relative_path in detail_info['description_img_urls']:
                shutil.copyfile(
                    good_hub_folder + description_img_relative_path[1:],
                    os.path.join(detail_resource_folder, os.path.split(description_img_relative_path)[1])
                )
        return True
    except Exception:
        print("保存商品信息失败")
        traceback.print_exc()
    return False


def save_detail_info_to_csv_public_version(good_hub_folder: str, detail_info: dict, thumb_info: dict, is_translate: bool):
    """Write the public ("外部") CSV report for one good.

    Compared to the internal report: prices are marked up by PRICE_SCALE,
    attributes listed in PRIVATE_PAIR_NAMES are stripped, and the
    weight/shipping-price table is appended.

    Args:
        good_hub_folder: user-chosen hub folder.
        detail_info: dict produced by ``parse_detail_page``.
        thumb_info: thumbnail-level dict (currently unused here; kept for
            signature parity with ``save_detail_info_to_csv``).
        is_translate: append machine-translated (zh -> en) rows.

    Returns:
        True when the CSV was written successfully, False otherwise.
    """
    detail_saved_folder = os.path.join(good_hub_folder, 'parsed_results', detail_info['name'])
    os.makedirs(detail_saved_folder, exist_ok=True)

    detail_csv_path = os.path.join(detail_saved_folder, f"(外部){detail_info['name']}.csv")
    try:
        # utf-8-sig so Excel on Windows opens the Chinese headers correctly.
        with open(detail_csv_path, 'w', newline='', encoding='utf-8-sig') as fw:
            csv_writer = csv.writer(fw)
            csv_writer.writerow(['商品名称', '跨境包裹重量', '单位重量'])
            csv_writer.writerow([
                detail_info['name'], detail_info['package_weight'], detail_info['per_unit_weight']
            ])
            english_name = translate_string(detail_info['name'])
            csv_writer.writerow([english_name if english_name is not None else ' '])
            csv_writer.writerow([])
            csv_writer.writerow(['尺码/款式价格对照表', '价格', '库存'])
            for key, value in detail_info['chima_dict'].items():
                try:
                    # Strip the trailing currency character ('元') before
                    # applying the markup.
                    price = float(value['price'][:-1]) * PRICE_SCALE
                    csv_writer.writerow([key, f"{round(price, 2)}元", value['amount_available']])
                except (KeyError, TypeError, ValueError):
                    # Unparseable price: skip the row rather than abort
                    # (was a bare except).
                    pass
            csv_writer.writerow([])
            csv_writer.writerow(['商品参数详情'])
            # Strip internal-only attributes from the public report.
            pair_name_list, pair_value_list = [], []
            for pair_name, pair_value in detail_info['detail_description_pairs']:
                if pair_name in PRIVATE_PAIR_NAMES:
                    continue
                pair_name_list.append(pair_name)
                pair_value_list.append(pair_value)
            # Emit attributes nine columns at a time: header row, value row,
            # and (optionally) a machine-translated row.
            for i in range(0, len(pair_name_list), 9):
                csv_writer.writerow(pair_name_list[i:i+9])
                csv_writer.writerow(pair_value_list[i:i+9])
                if is_translate:
                    print("正在尝试中译英...")
                    trans_res = translate_string('+'.join(pair_value_list[i:i+9]))
                    csv_writer.writerow(['无法使用翻译服务,请检查您的网络是否通畅'] if trans_res is None else trans_res.split('+'))
                    csv_writer.writerow([])

            csv_writer.writerow([])
            csv_writer.writerow(['重量(kg)', '价格(元)'])
            for weight, price in PRICE_FOR_WEIGHT.items():
                csv_writer.writerow([weight, price])
        return True
    except Exception:
        print("保存对外商品信息失败")
        traceback.print_exc()
    return False


def show_in_finder_or_explorer(path):
    """Reveal *path* in the platform's file manager.

    Silently returns when the path does not exist; shows a popup when the
    file manager cannot be launched.
    """
    if not os.path.exists(path):
        return
    try:
        system_name = platform.system()
        if system_name == 'Windows':
            os.startfile(path)
        elif system_name == 'Darwin':
            # Bug fix: the original os.system(f"open {path}") broke on paths
            # containing spaces or shell metacharacters; an argument list
            # avoids the shell entirely.
            subprocess.run(['open', path], check=False)
        else:
            # 'open' exists only on macOS; Linux desktops use xdg-open.
            subprocess.run(['xdg-open', path], check=False)
    except Exception:
        sg.Popup('打开文件夹的过程中遇到了一些问题,无法正常打开', title='提示')


def clean_good_hub_folder(good_hub_folder: str):
    """Delete everything inside *good_hub_folder* except the
    'parsed_results' output directory.

    No-op when the folder does not exist. A failed deletion is reported and
    skipped instead of aborting the loop.
    """
    if not os.path.exists(good_hub_folder):
        return
    for each in os.listdir(good_hub_folder):
        remove_target = os.path.join(good_hub_folder, each)
        if 'parsed_results' in remove_target:
            continue
        try:
            os.remove(remove_target)
        except OSError:
            # Directories land here (os.remove only handles files).
            try:
                shutil.rmtree(remove_target)
            except OSError:
                # Bug fix: the original printed the success message from a
                # ``finally`` block, claiming success even when deletion
                # failed (and let the rmtree exception escape).
                print(f"删除{remove_target}失败")
                continue
        print(f"删除{remove_target}成功")


#----- layout -----

# Scrolling log pane; print() output is redirected here by sg.Output.
generate_result_layout = [
    [sg.Output(key='running_log', size=(54, 10), font=("", 12)),],
]

# Read-only summary fields for the currently selected good.
good_summary_layout = [
    [sg.Text('商品名称:  ', font=("", 12)), sg.InputText(key='good_name', readonly=True, font=("", 12), size=(41,1), text_color='black')],
    [sg.Text('商品链接:  ', font=("", 12)), sg.InputText(key='good_detail_url', readonly=True, font=("", 12), size=(41,1), text_color='black')],
    [sg.Text('商品价格:  ', font=("", 12)), sg.InputText(key='good_price', readonly=True, font=("", 12), size=(41,1), text_color='black')],
    [sg.Text('商品成交量:', font=("", 12)), sg.InputText(key='good_sale_count', readonly=True, font=("", 12), size=(41,1), text_color='black')],
    [sg.Text('商品复购率:', font=("", 12)), sg.InputText(key='good_repurchase_rate', readonly=True, font=("", 12), size=(41,1), text_color='black')],
    [sg.Text('商品缩略图:', font=("", 12)), sg.InputText(key='good_thumbnail_img_url', readonly=True, font=("", 12), size=(41,1), text_color='black')],
]

# Right-hand column: summary frame, option checkboxes, action buttons
# (button labels double as event names in the main loop), and the log pane.
main_operate_layout = [
    [sg.T(), sg.Frame('商品大致信息', key="good_summary", layout=good_summary_layout)],
    [
        sg.T(), 
        sg.Checkbox('下载视频', font=("", 12), key='is_download_video'), 
        sg.Checkbox('下载款式图片', default=True, font=("", 12), key='is_download_sort_imgs'),
        sg.Checkbox('完成后打开文件夹', default=True, font=("", 12), key='is_open_extract_result'),
        sg.Checkbox('中译英', default=False, font=("", 12), key='is_translate'),
    ],
    [
        sg.T(), 
        sg.Button('用浏览器打开', font=("", 13)), 
        sg.Button('提取详情页', font=("", 13)),
        # sg.FileBrowse('选择详情页', font=("", 13), file_types=(("HTML Files", "*.html"),), enable_events=True, change_submits=True),
        sg.Button('打开仓库文件夹', font=("", 13)),
        sg.Button('清空仓库', font=("", 13)),
    ],
    [
        sg.Frame('运行日志', layout=generate_result_layout),
        sg.T(),
    ],   
]

# Top-level window layout: hub-folder picker on top, then the crawled-goods
# list (with pagination controls) next to the operate column.
layout = [      
    # [sg.Text('程序运行状态提示: 当前并未运行', key='spider_state', font=("", 13), size=(70, 1))],
    [sg.Text('商品仓库:'), sg.InputText(key='goods_detail_folder', font=("Helvetica", 12), size=(69, 1)), sg.FolderBrowse('选择文件夹')],
    [
        sg.T(),
        sg.Frame('爬取结果', layout=[
                [sg.Listbox(key='crawled_goods', values=[], size=(28, 21), font=("", 13), enable_events=True)],
                [
                    sg.Text(key='crawled_goods_description', text="爬取到0个商品"),
                    sg.Button('◀', key='prev_page'),
                    sg.Text(key='current_page', text="第_页"),
                    sg.Button('▶', key='next_page'),
                    sg.Button('获取商品'),
                ],
            ]
        ),
        sg.Column(main_operate_layout)
    ],
    [sg.T(),],
]      


if __name__ == '__main__':
    # from pprint import pprint
    # html_path = r'E:\developer\PythonProject\spider_1688\good_hub\【谷一】2020亚马逊欧美跨境爆款女装现货 性感吊带印花连衣长裙-阿里巴巴.html'
    # html = read_html(html_path)
    # pprint(parse_detail_page(html=html))
    # exit()
    # --- session state ---
    keyword = None                 # keyword of the last successful search
    current_page = 1               # 1-based page index of the search results
    crawled_goods = []             # titles currently shown in the listbox
    crawled_goods_info = []        # thumbnail dicts backing the listbox
    current_thumb_good_info = {}   # thumbnail dict of the selected good
    spider = Spider()
    window = sg.Window(f'爬虫工具v{__version__}', layout, default_element_size=(40, 1), grab_anywhere=False)

    print("正在检测环境依赖...")
    check_if_install_pyppeteer_chromium()
    print("检查完成")

    # Main event loop: one iteration per GUI event; event is the button
    # label / element key, values maps element keys to their current values.
    while True:
        event, values = window.read()    
        if event == sg.WIN_CLOSED:
            break
        if event == '获取商品':
            # Start a fresh keyword search on page 1.
            if not os.path.exists(values['goods_detail_folder']):
                sg.Popup('请您先设置数据仓库位置', title='提示')
                continue
            clean_good_hub_folder(values['goods_detail_folder'])
            new_keyword = sg.popup_get_text("请输入搜索关键词", title='1688主页搜索', default_text='欧美 手提包', keep_on_top=True)
            if new_keyword is None:
                continue
            print("程序运行状态提示: 正在爬取1688主页，请耐心等待...")
            crawled_goods_info = get_thumb_goods_info(spider, new_keyword, 1)
            if crawled_goods_info == []:
                sg.Popup('没有爬取到商品，请检查网络连接或更换搜索关键词，或手动打开1688网页查看是否需要输入验证码', title='提示')
            else:
                # Refresh the listbox and reset pagination state only on success.
                crawled_goods = []
                for result in crawled_goods_info:
                    crawled_goods.append((result['title']))
                window['crawled_goods'].update(crawled_goods)
                window['current_page'].update(f"第{1}页")
                window['crawled_goods_description'].update(f"{len(crawled_goods)}个商品")
                current_page = 1
                keyword = new_keyword
            print("程序运行状态提示: 爬取过程结束")
        elif event == 'prev_page':
            #TODO use cache to store contents that have been crawled
            if current_page == 1:
                sg.Popup('当前已经是第1页,无法再往前翻页', title='提示')
                continue
            print("程序运行状态提示: 正在爬取，请耐心等待...")
            crawled_goods_info = get_thumb_goods_info(spider, keyword, current_page-1)
            if crawled_goods_info == []:
                sg.Popup('没有爬取到商品，请检查网络连接或更换搜索关键词，或手动打开1688网页查看是否需要输入验证码', title='提示')
            else:
                # current_page is only decremented when the crawl succeeded.
                crawled_goods = []
                for result in crawled_goods_info:
                    crawled_goods.append((result['title']))
                window['crawled_goods'].update(crawled_goods)
                window['current_page'].update(f"第{current_page-1}页")
                window['crawled_goods_description'].update(f"{len(crawled_goods)}个商品")
                current_page -= 1
            print("程序运行状态提示: 爬取过程结束")
        elif event == 'next_page':
            if len(crawled_goods) == 0:
                sg.Popup('请先使用<获取商品>功能获取商品', title='提示')
                continue
            print("程序运行状态提示: 正在爬取，请耐心等待...")
            crawled_goods_info = get_thumb_goods_info(spider, keyword, current_page+1)
            if crawled_goods_info == []:
                sg.Popup('没有爬取到商品，请检查网络连接或更换搜索关键词，或手动打开1688网页查看是否需要输入验证码', title='提示')
            else:
                # current_page is only incremented when the crawl succeeded.
                crawled_goods = []
                for result in crawled_goods_info:
                    crawled_goods.append((result['title']))
                window['crawled_goods'].update(crawled_goods)
                window['current_page'].update(f"第{current_page+1}页")
                window['crawled_goods_description'].update(f"{len(crawled_goods)}个商品")
                current_page += 1
            print("程序运行状态提示: 爬取过程结束")
        elif event == 'crawled_goods':
            # Listbox selection changed: show the selected good's summary.
            try:
                good_name = values['crawled_goods'][0]
            except IndexError:
                continue
            # Look the title up in the crawled page to fill the summary fields.
            for crawled_good_info in crawled_goods_info:
                if crawled_good_info['title'] == good_name:
                    window['good_name'].update(good_name)
                    window['good_detail_url'].update(crawled_good_info['detail_page_url'])
                    window['good_price'].update(crawled_good_info['price'])
                    window['good_sale_count'].update(crawled_good_info['sale_count'])
                    window['good_repurchase_rate'].update(crawled_good_info['repurchase_rate'])
                    window['good_thumbnail_img_url'].update(crawled_good_info['thumbnail_img_url'])
                    current_thumb_good_info = crawled_good_info
                    break
        elif event == '用浏览器打开':
            try:
                webbrowser.open_new_tab(current_thumb_good_info['detail_page_url'])
            except:
                sg.Popup('无法使用浏览器打开', title="提示")
        elif event == '提取详情页':
            # TODO: after extraction, mark the item as processed in the listbox
            # Parse the manually saved detail-page HTML in the hub folder and
            # export the internal + public CSV reports.
            goods_detail_folder = values['goods_detail_folder']
            if not os.path.exists(goods_detail_folder):
                sg.Popup('商品仓库路径不存在', title='提示')
                continue
            html_paths = glob.glob(os.path.join(goods_detail_folder, '*.html'))
            print("程序运行状态提示: 正在提取网页中的商品信息")
            try:
                # Only the first .html file in the hub folder is processed.
                html = read_html(html_paths[0])
                detail_info = spider.parse_html(html, parse_func=parse_detail_page)
                save_result = save_detail_info_to_csv(
                    values['goods_detail_folder'], detail_info, current_thumb_good_info, 
                    values['is_download_video'], values['is_download_sort_imgs'], values['is_translate']
                )
                outer_save_result = save_detail_info_to_csv_public_version(
                    values['goods_detail_folder'], detail_info, current_thumb_good_info, values['is_translate']
                )
                if outer_save_result is False:
                    sg.Popup('提取给运营的商品信息失败!')
                if save_result is True:
                    sg.Popup('提取商品信息成功')
                    if values['is_open_extract_result'] is True:
                        show_in_finder_or_explorer(
                            os.path.join(values['goods_detail_folder'], 'parsed_results', detail_info['name'])
                        )
                else:
                    sg.Popup('提取商品信息失败')
            except:
                # Dump the traceback to error.txt so end users can report it.
                with open('error.txt', 'w') as f:
                    traceback.print_exc(file=f)
                sg.Popup('提取商品信息失败,把商品详情网址发给我~')
            finally:
                # Always clear the hub folder (keeps only parsed_results).
                clean_good_hub_folder(values['goods_detail_folder'])
            print("程序运行状态提示: 提取商品信息过程结束")
        elif event == '打开仓库文件夹':
            if os.path.exists(values['goods_detail_folder']):
                show_in_finder_or_explorer(values['goods_detail_folder'])
            else:
                sg.Popup('仓库文件夹不存在,请先设定仓库文件夹')
        elif event == '清空仓库':
            clean_good_hub_folder(values['goods_detail_folder'])
        # print(event, values)
