import json
import re

import requests
from bs4 import BeautifulSoup
from django.http import HttpResponse

from chatbot.component.table_utils import ExcelWriteUtil
from chatbot.settings import logger

# Host header value for the public product-info site, used by
# spider_product_info_ci123 (the staff spider overrides 'Host' itself).
host = 'info.ci123.com'


def spider_staff_info_ci123(headers):
    """Scrape staff profiles from the corporate OA album pages into an Excel file.

    Iterates over a fixed department map, fetches each department's album
    page using hard-coded session cookies, parses every ``div.emp-info``
    card, and appends one row per employee to the workbook.

    :param headers: dict of HTTP headers to send; the ``Host`` entry is
        overwritten to target the OA host.
    :return: Django ``HttpResponse`` with ``{"result": {"state": "ok"}}``.
    """
    headers['Host'] = 'oa.corp-ci.com'

    # Department URL path fragment -> display name (fragment is appended
    # to base_url below).
    departments = {
        '1/1': '总裁办',
        '1/2': '财务部',
        '1/3': '人事部',
        '1/4': '行政部',
        '1/5': '市场部',
        '1/6': '电子商务部',
        '1/8': '新媒体运营',
        '1/9': '客户部',
        '1/10': '运营部',
        '1/11': '孕期提醒',
        '1/12': '运维部',
        '1/13': '健康事业部（柏橙）',
        '1/15': '中台事业部',
        '2': '亲子周末事业部(千鱼)',
        '3': '教育事业部(赛诚)',
        '18': '互娱事业部(红豆)'
    }
    # Hard-coded cookies for an authenticated OA session.
    # NOTE(review): these credentials will expire and leak in source
    # control — move them to configuration/environment instead.
    cookies = {
        'QwlQt_adminid': "2241",
        'QwlQt_adminname': 'zhouguangjie%40corp-ci.com',
        'QwlQt_adminrealname': '%E5%91%A8%E5%85%89%E6%9D%B0',
        'QwlQt_adminrealhash': '93fd8683404b3db9',
        'QwlQt_adminhash': '93fd8683404b3db9',
        'openemail': 'zhouguangjie%40corp-ci.com',
        'openhash': 'wwbn1k3zeXJ8U',
        'openadminname': 'zhouguangjie',
        'openpassword': 'b1b6ff2c94147701a5ee881c665acb1e',
        'openname': 'zhouguangjie',
        'openid': '2241',
        'PHPSESSID': 'etv7aaq4hss21jctebn2ocknr0',
        '_pk_id.3.493b': '6b2bbe2eb41d885c.1602843240.2.1604653472.1602843240.'
    }
    excel_headers = ['部门', '姓名', '工号', '职位', '星座', '进橙时间', '个性签名']
    # NOTE(review): hard-coded absolute output path — parameterize if reused.
    excel = ExcelWriteUtil('/Users/zhou/Documents/PycharmProject/ChatBot/ci123-staff.xlsx', 'xlsx')
    excel.set_header(excel_headers)
    base_url = 'http://oa.corp-ci.com/oa.php/OrangeStyle/album/'
    for dept_key, dept_name in departments.items():
        url = base_url + dept_key
        # verify=False: internal host, presumably self-signed cert — confirm.
        r = requests.get(url, verify=False, headers=headers, cookies=cookies)
        soup = BeautifulSoup(r.content, 'html.parser', from_encoding='utf-8')
        for profile in soup.find_all('div', class_='emp-info'):
            properties = profile.find_all('div')
            # Build a fresh dict per employee so a missing field cannot
            # silently carry over a stale value from the previous row
            # (the original reused one mutable dict for every row).
            row = {
                '部门': dept_name,
                '姓名': properties[0].string,
                '工号': properties[1].string.split('：')[1],
                '职位': properties[2].string.split('：')[1],
                '星座': properties[3].string.split('：')[1],
                '进橙时间': properties[4].string.split('：')[1],
                '个性签名': properties[5].get_text().split('：')[1],
            }
            excel.append_row(row)
    # Save once after all departments are scraped; the original rewrote
    # the whole workbook once per employee inside the inner loop.
    excel.save()
    return HttpResponse(json.dumps({'result': {"state": "ok"}}), content_type="application/json")


def spider_product_info_ci123(headers, first_page=None):
    """Scrape product listings per brand from info.ci123.com into an Excel file.

    Parses a locally saved copy of the brand index page to collect brand
    ids, then fetches each brand's product-list page and extracts one row
    per ``div.p_con`` product card.

    :param headers: dict of HTTP headers to send; the ``Host`` entry is
        overwritten with the module-level ``host``.
    :param first_page: path to a saved HTML copy of the brand index page.
        Required; defaults to None only for backward compatibility.
    :raises ValueError: if ``first_page`` is not supplied.
    :return: Django ``HttpResponse`` with ``{"result": []}`` (the
        ``result`` list is never populated by the current implementation).
    """
    headers['Host'] = host

    # Bug fix: ``first_page`` was previously an undefined global name and
    # this function raised NameError unconditionally. It is now an
    # explicit (backward-compatible) parameter with a clear error.
    if first_page is None:
        raise ValueError('first_page: path to a saved brand index page is required')

    # Context manager guarantees the file handle is closed (the original
    # leaked it).
    with open(first_page, 'r') as f:
        soup = BeautifulSoup(f.read(), 'html.parser', from_encoding='utf-8')
    total_brands = soup.find_all('ul', class_='brand_detail')
    result = []

    excel_headers = ['产品', '品牌', 'URL', '参考价格', '适用年龄', '综合评分', '浏览量', '点评数', '产品介绍']
    url = 'http://info.ci123.com/brand/list/all.php?brand_id='
    # NOTE(review): hard-coded absolute output path — parameterize if reused.
    excel = ExcelWriteUtil('/Users/zhou/Documents/PycharmProject/ChatBot/ci123.xlsx', 'xlsx')
    excel.set_header(excel_headers)
    base_url = 'http://info.ci123.com/brand'
    # Compile once instead of per link inside the loop.
    id_pattern = re.compile(r'id=(\w+?)$')
    for alpha_brands in total_brands:
        for brand_link in alpha_brands.find_all('a'):
            matcher = id_pattern.search(brand_link.get('href'))
            if not matcher:
                continue
            brand_name = brand_link.get_text()
            brand_id = matcher.group(1)
            brand_url = url + brand_id
            print(brand_name, brand_id, brand_url)

            r = requests.get(brand_url, verify=False, headers=headers)
            product_page = BeautifulSoup(r.content, 'html.parser', from_encoding='utf-8')

            for product in product_page.find_all('div', class_='p_con'):
                # Cards with unexpected markup are skipped but logged; one
                # malformed product must not abort the whole scrape.
                try:
                    # Price block looks like "参考价格：X\u3000适用年龄：Y".
                    price_info = product.div.div.get_text().split('\u3000')
                    num_div = product.find('div', class_='num')
                    row = {
                        '产品': product.a.get_text().strip(),
                        '品牌': brand_name,
                        # Product links are relative ("../..."); rebase
                        # them onto the brand site root.
                        'URL': base_url + product.a.get('href').replace('..', ''),
                        '参考价格': price_info[0].split('：')[1],
                        '适用年龄': price_info[1].split('：')[1],
                        '综合评分': product.find('span', class_='red').get_text(),
                        '浏览量': num_div.span.get_text(),
                        '点评数': num_div.span.fetchNextSiblings('span')[0].get_text(),
                        '产品介绍': product.p.get_text(),
                    }
                    print(row)
                    excel.append_row(row)
                except Exception as e:
                    logger.error(e, exc_info=True)
                    continue
            # Checkpoint after each brand page so a crash mid-run keeps
            # progress (the original also saved inside the except handler
            # and again per alphabet group — redundant).
            excel.save()
    excel.save()
    return HttpResponse(json.dumps({'result': result}), content_type="application/json")
