import json
from datetime import datetime
from time import sleep
from urllib import parse

import requests
from bs4 import BeautifulSoup
from openpyxl import load_workbook
from sqlalchemy import Column, String, create_engine, Integer, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

from splderDetail import spider_detail

# HTTP headers sent with every crawler request.  The Cookie carries a full
# logged-in Alibaba browser session; the crawl stops working once it expires.
# NOTE(review): hard-coding a live session cookie in source is a security
# risk -- it grants account access to anyone who can read this file.
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:86.0) Gecko/20100101 Firefox/86.0',
    'Cookie': 'ali_apache_id=11.179.217.160.1618972227927.628328.6; XSRF-TOKEN=11335756-59f8-4ed1-9a24-7801adbd8e83; cookie2=17848294ee456b81c2c5ec87f767d863; t=a7d744c355409ba379ac70c652e8e890; _tb_token_=381de57e3eb3e; xman_us_f=x_locale=zh_CN&x_l=1&last_popup_time=1618972305441&x_user=CN|jifeng|wang|cnfm|250856201&no_popup_today=n; acs_usuc_t=acs_rt=be36e6675fca47d3911fe629b17489f2; xman_t=wYnz5mBpHrir60ZxqzCV/T+Iu20azEal9vWdpK/T0aGYBhJvb7W4luaVLkacq4mFrY5T0wKxtVv27HCv5zOy+9HR2DELGif2NkRv9N1Kl8Z8Mjeem28Rwqwl2So3qFi+Yaddqb6mXqsblsSlZZ6sSMHqOQf645naFKbUdSk1SO+3sCyWBSIJtgo9t5JQFqPV7aNCZOzv66eV5sA9DnGieE9uMrZUbGIW6W7nZzo13a0Dp5CkR1Y976SxOyD3XuPFvIJspei1memSs421f0Ct2DKMy7nCIsXbNvYpdV58kC7EXy6YqWvNZVaMRMbBIwfbwu2fNFzA++BJeeA3HixKnHoYpYHoXtxYTIPo56YfJSRQzI0dV+MUdKUoGaPtXUJt6nlY8vkbTMmxnjUSJpl3dsHMDt/gu7DyE9e81ZtgMN6umExmIWZzZAA/CoDkb9KOXUzsss7orTrG981+UzpjFn2UiStodBr1Zm2XCpPnkai1jxvvn+cuO/xXAIjbxYe7x/9qqvLH33LwgHYJpRKaaKT7bm3U2w/DQvPRhp1ER3rxRFrevqpiRwuQvTw8APAgoGW+0OcGT24PeeVRfeKdTP6Cnb5UqrLF47FuFrLigtjwSCqbyRWFY/ClAHx3z7cocZfi2T3s8G9jwP2XjuGxDYLbkY8AmjsL9rBbAm1nkZFZnsluD8K9R4Fj4o1RXUbG9LOb8N4mckaoxFmk+KpYvhGOGxcvQGha; xman_f=72zP9OsVXhwjt8gvASZTweujlBOPoU2YcxrrkDBr50O3A1SCCX96Ci/NVhWpiRHvVtuMNJ+Q1IQyGbGVrQnu9OT6DQ58L60I9dczOiu8LWs6v1HAqL96l5um2uvlrYlVK45ah5vhubxML02y+NdTW0DrmjIwPeIkI09JhtYYBAAiegKE3DevsGnrDwOjDQkMhg6h+vKqWz83fmZ0KmUF/klxiS0NppuOdtpHn81AiqrumS+7+Djshb7ktKgs2FBMdlvEuXKE4cjtbNG4z2d+KdAA7il+DAB4ImSQlh16wxJ0XZNgKIGpUhc+ZqjqtO+QguCUOlCIM/xTP8hs3AuHmvHYi2ne52I2CDncQSIFLyd/naFXGMBq+jguqvkt9tTfb9+YNIvB5bOmsVjvqHQhYg==; JSESSIONID=46C43942E900CA8D1C6A7BD696AC7241; cna=RnwGGVNnHUQCAd0P6vRyPIMt; _samesite_flag_=true; tfstk=c-xCByDR3WVQiiqzepMa4ZW5-UjlZsyfF21HdE6yuhAIf1J1isEVGxlvr7QdjO1..; isg=BC8v8Sk1k9mb0Zflv6sL4GRXvUM51IP2tNC9aUG8yR6lkE-SSaNkRs5KFgCu81tu; l=eBL3VBkIjBZm-VskXOfalurza779IIOYYuPzaNbMiOCPOHfp5kARB6aHfbL9Cn1Vhs_wR3J-vJ6JBeYBqhDu0d6Omcgsr4Mmn; xlly_s=1; atm-whl=-1%260%260%260; atm-x=__ll%3D-1; ali_apache_track=mt=2|mid=cn1540671967kart; 
ali_apache_tracktmp=W_signed=Y; history=company%5E%0A251595772%24%0A200698103%24%0A230357703; _hvn_login=4; csg=b56f7072; xman_us_t=ctoken=8qjiu_h9syxf&l_source=alibaba&x_user=ptTipsnP9EqWRsjQ6mzrAU6dcqv6MwC826VWnMi+Q/U=&x_lid=cn1540671967kart&sign=y&need_popup=y; intl_locale=zh_CN; intl_common_forever=ZiYjfVakalmKDV4sn+fqfi4J8AfY4QRi2hrZTaYZHmOuFOxaDRai6Q==; _m_h5_tk=2f5c60fc7500ba11c6d046bc4fdca23c_1618979870691; _m_h5_tk_enc=465311a4e885a4e485514455774ed239; x5sec=7b22736370726f78793b32223a223263333764376361323039363235343937376462373335306166613035306339434a61732f6f4d47454a54737738335634757665474443362f664e2f227d'
    # 'Referer': url

}
# NOTE(review): the Cookie string above appears to span two physical lines; a
# single-quoted Python string cannot contain a raw newline, so verify it is
# one logical line in the actual file (this may be a copy/wrap artifact).

# Company page currently being processed; written by spider(), read by
# parse_detail() to build follow-up URLs.
main_url = ''

# Crawl entry point: Alibaba company search for 'ningbo'.
url = 'https://www.alibaba.com/trade/search?fsb=y&IndexArea=company_en&CatId=&SearchText=ningbo'

# Declarative base class for ORM models:
Base = declarative_base()
# Initialise the MySQL connection (echo=True logs every emitted SQL statement):
engine = create_engine('mysql+mysqlconnector://root:yizheng@192.168.0.245:3306/test',
                       echo=True,
                       max_overflow=5)
# NOTE: the original code called Base.metadata.create_all(engine) right here,
# before any model class was defined -- at that point the metadata is empty,
# so the call created no tables.  The effective create_all() runs after the
# CpUser model is declared (further down in this file), so the premature call
# was removed.
DBSession = sessionmaker(bind=engine)
session = DBSession()


# Model definition:
class CpUser(Base):
    # Table name:
    __tablename__ = 'ningbo'

    # Table columns (all free-text fields scraped from the contact page,
    # keyed to match the normalised <th> labels produced by parse_detail):
    id = Column(Integer, primary_key=True)
    contact_name = Column(String(200))
    Telephone = Column(String(200))
    Address = Column(String(200))
    Zip = Column(String(200))
    CountryRegion = Column(String(200))
    ProvinceState = Column(String(200))
    City = Column(String(200))
    CompanyName = Column(String(200))
    OperationalAddress = Column(String(200))
    Website = Column(String(200))
    Websiteonalibabacom = Column(String(200))
    accountFax = Column(String(200))
    accountMobileNo = Column(String(200))
    accountPhone = Column(String(200))
    # Row insertion timestamp (default evaluated per-insert).
    time = Column(DateTime, default=datetime.now)

    # Objects with different hash codes are always treated as distinct set
    # elements; objects with equal hash codes may still differ (hash
    # collision), so the set must also compare them with __eq__.
    # NOTE(review): because the methods below stay commented out, putting
    # CpUser instances into a set() deduplicates by identity only -- see
    # outxls(), whose output file name reflects "no dedup".
    # def __hash__(self):
    #     return hash(self.contact_name)
    #
    # def __eq__(self, other):
    #     return self.contact_name == other.contact_name and \
    #            self.Telephone == other.Telephone and \
    #            self.Address == other.Address and \
    #            self.Zip == other.Zip and \
    #            self.CountryRegion == other.CountryRegion and \
    #            self.ProvinceState == other.ProvinceState and \
    #            self.City == other.City and \
    #            self.CompanyName == other.CompanyName and \
    #            self.OperationalAddress == other.OperationalAddress and \
    #            self.Website == other.Website and \
    #            self.Websiteonalibabacom == other.Websiteonalibabacom and \
    #            self.accountFax == other.accountFax and \
    #            self.accountMobileNo == other.accountMobileNo and \
    #            self.accountPhone == other.accountPhone


# Create the tables registered on Base's metadata (CpUser's 'ningbo' table).
# Safe to run repeatedly: existing tables are left untouched.
Base.metadata.create_all(engine)

# Shared HTTP session so cookies persist across all crawl requests.
# requests.Session() is the documented class; requests.session() is a
# legacy lowercase alias.
hsession = requests.Session()


def spider(url):
    """Crawl Alibaba company-search result pages starting at *url*.

    For every company link on a result page, fetch that company's
    'contactinfo.html' page and hand the HTML to parse_detail(), which
    stores the extracted contact data in the database.  Pagination follows
    the '.next' link, capped at 73 pages.

    Fixes over the original implementation:
    - The old `while i < 73` loop re-selected the same '.next' link from
      the same soup on every iteration and recursed into spider(), so each
      recursion level spawned its own up-to-73-call loop and pages were
      crawled an exponential number of times.  Pagination is now iterative.
    - The old inner loop `break`-ed after the first company of each page
      (presumed debug leftover), skipping all other companies; removed.
    """
    global main_url
    pages = 0
    while url and pages < 73:
        pages += 1
        r = hsession.get(url, headers=header)
        soup = BeautifulSoup(r.text, 'lxml')
        data = soup.select(
            '#J-items-content > div > div > div.top > div.corp > div.item-title > div.title-wrap > h2 > a')

        for d in data:
            # parse_detail() reads the global main_url to build further URLs.
            main_url = d.get('href')
            # The contact page lives next to the company page:
            # .../<company>/whatever.html -> .../<company>/contactinfo.html
            contactinfo_url = main_url[:main_url.rindex('/') + 1] + 'contactinfo.html'
            rt = hsession.get(contactinfo_url, headers=header).text
            parse_detail(rt)
            sleep(0.1)  # be polite: throttle per-company requests

        try:
            # '.next' holds a protocol-relative href; no link means last page.
            url = 'https:' + soup.select('.next')[0].get('href')
        except (IndexError, AttributeError, TypeError) as e:
            print(e)
            break
        sleep(0.1)  # throttle page turns


def parse_detail(rt):
    """Parse a company 'contactinfo.html' page and persist one CpUser row.

    rt -- raw HTML text of the contact-info page.

    Steps:
    1. Extract the encrypted account id from a URL-encoded JSON blob in a
       'module-data' attribute in the page header (its div position varies).
    2. Scrape the visible contact/company tables into a dict, normalising
       <th> labels into bare identifiers that match CpUser column names
       (e.g. 'Country/Region' -> 'CountryRegion').
    3. Fetch the hidden contact details as JSON via spider_detail() and
       merge them over the scraped data (JSON values win).
    4. Insert and commit a CpUser row.

    Returns None.  Silently skips the page when the expected 'module-data'
    payload cannot be found or parsed.
    """
    soup = BeautifulSoup(rt, 'lxml')

    root = soup.select('#hd > div > div > div')
    try:
        data_dict = json.loads(parse.unquote(root[1].get('module-data')))
        encryptAccountId = data_dict['mds']['moduleData']['data']['encryptAccountId']
    except (IndexError, AttributeError, TypeError, KeyError, ValueError):
        # Some layouts put the payload in the first div instead.
        try:
            data_dict = json.loads(parse.unquote(root[0].get('module-data')))
            encryptAccountId = data_dict['mds']['moduleData']['data']['encryptAccountId']
        except (IndexError, AttributeError, TypeError, KeyError, ValueError):
            return  # page layout not recognised -- skip this company

    try:
        contact_name = soup.select('.contact-name')[0].get_text()
    except IndexError:
        contact_name = ''

    msg = {
        'contact_name': contact_name,
    }

    # Plain info table: <th> is the label, <td> the value.
    msg_table = soup.select('.info-table > tr')
    for row in msg_table:
        if row.select('td'):
            key = (row.select('th')[0].get_text()
                   .replace(' ', '').replace(':', '').replace('.', '').replace('/', ''))
            msg[key] = row.select('td')[0].get_text()

    # Alternate table layout used by some pages; label is the last <span>.
    for row in soup.select('tr.info-item '):
        key = (row.select('th > span')[-1].get_text()
               .replace(' ', '').replace(':', '').replace('.', ''))
        msg[key] = row.select('td')[0].get_text()

    # main_url is set by spider() to the company page currently crawled.
    detail_url = main_url[0:main_url.rindex(
        '/') + 1] + 'event/app/contactPerson/showContactInfo.htm?encryptAccountId=' + encryptAccountId

    if 'https' not in detail_url:
        detail_url = detail_url.replace('http', 'https')

    detail_msg = {
        'contactInfo': {}
    }
    try:
        detail_msg = json.loads(spider_detail(detail_url))
    except Exception:
        # Network or JSON failure -- best effort: keep scraped table data.
        pass

    # Merge; .get() guards against a JSON response lacking 'contactInfo'
    # (the original indexed it directly and would raise KeyError).
    all_msg = dict(msg, **detail_msg.get('contactInfo', {}))
    user = CpUser(contact_name=all_msg.get('contact_name'),
                  Telephone=all_msg.get('Telephone'),
                  Address=all_msg.get('Address'),
                  Zip=all_msg.get('Zip'),
                  CountryRegion=all_msg.get('CountryRegion'),
                  ProvinceState=all_msg.get('ProvinceState'),
                  City=all_msg.get('City'),
                  CompanyName=all_msg.get('CompanyName'),
                  OperationalAddress=all_msg.get('OperationalAddress'),
                  Website=all_msg.get('Website'),
                  Websiteonalibabacom=all_msg.get('Websiteonalibabacom'),
                  accountFax=all_msg.get('accountFax'),
                  accountMobileNo=all_msg.get('accountMobileNo'),
                  accountPhone=all_msg.get('accountPhone'))
    session.add(user)
    session.commit()


def outxls():
    """Export every CpUser row from the database to an Excel workbook.

    Loads a template workbook, writes a header row of column names, then
    one row per record, and saves under a new file name.

    NOTE: CpUser defines neither __hash__ nor __eq__, so the set() below
    deduplicates by object identity only -- every row is kept.  The output
    file name ('..._不去重' = 'no dedup') reflects this.
    """
    all_cp = session.query(CpUser).all()
    print(len(all_cp))
    ks = ['contact_name',
          'Telephone',
          'Address',
          'Zip',
          'CountryRegion',
          'ProvinceState',
          'City',
          'CompanyName',
          'OperationalAddress',
          'Website',
          'Websiteonalibabacom',
          'accountFax',
          'accountMobileNo',
          'accountPhone']
    ex = load_workbook('F:/test/wjf/res/zhejiang_shaoxing.xlsx', data_only=True)
    sheet1 = ex['Sheet1']
    # Header row (openpyxl cells are 1-based).
    for col, k in enumerate(ks, start=1):
        sheet1.cell(1, col).value = k
    # Identity-based "dedup" -- see docstring; count printed for comparison.
    cp_list = list(set(all_cp))
    print(len(cp_list))
    for row, cp in enumerate(cp_list, start=2):
        for col, k in enumerate(ks, start=1):
            # getattr() instead of __dict__[k]: __dict__ raises KeyError for
            # attributes SQLAlchemy has expired/deferred; getattr loads them.
            sheet1.cell(row, col).value = getattr(cp, k)
    ex.save('宁波_不去重.xlsx')


if __name__ == '__main__':
    # Entry point: by default only export the already-scraped rows to Excel.
    # Uncomment spider(url) below to (re)crawl before exporting.
    outxls()
    # spider(url)



    # Scratch / debugging leftovers kept for reference:
    # print(len(session.query(CpUser).all()))
    # spider(url)
    # user1 = CpUser()
    # user1.contact_name = 'wind'
    # user2 = CpUser()
    # user2.contact_name = 'wind'
    # set1 = {user1, user2}
    # print(set1[0])
    # print(user1 == user2)
    # a = {
    #     "contact_name": "Ms. Suzy Tian",
    #     "Telephone": "View details",
    #     "Address": "2nd FL,4TH BUILDING, 2 XIYUANWU ROAD, WESTLAKE TECHNOLOGY GARDEN",
    #     "Zip": "310030",
    #     "CountryRegion": "China",
    #     "ProvinceState": "Zhejiang",
    #     "City": "Hangzhou",
    #     "CompanyName": "Hangzhou Hiland Technology Co., Ltd.",
    #     "OperationalAddress": "Room 803, West 1st Building, Xigang Development Center, Sandun Town, Hangzhou, Zhejiang, China",
    #     "Website": "http://www.hiland.cchttp://michiland.en.made-in-china.com/",
    #     "Websiteonalibabacom": "hiland.en.alibaba.com",
    #     "accountFax": "86-571-81958377",
    #     "accountMobileNo": "13588813115",
    #     "accountPhone": "86-571-81958387"
    # }
    # ks = a.keys()
    # t = 'user = CpUser('
    # for k in ks:
    #     t += k + '=all_msg[\'' + k + '\'],'
    # t += ')'
    # print(t)
