import re
import time
from datetime import datetime

import requests
from bs4 import BeautifulSoup
from sqlalchemy import Column, Integer, String, create_engine, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

import made_in_china_detail

header = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
          'Accept-Encoding': 'gzip, deflate, br',
          'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
          'Cache-Control': 'max-age=0', 'Connection': 'keep-alive',
          'Cookie': 'pid=jEuNTIuMTY2LjE2MDIwMjEwMzA5MDg0ODIzNDkyMjAwOTI0NTEN; webp=t; __pd=1f0a79mi66c5; sf_img=AM; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22fKkTZNSunDrL_00%22%2C%22first_id%22%3A%221781474daa223b-0eeec902869294-4c3f227c-2073600-1781474daa37ee%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22%24device_id%22%3A%221781474daa223b-0eeec902869294-4c3f227c-2073600-1781474daa37ee%22%7D; _kwd=29tX35oYW5nemhvdQY; _skwd=29tX35Eb29yIExvY2t+ISxjb21ffk5haWx+ISxjb21fflNvZmEgTGVnY; __snaker__id=bQ5PmKsZHmUd3Krz; gdxidpyhxdE=q924zMWDo0dEZNkCd8UjXI2BJBy9Pk35JO1tlhZZkYmlflUTql8S1uDdNCzzN1zHV4pY0OwJ0gh%5CynePU%2BtKOWX6AzR9U5WYeYKnGMBxZBmbT%5CE9jl7CB%5Ca4%2BLztxJUBkDscknW%2FA4Xt24%2Fk%5CXx7XC5d3%5C%2F%2BWNssv7ontD5Rd47Az%2FXp%3A1615865861580; _9755xjdesxxd_=32; YD00400313292638%3AWM_NI=N9R%2BS1a09t7hNbGzvMhgTdQ0wLnGgLhNo4JttR1BoCRT7VIn3IZhl2U56O2tNpfL12%2F3ec6%2BH7bo5mt29wnpEXS9oOJjqZQYsR9LFkKHbxKvoPOw9HJm6opeCVPmjDh%2BeFg%3D; YD00400313292638%3AWM_NIKE=9ca17ae2e6ffcda170e2e6eeb4eb4fa18eb7d5d86481e78ea2c55b938b8baef17496ab99b7e839a19ebca2c12af0fea7c3b92a8ee79eccd23ba3f0f795b6648ebe899bf547acb99c85d43cb594a2a8e648ac92a1d5c980b88effa3ee49928cf9b7cc66f1979689ef3a97ebfab0b1808eb0aaadb43aededab84b5218ce9ae90b867b8eef798ed7c87889ddaee458ab6bfbbd453b18ef7adc5618998ae99ca7ead908198bc448ab3fa90f36f818aa08ee950acf5aba7e637e2a3; YD00400313292638%3AWM_TID=TwALtQLxurVFBBBUARZv0RIx0W9VuICZ; lg_name=9c0584a3e5b0f85a; lg=Z4EeRD3wvRcY3RjVgZqFMA==~@~Fuy51L0tspPWe3+50wJwlA==~@~a6OuXcqRHbj9Uwz9QyortB8rhDzey8E9~@~a6OuXcqRHbjb/GxrB9XZ/ona5fxcDDwsBcbX4ygX5xo=~@~Fuy51L0tspPd5kJrLZozlw==~@~; se=jEuNTIuMTYzLjg1MjAyMTAzMTYxMTIyMjg2NzgzMDA1NzEwN; dpr=1; cid=jAyMTAzMTYxMTIyNTE5MTkwMDA6MDU0Nzk2NTc1NTE0MTk2OTQ4MDcM; sid=jI5Nzk4NzI0NDE3NTk0OTc6NjEuNTIuMTYzLjg1OjE3NDU3OTMxODI6MDAM; JSESSIONID=A4AAA475FA2EBD6DFE9B3B1336E7715F; CPID=0t/tAYD2481m/JmD1K3SmenYlkCJJaFO; LVT=UxklJARHkRztwDV1pXpYCcC0TDlAywe2; LOGT=P8G4+UJ4WhFpKGkSGc4LNfqY7uOWQtGW; loginSource=1; wel_name=2FvZ; cbid=Tc0NTc5MzE4MjowMAM',
          'Host': 'www.made-in-china.com', 'TE': 'Trailers', 'Upgrade-Insecure-Requests': '1',
          'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:86.0) Gecko/20100101 Firefox/86.0'}

main_url = ''

url = 'https://www.made-in-china.com/companysearch.do?word=hangzhou&file=&subaction=hunt&style=b&mode=and&code=0&comProvince=nolimit&order=0&isOpenCorrection=1'

# 创建对象的基类:
Base = declarative_base()
# 初始化数据库连接:
engine = create_engine('mysql+mysqlconnector://root:yizheng@192.168.0.245:3306/test',
                       echo=True,
                       max_overflow=5)
Base.metadata.create_all(engine)
DBSession = sessionmaker(bind=engine)
session = DBSession()

hsession = requests.session()
# 定义对象:
class CpUser(Base):
    # 表的名字:
    __tablename__ = 'made_in_china_hangzhou'

    # 表的结构:
    id = Column(Integer, primary_key=True)
    Address = Column(String(200))
    Telephone = Column(String(200))
    MobilePhone = Column(String(200))
    Fax = Column(String(200))
    Showroom = Column(String(200))
    infoDetail = Column(String(200))
    url = Column(String(200))
    cpName = Column(String(200))
    time = Column(DateTime, default=datetime.now)


Base.metadata.create_all(engine)


def spider(url):
    r = hsession.get(url, headers=header)
    soup = BeautifulSoup(r.text, 'lxml')
    data = soup.select('div.list-node > h2 > a')
    print(url)
    print(r)
    next_page = 'https:' + soup.select('a.next')[0].get('href')
    i = 0
    #
    global main_url
    print(data)
    for d in data:
        main_url = d.get('href')
        print(main_url)
        contactinfo_rul = 'https:' + main_url[0:main_url.rindex('com') + 3] + '/contact-info.html'
        parse_detail(1, contactinfo_rul)

    while True:
        i += 1
        spider(next_page)


def parse_detail(index, contactinfo_rul):
    rt = made_in_china_detail.spider_detail(contactinfo_rul)
    soup = BeautifulSoup(rt, 'lxml')
    label = soup.select(
        '.contact-info > div > div:nth-child(1)')
    fields = soup.select(
        '.contact-info > div > div:nth-child(2)')
    info_detail = soup.select(
        '.info-detail')
    names = (soup.select('.title-txt > a:nth-child(1) > h1:nth-child(1)')
             or soup.select(
                'div.com-name > div:nth-child(1) > div:nth-child(1) > h1:nth-child(1) > a:nth-child(1)')
             or soup.select(
                '.com-name-txt > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1) > h1:nth-child(1)')
             )
    try:
        name = names[0].get_text()
    except:
        print(contactinfo_rul)
        print(3 * index)
        time.sleep(3 * index)
        parse_detail(index + 1, contactinfo_rul)
        return

    name = re.sub(r'[\f\n\r\t\v]',
                  '',
                  name).strip()
    msg = {}
    for (i, v) in enumerate(label):
        if v.get_text():
            field = fields[i]
            if len(field) > 1:
                field = field.select(':nth-child(1)')[0]
            msg[re.sub(r'[:./\s]+', '', v.get_text())] = re.sub(r' +',
                                                                ' ',
                                                                re.sub(r'[\f\n\r\t\v]',
                                                                       '',
                                                                       field.get_text().strip()))

            # msg[v.get_text().replace(' ', '').replace(':', '').replace('.', '').replace('/', '')] = \
            #     fields[i].get_text().strip()
    info_detail_str = ''
    for i in info_detail:
        info_detail_str += i.get_text().strip()
    msg['infoDetail'] = info_detail_str.replace('\n', ',')
    cp = CpUser(Address=msg.get('Address'),
                Telephone=msg.get('Telephone'), MobilePhone=msg.get('MobilePhone'),
                Fax=msg.get('Fax'), Showroom=msg.get('Showroom'), infoDetail=msg.get('infoDetail'),
                url=contactinfo_rul, cpName=name)
    session.add(cp)
    session.commit()


if __name__ == '__main__':
    spider(url)
