import requests
from bs4 import BeautifulSoup
import time

# Today's date as "YYYY.MM.DD" — only referenced by commented-out code below.
local = time.strftime("%Y.%m.%d")
# Desktop-browser User-Agent sent with every request (presumably so the
# site serves normal pages rather than a bot response — not verifiable here).
headers = {
    "User-Agent":
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"
}


def download_page(url):
    """Fetch *url* with the shared browser headers and return the body text.

    A timeout is set so one stalled connection cannot hang the whole crawl
    forever (the original call had none, which blocks indefinitely).
    """
    r = requests.get(url, headers=headers, timeout=10)
    return r.text


# Download an image and save it to disk.
def download_img(picUrl, title):
    """Download the image at *picUrl* and save it as '<title[:4]>.jpg'.

    Only the first four characters of *title* form the filename, matching
    the original behaviour.
    """
    read = requests.get(picUrl, headers=headers, timeout=10)
    img_name = title[:4]
    # 'with' guarantees the handle is closed even if the write raises;
    # the original open()/close() pair leaked it on failure.
    with open('%s.jpg' % img_name, 'wb') as f:
        f.write(read.content)


# Parse one search-result page: page number, category, title, summary, date.
def _strip_label(text, label):
    """Return *text* with CR/LF removed and a leading *label* prefix dropped.

    Fixes the original ``str.lstrip(label)`` misuse: lstrip treats its
    argument as a SET of characters, not a prefix, so it could also strip
    legitimate leading characters of the value itself.
    """
    text = text.replace('\r', '').replace('\n', '')
    if text.startswith(label):
        text = text[len(label):]
    return text


def get_content(html, page, keyword):
    """Extract company records from one hc360 search-result page.

    For every result box the company name, main products, address,
    registered capital and founding date are scraped; the company's own
    detail page is then fetched for contact person / phone numbers.
    Each record is appended to the output file via save_txt().

    html    -- HTML source of a search-result page.
    page    -- 1-based page number (only echoed into the output).
    keyword -- search keyword (only echoed into the output).
    """
    # NOTE: this template is written verbatim to the result file
    # (labels: page / category / company / products / address /
    # registered capital / founded / contact / phone / mobile).
    output = """
第{}页
    类别：{}
    公司：{}
    主营：{}
    地址：{}
    注册资本：{}
    成立时间：{}
    联系人：{}
    电话：{}
    手机：{}
   
------------------------------------

"""

    soup = BeautifulSoup(html, 'html.parser')

    for box in soup.find_all('div', class_="contbox"):
        company = box.find('dt', class_='til').find('h3').find('a').get_text()
        company = company.strip('\r\n')

        product = box.find('dt', class_='info').find('a').find('font').get_text()
        product = product.strip('\r\n')

        address = _strip_label(
            box.find('em', string='所在地址：').find_parent('a').get_text(),
            '所在地址：')

        reg_money = _strip_label(
            box.find('em', string='注册资本：').find_parent('a').get_text(),
            '注册资本：')

        build_time = _strip_label(
            box.find('em', string='成立时间：').find_parent('a').get_text(),
            '成立时间：')

        # Follow the company's own page for the contact details.
        link_url = box.find('dt', class_='til').find('h3').find('a')['href']
        link_html = download_page(link_url)
        link_soup = BeautifulSoup(link_html, 'html.parser')

        client = ''
        shot_phone = ''
        telephone = ''
        try:
            client = link_soup.find('span',
                                    class_='ContactLeft letter04',
                                    string='联系人').find_parent('li').get_text()
            client = _strip_label(client, '联系人：')

            shot_phone = link_soup.find(
                'span', class_='ContactLeft letterLeft',
                string='电话').find_parent('li').get_text()
            shot_phone = _strip_label(shot_phone, '电话：')

            telephone = link_soup.find(
                'span', class_='ContactLeft letterLeft',
                string='手机').find_parent('li').get_text()
            telephone = _strip_label(telephone, '手机：')

        except AttributeError:
            # Some detail pages lack one of the contact fields (find()
            # returns None); keep the partial record instead of aborting.
            pass
        finally:
            save_txt(
                output.format(page, keyword, company, product, address,
                              reg_money, build_time, client, shot_phone,
                              telephone))


def save_txt(*args):
    """Append each string in *args* to the result file, UTF-8 encoded.

    The file is opened once for all arguments; the original re-opened it
    inside the loop for every single argument.
    """
    with open('_hc360.com.txt', 'a', encoding='utf-8') as f:
        for text in args:
            f.write(text)


def get_pages(keyword):
    """Return the total number of result pages for *keyword*, as a string.

    Scrapes the '共N页' ("N pages in total") counter from the first
    search-result page and echoes it to stdout.
    """
    first_url = 'https://s.hc360.com/company/search.html?kwd={}&pnum={}'.format(
        keyword, 1)

    page_soup = BeautifulSoup(download_page(first_url), 'html.parser')
    counter = page_soup.find('span', class_='total').get_text()
    total_pages = counter.lstrip('共').rstrip('页')

    print(keyword + total_pages)
    return total_pages


def main():
    """Run the crawler once for each predefined industry keyword."""
    # food, cosmetics, medicine, semiconductor, optoelectronics,
    # textiles, toys, clothing
    keywords = ['食品', '化妆品', '医药', '半导体', '光电', '纺织', '玩具', '服装']

    # Loop variable renamed: the original used 'str', shadowing the builtin.
    for keyword in keywords:
        spider(keyword)


def spider(keyword, max_pages=1):
    """Crawl search-result pages for *keyword* and record each company.

    max_pages -- number of result pages to scrape, starting at page 1.
                 Defaults to 1, which reproduces the original behaviour:
                 a leftover debug override ('total_pages = 2' feeding
                 'range(1, 2)') discarded the real page count and limited
                 the crawl to the first page.  Pass None to scrape every
                 page reported by get_pages().
    """
    # get_pages() is always called, preserving its original side effect
    # of printing the real page count.
    total_pages = int(get_pages(keyword))
    if max_pages is not None:
        total_pages = int(max_pages)

    # range end is inclusive of the last page here; the original
    # 'range(1, total_pages)' skipped it.
    for page in range(1, total_pages + 1):
        url = 'https://s.hc360.com/company/search.html?kwd={}&pnum={}'.format(
            keyword, page)
        html = download_page(url)
        get_content(html, page, keyword)


# Run the crawl only when executed as a script (not on import).
if __name__ == '__main__':
    main()
