import requests
import re
import os
import sys
from fake_useragent import UserAgent
from lxml import etree
pwd = os.getcwd()
sys.path.append(pwd)
# jiancezhan (inspection station) database model
from sql import sql_jiancezhan_model
ua = UserAgent()

# Fetch every city name and its corresponding base url
def get_city():
    """Scrape the bendibao city index page and append every
    (province, city, url) triple to spider/jiancezhan/city.txt.

    Each output line is written as "province,city,url". Note the file is
    opened in append mode, so re-running produces duplicate lines.
    """
    url = 'http://www.bendibao.com/city.htm'
    headers = {
        'User-Agent': ua.chrome
    }
    # timeout so a stalled connection cannot hang the scraper forever
    response = requests.get(url, headers=headers, timeout=30)
    # fail loudly on HTTP errors instead of silently parsing an error page
    response.raise_for_status()
    response.encoding = 'utf8'

    html = etree.HTML(response.text)
    city_div = html.xpath('//div[@class="city-list"]/dl')
    with open('spider/jiancezhan/city.txt', 'a', encoding='utf8') as f:
        for dl in city_div:
            # <dt> holds the province name; each <dd>/<a> is one city link
            shengfen = dl.xpath('dt/text()')[0]
            city_info_list = dl.xpath('dd/a')
            for city_info in city_info_list:
                city_url = city_info.xpath('@href')[0]
                city_name = city_info.xpath('text()')[0]
                print(shengfen, city_name, city_url)
                f.write(shengfen + ',' + city_name + ',' + city_url + '\n')

# Iterate over all cities from the saved city list
def parse_city():
    """Read the city list written by get_city() and scrape every city.

    Each line of city.txt is "province,city,url"; the url is the last
    comma-separated field. Delegates the actual scraping to parse_page().
    """
    # context manager guarantees the file handle is released even on error;
    # the file was written as utf8, so read it back with the same encoding
    with open('spider/jiancezhan/city.txt', 'r', encoding='utf8') as f:
        city_info_list = f.readlines()
    for city_info in city_info_list:
        # split once instead of three times per line
        parts = city_info.split(',')
        province = parts[0]
        city = parts[1]
        left_url = parts[-1].strip()
        # scrape every listing page of this city
        parse_page(left_url, province, city)

# Parse the station entries on one listing page (and follow pagination)
def parse_page(left_url, province, city, page_url='list1.htm'):
    """Scrape every listing page of vehicle inspection stations for one city
    and insert each station into the database.

    Parameters:
        left_url: base url of the city's bendibao site.
        province: province name stored with each record.
        city: city name stored with each record.
        page_url: relative page to start from (default: first page).

    Follows the "next page" link iteratively until it disappears.
    Bug fixes vs. the original: the paginator lookup now sits inside the
    try (a page without a paginator used to crash with an uncaught
    IndexError), the try no longer wraps the recursive descent (which
    masked genuine IndexErrors from deeper pages as "last page"), and the
    recursion is a loop so long paginations cannot exhaust the stack.
    """
    headers = {
        'User-Agent': ua.chrome
    }
    while page_url:
        url = left_url + 'wangdian/jdcaqjsjy/' + page_url
        print(url)
        # timeout so one dead server does not stall the whole crawl
        response = requests.get(url, headers=headers, timeout=30)
        response.encoding = 'utf8'

        html = etree.HTML(response.text)
        jiancezhan_info_list = html.xpath('//*[@id="content"]/ul[@class="catalist"]/li/div[@class="infoschema"]')
        for jancezhan_info in jiancezhan_info_list:
            name = jancezhan_info.xpath('h3/a/text()')[0]
            detail_url = left_url + jancezhan_info.xpath('h3/a/@href')[0]
            # slice off the fixed-width Chinese label prefixes
            address = jancezhan_info.xpath('p[1]/text()')[0].strip()[5:]
            telephone = jancezhan_info.xpath('p[2]/text()')[0].strip()[3:]
            print('name:', name)
            print('detail_url:', detail_url)
            print('address:', address)
            print('telephone:', telephone)
            # fetch opening hours and description from the detail page
            business_hours, desc = parse_detail(detail_url)
            print('str_time:', business_hours)
            print('desc:', desc)
            jiancezhan = sql_jiancezhan_model.JianCeZhan(province=province, city=city, name=name, address=address, telephone=telephone, business_hours=business_hours, desc=desc, detail_url=detail_url)
            sql_jiancezhan_model.insert(jiancezhan)

        # Advance to the next page: the second-to-last <li> of the paginator
        # holds the "next" link. A missing paginator or a link-less <li>
        # (last page) both raise IndexError, which ends the loop.
        try:
            next_page_li = html.xpath('//*[@id="content"]/ul[@class="paginator"]/li')[-2]
            page_url = next_page_li.xpath('a/@href')[0]
            print('next_page_url:', page_url)
        except IndexError:
            print('最后一页')
            page_url = None

# Parse business hours and description from a station detail page
def parse_detail(detail_url):
    """Fetch a station detail page and return (business_hours, desc).

    business_hours is the text after the "服务时间：" label, or '' when the
    page has no such line (the original crashed with AttributeError on
    re.search(...).group(1) in that case). desc is the second-to-last
    paragraph of the info card.
    """
    headers = {
        'User-Agent': ua.chrome
    }
    # timeout so a stalled detail request cannot hang the crawl
    response = requests.get(detail_url, headers=headers, timeout=30)
    response.encoding = 'utf8'

    html = etree.HTML(response.text)
    jiancezhan_info = html.xpath('//*[@id="content"]/div[@class="card"]/p/text()')
    str_info = ''.join(jiancezhan_info)
    # re.search returns None when the label is absent — guard against it
    match = re.search(r'服务时间：(.*)', str_info)
    business_hours = match.group(1) if match else ''
    # reuse the already-fetched paragraph list instead of re-running the xpath
    desc = jiancezhan_info[-2].strip()
    return business_hours, desc

if __name__ == '__main__':
    # get_city()  # run once to (re)build the city list before scraping
    # Guard the driver so importing this module does not kick off the crawl.
    parse_city()



