import requests
from lxml import etree
import random

# Pool of desktop browser User-Agent strings; get_headers() picks one at
# random per request so successive requests don't all present the same UA.
header_list = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) "
            "Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) "
            "Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]

def get_headers():
    """Build request headers: a randomly chosen User-Agent plus a fixed referer."""
    user_agent = random.choice(header_list)
    return {
        "User-Agent": user_agent,
        "referer": "http://www.liebiao.com",
    }

def get_city():
    """Scrape the liebiao.com homepage for the city index.

    Returns:
        list[dict]: one ``{'city_name': ..., 'city_url': ...}`` entry per
        city link found in the homepage city box.
    """
    # Consistency fix: every other fetcher in this module sends the
    # randomized headers from get_headers(); this one previously did not.
    response = requests.get(url="http://www.liebiao.com/", headers=get_headers())
    html = etree.HTML(response.text)
    all_city = html.xpath("//div[@class='box w_d']/dl/dd/a")
    # Each <a> holds the city name as text and the city landing page as href.
    return [
        {'city_name': a.xpath("./text()")[0], 'city_url': a.get('href')}
        for a in all_city
    ]

def get_pages(city_url=""):
    """Build the list of page URLs under a city/category listing.

    The base URL itself serves page 1; pages 2..N live at
    ``<city_url>/index<N>.html``.

    Args:
        city_url: base listing URL (no ``/indexN.html`` suffix).

    Returns:
        list[str]: all page URLs, starting with *city_url* itself.
    """
    max_page_num = get_max_page_num(city_url)
    page_list = [city_url]
    # range(2, 2) is empty when max_page_num == 1, so the single-page case
    # needs no special branch (the original had a redundant if/pass/else).
    for page in range(2, max_page_num + 1):
        page_list.append("{}/index{}.html".format(city_url, page))
    return page_list

def _page_num_from_url(url, base):
    """Return the page number embedded in *url* (e.g. '<base>/index5.html' -> 5).

    Returns None when *url* carries no '/index<N>.html' suffix, i.e. it is the
    bare first-page URL or an unrelated href.
    """
    tail = url[len(base):] if url.startswith(base) else url
    tail = tail.strip('/')
    if tail.startswith('index'):
        tail = tail[len('index'):]
    if tail.endswith('.html'):
        tail = tail[:-len('.html')]
    return int(tail) if tail.isdigit() else None

def get_max_page_num(city_url=""):
    """Follow the pagination links recursively and return the highest page number.

    Bug fix: the original peeled the base URL and '.html' off hrefs with
    ``str.strip(prefix)`` / ``str.strip(suffix)``, but ``strip`` removes a
    *character set* from both ends, not a literal prefix/suffix -- it only
    worked by accident when no page digit appeared in the base URL, and raised
    ``int('')`` on a bare base URL on the last page. Replaced with explicit
    prefix/suffix slicing in ``_page_num_from_url``.
    """
    # 01 fetch the page and collect the pagination hrefs
    response = requests.get(url=city_url)
    html = etree.HTML(response.text)
    href_list = html.xpath("//ul[@class='pagination']/li/a/@href")
    # 02 base URL with any '/index<N>.html' page suffix removed
    base = city_url.split('/index')[0] if '/index' in city_url else city_url
    # 03 a disabled 'next' button marks the last page
    end_page = html.xpath("//ul[@class='pagination']/li[@class='next disabled']")
    # 03.1 on the last page its own URL carries the max page number
    #      (a bare base URL means the listing has a single page)
    if len(end_page):
        return _page_num_from_url(city_url, base) or 1
    # 03.2 otherwise jump to the highest page linked from here and recurse
    page_num_list = [n for n in (_page_num_from_url(h, base) for h in href_list)
                     if n is not None]
    if not page_num_list:
        return 1
    next_url = "{}/index{}.html".format(base, max(page_num_list))
    return get_max_page_num(city_url=next_url)

def get_company_info(company_url=""):
    """Fetch one training-class detail page and return its fields as a dict.

    Returns a dict with keys: company_url, title, lianxiren (contact person),
    phone, renzheng (certification titles), desc (free-text description).
    """
    response = requests.get(url=company_url, headers=get_headers())
    page = etree.HTML(response.text)

    # Anchor node for the post; the detail xpaths below are evaluated from it.
    info = page.xpath("//div[@class='main']/div[@class='post-info']")[0]

    title = info.xpath("//div[@class='post-title']/h1[@class='ellipsis']/text()")[0]
    contact = ''.join(info.xpath("//div[@class='field-wrap']/dl[2]/dd[@class='field-detail']/span/text()"))
    phone = ''.join(info.xpath("//div[@class='contact-way-wrap']/button[@class='btn-check-phone click_btn']/@data-phone"))
    certifications = ','.join(info.xpath("//div[@class='field-wrap']/dl/dd[@class='field-detail']/i/@title"))
    description = ''.join(info.xpath("string(./div[@class='post-content']/div[@class='content-wrap'])"))

    return {
        "company_url": company_url,
        "title": title,
        "lianxiren": contact,
        "phone": phone,
        "renzheng": certifications,
        "desc": description,
    }

def get_company_url_from_page_url(page_url=""):
    """Return the detail-page URLs of every listing on one result page."""
    response = requests.get(url=page_url, headers=get_headers())
    document = etree.HTML(response.text)
    return document.xpath("//h2[@class='post-title ellipsis']/a/@href")


if __name__ == '__main__':
    # Ad-hoc smoke test: resolve the page count for one city/category listing.
    # The commented-out lines below exercise the other fetchers individually.
    city_url = "http://anshan.liebiao.com/wudaopeixun"
    get_max_page_num(city_url=city_url)
    #page_list = get_pages(city_url=city_url)
    #company_url = "http://akesu.liebiao.com/caiyipeixun/243002551.html"
    #get_company_info(company_url=company_url)
    #page_url = "http://akesu.liebiao.com/wudaopeixun/"
    #get_company_url_from_page_url(page_url=page_url)

