import requests
from lxml import etree

HEADERS = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36',
    'Referer':'http://www.dytt8.net/'
}

BASE_DOMAIN = 'http://www.dytt8.net/'
def get_per_url(url):
    """Fetch one list page and return the full URLs of every movie detail page.

    Args:
        url: URL of a dytt8 list page (e.g. list_23_1.html).

    Returns:
        list[str]: absolute detail-page URLs (BASE_DOMAIN + relative href),
        with navigation links ending in "index.html" filtered out.
    """
    response = requests.get(url, headers=HEADERS)
    # The site serves GBK-encoded pages; requests would otherwise guess wrong.
    response.encoding = 'gbk'
    html = etree.HTML(response.text)
    detail_urls = html.xpath("//table[@class='tbspan']//a[@class='ulink']/@href")
    # Build a new filtered list instead of deleting by index while iterating:
    # `del detail_urls[index]` inside enumerate() shifts the remaining items
    # left and skips the element right after each deletion, so some
    # "index.html" links slipped through in the original code.
    return [BASE_DOMAIN + href
            for href in detail_urls
            if not href.endswith("index.html")]
def parse_url(url):
    """Fetch a movie detail page and parse its metadata into a dict.

    Args:
        url: absolute URL of a dytt8 movie detail page.

    Returns:
        dict with keys: 'title' (list of h1 text nodes), 'cover',
        'screenshot', and — when present on the page — 'name', 'city',
        'category', 'driector' (key typo kept for backward compatibility),
        'actors' (list), 'profile' (list).
    """
    resp = requests.get(url, headers=HEADERS)
    # Pages are GBK-encoded; set explicitly before reading .text.
    resp.encoding = 'gbk'
    html = etree.HTML(resp.text)

    movie = {}
    # h1 may contain several text nodes; keep the list as the original did.
    movie['title'] = html.xpath("//h1//text()")

    # All movie information lives inside the div#Zoom element.
    zoom = html.xpath("//div[@id='Zoom']")[0]
    # Leading dot restricts the XPath search to the current node's subtree.
    imgs = zoom.xpath('.//img/@src')
    # Guard against pages with no images at all: the original hit
    # `img[0]` unconditionally in its else-branch and raised IndexError.
    movie['cover'] = imgs[0] if imgs else ""
    movie['screenshot'] = imgs[1] if len(imgs) >= 2 else ""

    def parse_info(info, tag):
        # Strip the "◎xxx" field label and surrounding whitespace.
        return info.replace(tag, "").strip()

    # Each text node is one line of the info block; dispatch on its label.
    infos = zoom.xpath('.//text()')
    for index, info in enumerate(infos):
        if info.startswith('◎译　　名'):
            movie['name'] = parse_info(info, '◎译　　名')
        elif info.startswith('◎产　　地'):
            movie['city'] = parse_info(info, '◎产　　地')
        elif info.startswith('◎类　　别'):
            movie['category'] = parse_info(info, '◎类　　别')
        elif info.startswith('◎导　　演'):
            # NOTE: 'driector' is a long-standing key typo; kept so existing
            # consumers of the returned dict keep working.
            movie['driector'] = parse_info(info, '◎导　　演')
        elif info.startswith('◎主　　演'):
            # The cast spans multiple lines; collect until the next ◎ label.
            # Check BEFORE appending — the original appended the next field's
            # header line into `actors` before breaking.
            actors = [parse_info(info, '◎主　　演')]
            for i in range(index + 1, len(infos)):
                if infos[i].startswith('◎'):
                    break
                actors.append(infos[i].strip())
            movie['actors'] = actors
        elif info.startswith('◎简　　介 '):
            # Synopsis: collect following lines until the next ◎ label.
            profile = []
            for i in range(index + 1, len(infos)):
                if infos[i].startswith('◎'):
                    break
                profile.append(infos[i].strip())
            movie['profile'] = profile
    return movie
def main():
    """Crawl the newest-movies list (page 1 only) and print parsed results."""
    list_url_template = "http://www.dytt8.net/html/gndy/dyzz/list_23_{}.html"
    movies = []
    # range(1, 2) deliberately fetches only the first list page.
    for page in range(1, 2):
        detail_urls = get_per_url(list_url_template.format(page))
        for detail_url in detail_urls:
            movies.append(parse_url(detail_url))
    print(movies)
    print(len(movies))

if __name__ == '__main__':
    main()