from .config import HEADERS
import requests
from lxml import etree
import time
import re

__doc__ = """
    根据用户输入的电影名称，movie_searcher进行搜索，然后默认选取搜索列表第一个搜索结果作为选择电影。
    通过detail_collector获取具体电影信息，选择返回信息中的movie_id和itemreviewed（电影名称）作为短评爬取参数。
    comment_spider根据movie_id爬取短评，默认爬取11页，获得220条短评，然后comment_download存储到csv文件中。
"""


def movie_searcher(keyword, silent=False):
    """Query Douban's subject-suggest API and return matching movies.

    Args:
        keyword: movie title (or fragment) to search for.
        silent: when False, print a numbered selection menu of the hits.

    Returns:
        A list of (title, url) tuples, one per suggestion whose type
        is 'movie'.
    """
    search_url = 'https://movie.douban.com/j/subject_suggest?q=' + keyword
    response = requests.get(url=search_url, headers=HEADERS)
    # Use .get() for a pure lookup: the original setdefault() silently
    # inserted a 'type': None key into every non-movie suggestion dict.
    movie_links = [(item['title'], item['url'])
                   for item in response.json()
                   if item.get('type') == 'movie']
    if not silent:
        for idx, (title, _url) in enumerate(movie_links, 1):
            print('【选择', str(idx), '】', title)
    return movie_links


def detail_collector(checked_movie, silent=False):
    """Fetch a Douban movie detail page and extract its metadata.

    Args:
        checked_movie: URL of the movie's detail page.
        silent: when False, print the movie title and a wrapped summary.

    Returns:
        Tuple of (html_tree, movie_id, itemreviewed, show_year, director,
        country, genre_list, runtime, average, summary).
    """
    response = requests.get(url=checked_movie, headers=HEADERS)  # static page, parse directly
    html_tree = etree.HTML(response.text)  # parsed HTML document tree
    # Numeric subject id taken from the <link rel="alternate"> href.
    # (typo fix: was `moive_id`)
    movie_id = re.search(r'\d+', html_tree.xpath('/html/head/link[@rel="alternate"]/@href')[0]).group()
    # Replace ASCII commas so the plain comma-joined CSV stays well-formed for pandas.
    itemreviewed = html_tree.xpath('//span[@property="v:itemreviewed"]/text()')[0].replace(',', '，')  # movie title
    # show_year = html_tree.xpath('//span[@class="year"]/text()')[0]
    show_year = html_tree.xpath('//span[@property="v:initialReleaseDate"]/text()')[0]  # release date
    director = html_tree.xpath('//*[@id="info"]/span[1]/span[2]/a/text()')[0]  # director
    value = re.findall('<span class="pl">制片国家/地区:</span>(.*?)<br/>', response.text)  # country/region
    country = "/".join(value).replace(' ', '').replace('中国大陆', '中国').replace('中国香港', '中国').replace('中国台湾', '中国')
    genre_source = html_tree.xpath('//span[@property="v:genre"]/text()')  # genres
    genre_list = '/'.join(genre_source)
    runtime = html_tree.xpath('//span[@property="v:runtime"]/text()')[0]  # runtime
    average = html_tree.xpath('//strong[@property="v:average"]/text()')[0]  # Douban rating
    summary_source = html_tree.xpath('//span[@property="v:summary"]/text()')  # plot summary
    # Keep only CJK characters and common Chinese punctuation.
    summary = ''.join(
        re.findall('[\u3002\uff1b\uff0c\uff1a\u201c\u201d\uff08\uff09\u3001\uff1f\u300a\u300b\u4e00-\u9fa5]',
                   ''.join(summary_source)))
    if not silent:
        # print('选中电影链接为:', checked_movie)
        # print('电影ID:', movie_id)
        print('电影名称:', itemreviewed)
        # print('上映时间:', show_year)
        # print('导演:', director)
        # print('制片国家:', country)
        # print('类型:', genre_list)
        # print('片长:', runtime)
        # print('评分:', average)
        # print('剧情简介:', end='')
        for idn, s in enumerate(summary, 1):  # wrap the summary every 66 characters
            print(s, end='')
            if idn % 66 == 0:
                print()
        print("\n" + "=" * 150)
    return html_tree, movie_id, itemreviewed, show_year, director, country, genre_list, runtime, average, summary


def comment_spider(movie_id, perType='h', total_pages=10, silent=False):
    """Scrape short comments for one movie, page by page.

    The comment pages are static HTML; `start` advances 20 per page and
    `sort=new_score` orders by popularity. Note the default crawls
    total_pages + 1 pages (11), matching the module docstring.

    Args:
        movie_id: numeric Douban subject id.
        perType: percent_type filter — 'h' good, 'm' medium, 'l' bad.
        total_pages: crawl stops after total_pages + 1 pages at the latest.
        silent: accepted for interface symmetry; unused here.

    Returns:
        Three parallel lists: star ratings, comment timestamps, comment texts.
    """
    url_template = ('https://movie.douban.com/subject/{}/comments'
                    '?percent_type={}&start={}&limit=20&status=P&sort=new_score')
    ratings, stamps, texts = [], [], []
    for page in range(total_pages + 1):
        page_url = url_template.format(movie_id, perType, page * 20)
        page_html = etree.HTML(requests.get(url=page_url, headers=HEADERS).text)
        page_ratings = page_html.xpath('//span[@class="comment-info"]/span[2]/@title')  # star levels
        # An empty extraction means we have run past the last comment page.
        if not page_ratings:
            print('短评页数到底了')
            break
        ratings.extend(page_ratings)
        stamps.extend(page_html.xpath('//span[@class="comment-info"]/span[3]/@title'))  # timestamps
        # Strip newlines and swap ASCII commas so the comma-joined CSV parses cleanly.
        texts.extend(
            raw.replace('\r', '').replace('\n', '').replace(',', '，')
            for raw in page_html.xpath('//p[@class=" comment-content"]/span/text()')
        )
        time.sleep(3)  # throttle the crawl
    return ratings, stamps, texts


def comment_download(ones, twos, threes, filename):
    """Write the scraped comment columns to dataset/<filename> as a plain CSV.

    Rows are comma-joined by hand, so upstream callers must have stripped
    ASCII commas from the fields (detail/comment scrapers do this already).

    Args:
        ones: iterable of star-rating strings.
        twos: iterable of comment timestamps.
        threes: iterable of comment texts.
        filename: target file name inside the dataset/ directory.
    """
    movie_info = [['ID', '观众推荐程度', '评论时间', '短评内容']]
    # enumerate replaces the original hand-maintained idx counter.
    movie_info.extend(
        [str(idx), one, two, three]
        for idx, (one, two, three) in enumerate(zip(ones, twos, threes), 1)
    )
    with open('dataset/' + filename, 'w', encoding='utf8') as f:
        # One batched write instead of a write call per row.
        f.writelines(','.join(row) + '\n' for row in movie_info)


def crawl_comment_as_csv(search_word):
    """Search Douban for search_word, take the first hit, and dump its
    good/medium/bad short comments into three CSV files under dataset/.

    Args:
        search_word: movie name to search for.

    Returns:
        The (comma-sanitized) title of the movie that was crawled.
    """
    links = movie_searcher(keyword=search_word)
    first_hit_url = links[0][1]  # default to the first search result
    details = detail_collector(first_hit_url)
    movie_id, itemreviewed = details[1], details[2]

    # One pass per sentiment bucket: good / medium / bad reviews.
    for per_type, suffix in (('h', '_好评.csv'), ('m', '_中评.csv'), ('l', '_差评.csv')):
        stars, stamps, texts = comment_spider(movie_id=movie_id, perType=per_type)
        comment_download(stars, stamps, texts, itemreviewed + suffix)

    print('【' + itemreviewed + "】的豆瓣短评爬取完毕，已存储至dataset文件夹")
    return itemreviewed


# if __name__ == '__main__':
    # search_word = input('请输入搜索电影名称:')
    # crawl_comment_as_csv(search_word='黑客帝国')
