import random
import unicodedata

import requests
from lxml import etree
from scrapy import Selector
import base64
# Desktop browser User-Agent strings; one is picked at random per request so
# the scraper looks less like a bot.
# FIX: the first entry previously lacked a trailing comma, so Python silently
# concatenated it with the next string into one malformed User-Agent.
USER_AGENTS = [
    "Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Opera/9.80 (Windows NT 5.1; U; zh-cn) Presto/2.9.168 Version/11.50",
    "Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
    "Mozilla/5.0 (Windows NT 5.2) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.122 Safari/534.30",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.2)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 5.0; Windows NT)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12 ",
]
        # Pool of HTTP proxies; one is chosen at random for each request
# FIX: requests matches proxy-dict keys case-sensitively against the (lowercase)
# URL scheme, so the previous uppercase 'HTTP' keys were silently ignored and
# every request went out without a proxy. Keys are now lowercase 'http'.
# Accidental copy-paste duplicates were also removed.
IP_AGENTS = [
    {'http': '60.170.204.30:8060'},
    {'http': '111.3.118.247:30001'},
    {'http': '220.168.52.245:53548'},
    {'http': '202.116.32.236:80'},
    {'http': '14.215.212.37:9168'},
    {'http': '39.106.71.115:7890'},
    {'http': '202.55.5.209:8090'},
    {'http': '118.163.120.181:58837'},
    {'http': '121.13.252.62:41564'},
    {'http': '106.54.128.253:999'},
    {'http': '210.5.10.87:53281'},
    {'http': '112.6.117.135:8085'},
    {'http': '61.150.96.27:36880'},
    {'http': '106.15.197.250:8001'},
    {'http': '202.109.157.65:9000'},
    {'http': '112.74.17.146:8118'},
    {'http': '183.236.123.242:8060'},
    {'http': '103.37.141.69:80'},
    {'http': '218.75.69.50:57903'},
    {'http': '113.88.208.112:8118'},
    {'http': '122.9.101.6:8888'},
    {'http': '47.113.90.161:83'},
    {'http': '61.216.156.222:60808'},
]

# def parseurl(i,path):
#     list1=[]
#     with open(path, "r", encoding="utf-8") as rd:
#         lines=rd.readlines();
#         for line in lines:
#             list1.append(line.replace('\n',''))
#     list1 = list1[i:]
#     return list1


def pachong_movie(url):
    """Scrape one Douban movie page and return (detail_str, comments_str).

    detail_str: "&&&"-joined fields in this fixed order: imdb id, poster url,
        title, aliases, release date, director, scriptwriter, actors, genre,
        area, language, rating count, summary, runtime, score, and the
        five-star..one-star percentages.
    comments_str: "<imdb>@@" followed by one "@@"-terminated record per
        review, each record "&&"-joined: avatar url, user name, post time,
        review title, review body.

    NOTE(review): the separators must stay in sync with movie_detail() and
    movie_comment(), which split these strings back apart.

    Raises whatever requests raises on network failure, and IndexError if the
    page layout does not match the xpaths below.
    """
    headers = {'User-Agent': random.choice(USER_AGENTS)}
    proxy = random.choice(IP_AGENTS)
    # timeout prevents hanging forever on a dead proxy (was missing before)
    resp = requests.get(url, headers=headers, proxies=proxy, timeout=15).text
    html = etree.HTML(resp)
    selector = Selector(text=resp)

    # Text fragments of the "#info" block, stripped of newlines and the '/'
    # separators Douban uses between values.
    out_data = html.xpath("//*[@id='info']/text()")
    out_data = [i.replace('\n', '').strip().replace('/', '') for i in out_data]
    out_data = [i for i in out_data if i != '']

    # Positional fields of the info block — assumes Douban's fixed layout:
    # first fragment is area, second language, last the IMDb id, and the
    # one before it the alternative titles. TODO confirm against more pages.
    imdb = out_data[-1]
    bieming = out_data[-2]
    area = out_data[0]
    language = out_data[1]

    img = html.xpath("//div[@id='mainpic']/a/img/@src")[0]

    name = html.xpath("//div[@id='content']/h1/span[1]/text()")[0]
    name = name.split(" ")[0]  # keep only the part before the first space

    date = html.xpath("//span[@property='v:initialReleaseDate']/text()")[0]
    date = date.split("(")[0]  # drop the "(region)" suffix

    director = selector.xpath("//div[@id='info']/span[1]/span[2]")
    director = director.xpath('string(.)').extract()[0]

    # Scriptwriter / actor spans are absent on some entries; fall back to the
    # placeholder '无' ("none"). Each xpath is now evaluated only once.
    scriptwrit = selector.xpath("//div[@id='info']/span[2]/span[2]")
    scriptwriter = scriptwrit.xpath('string(.)').extract()[0] if scriptwrit else '无'

    act = selector.xpath("//div[@id='info']/span[3]/span[2]")
    actor = act.xpath('string(.)').extract()[0] if act else '无'

    # First listed genre only. Renamed from `type` to stop shadowing the builtin.
    genre = html.xpath("//span[@property='v:genre']/text()")[0]

    remark_count = html.xpath("//*[@id='interest_sectl']/div[1]/div[2]/div/div[2]/a/span/text()")[0]

    intro = html.xpath("//span[@property='v:summary']/text()")[0]
    introduction = intro.replace(' ', '').replace('\n', '')

    lasts_time = html.xpath("//span[@property='v:runtime']/text()")[0]
    lasts_time = lasts_time.split("(")[0]  # drop the "(alt runtime)" suffix

    score = html.xpath("//div[@id='interest_sectl']/div[1]/div[2]/strong/text()")[0]

    # Percentage of 5..1 star votes, read from the rating histogram rows.
    five_star = html.xpath("//div[@id='interest_sectl']/div[1]/div[3]/div[1]/span[2]/text()")[0]
    four_star = html.xpath("//div[@id='interest_sectl']/div[1]/div[3]/div[2]/span[2]/text()")[0]
    three_star = html.xpath("//div[@id='interest_sectl']/div[1]/div[3]/div[3]/span[2]/text()")[0]
    two_star = html.xpath("//div[@id='interest_sectl']/div[1]/div[3]/div[4]/span[2]/text()")[0]
    one_star = html.xpath("//div[@id='interest_sectl']/div[1]/div[3]/div[5]/span[2]/text()")[0]

    comments_str = imdb + "@@"
    comments = html.xpath('//*[@id="reviews-wrapper"]/div[2]/div')
    for comment in comments:
        # Avatar, user name and post time may each be missing -> placeholder '无'.
        srcs = comment.xpath('./div/header/a[1]/img/@src')
        comments_str += (srcs[0] if srcs else "无") + "&&"

        names = comment.xpath('./div/header/a[2]/text()')
        comments_str += (names[0] if names else "无") + "&&"

        times = comment.xpath('./div/header/span[2]/text()')
        comments_str += (times[0] if times else "无") + "&&"

        comment_title = comment.xpath('./div/div/h2/a/text()')[0]
        comments_str += comment_title.replace('\n', '').replace(' ', '') + "&&"

        comment_content = comment.xpath("./div/div/div[1]/div/text()")[0]
        comment_content = comment_content.replace('\n', '')
        # Drop the last two characters — presumably a site-specific trailing
        # marker on truncated reviews; TODO confirm.
        comment_content = comment_content[:-2].strip()
        # Remove symbol-category characters (emoji, dingbats, ...) that would
        # otherwise pollute downstream storage.
        comment_content = "".join(ch for ch in comment_content
                                  if unicodedata.category(ch)[0] != 'S')
        comments_str += comment_content + "@@"

    detail_str = "&&&".join([
        imdb, img, name, bieming, date, director, scriptwriter, actor,
        genre, area, language, remark_count, introduction, lasts_time,
        score, five_star, four_star, three_star, two_star, one_star,
    ])

    return detail_str, comments_str



def movie_detail(line):
    """Split one "&&&"-separated detail record into a tuple of its fields."""
    cleaned = line.replace('\n', '')
    return tuple(cleaned.split("&&&"))


def movie_comment(line):
    """Parse a comments_str produced by pachong_movie().

    Input format: "imdb@@rec@@rec@@...@@" where each rec is
    "img&&user&&time&&title&&body".

    Returns a list of tuples (imdb, img, base64(user) as bytes, time, title,
    body). Records whose last field (the review body) is empty are dropped.

    FIX: the accumulator was named `list`, shadowing the builtin.
    """
    parts = line.replace('\n', '').split("@@")
    imdb = parts[0]
    records = []
    # The trailing "@@" terminator leaves a final empty chunk -> parts[1:-1].
    for raw in parts[1:-1]:
        fields = (imdb,) + tuple(raw.split("&&"))
        # Encode the user name so it is not stored in plain text.
        encoded_name = base64.b64encode(fields[2].encode('utf-8'))
        record = fields[:2] + (encoded_name,) + fields[3:]
        if record[-1]:  # skip records with an empty review body
            records.append(record)
    return records



if __name__ == '__main__':
    # Smoke test: scrape a single movie page and print the parsed results.
    detail_str, comments_str = pachong_movie('https://movie.douban.com/subject/25662329/')
    print(movie_detail(detail_str))
    print(movie_comment(comments_str))