"""
    @author yu_chen
    @date  2019年09月21日18:37:47
    @todo 
"""
import requests
import json
import time
from lxml import etree

from spider_common.DataUtil import DataUtil
from spider_common.Proxy import Proxy


class GetComments(object):
    """Crawl NetEase Cloud Music comments for every song of a playlist page."""

    def __init__(self):
        # Browser-like headers so the music.163.com API accepts the requests.
        self.headers = {
            'Referer': 'http://music.163.com/',
            'Host': 'music.163.com',
            'Accept-Language': "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
            'Accept-Encoding': "gzip, deflate",
            'Content-Type': "application/x-www-form-urlencoded",
            'Origin': 'https://music.163.com',
            'Connection': "keep-alive",
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36'
                          ' (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
        }
        # One session so connections are reused across all requests.
        self.session = requests.session()
        # Proxy provider; get_proxy() pulls a fresh proxy URL from it.
        self.proxy = Proxy()
        # Output name passed to DataUtil when persisting collected comments.
        self.current_songs_list_name = ''

    def send_request(self, url):
        """
        Send an HTTP GET request through a proxy, retrying once with a
        freshly fetched proxy if the first attempt fails.

        :param url: URL to request
        :return: requests.Response
        """
        try:
            response = self.session.get(url, headers=self.headers, proxies=self.get_proxy())
        except requests.RequestException:
            # Network/proxy failure: get_proxy() fetches a new proxy, retry once.
            # (The original bare `except:` also swallowed KeyboardInterrupt and
            # fetched a proxy twice before retrying.)
            response = self.session.get(url, headers=self.headers, proxies=self.get_proxy())
        return response

    def get_proxy(self):
        """Refresh self.proxies with a newly fetched proxy URL and return it."""
        self.proxies = {
            'https': self.proxy.get_proxy_url()
        }
        return self.proxies

    def get_json(self, song_id, offset):
        """
        Fetch one page (20 items) of the comment API and parse it.

        :param song_id: song id
        :param offset: comment offset used for pagination
        :return: dict parsed from the JSON response
        """
        url = 'http://music.163.com/api/v1/resource/spider_comments/R_SO_4_%s?limit=20&offset=%s' % (song_id, offset)
        responses = self.send_request(url).content
        json_dict = json.loads(responses)
        return json_dict

    def structure_url(self, song_id):
        """
        Read the total comment count first, then crawl page by page,
        keeping comments with more than 10 likes and more than 10 characters.

        :param song_id: song id
        :return: None (filtered comments are written out via DataUtil)
        """
        json_dict = self.get_json(song_id, 0)
        print(json_dict)
        comments_num = int(json_dict['total'])  # total number of comments
        # Ceiling division. The original `comments_num / 20` on the divisible
        # branch produced a float in Python 3, making range(page) raise TypeError.
        page = (comments_num + 19) // 20
        for i in range(page):
            comments_list = []
            time.sleep(0.6)  # throttle requests to avoid being rate-limited
            json_dict = self.get_json(song_id, i * 20)
            for item in json_dict['spider_comments']:
                comment = item['content'].strip().replace("\n", "")  # drop newlines
                liked_count = item['likedCount']  # number of likes
                if liked_count > 10 and len(comment) > 10:
                    # API timestamps are in milliseconds.
                    comment_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(item['time'] / 1000))
                    comment_info = comment_time + '\t||\t' + str(liked_count) + '\t||\t' + comment + '\n'
                    comments_list.append(comment_info)
            DataUtil.save_data_to_txt(comments_list, self.current_songs_list_name)
            print('第 %s 页获取完成.' % i)

    def get_songs_id(self, url):
        """
        Scrape song names and ids from a playlist page, then crawl the
        comments of every song found.

        :param url: playlist page URL
        :return: None
        """
        html = self.send_request(url)
        text = etree.HTML(html.text)
        songs_name = text.xpath('//div[@id="song-list-pre-cache"]//ul//a/text()')
        # Collect song ids: hrefs look like '/song?id=<id>', so strip the
        # leading 9 characters to keep only the id.
        songs_id = text.xpath('//div[@id="song-list-pre-cache"]//ul//a/@href')
        songs_id = [s_id[9:] for s_id in songs_id]
        print(songs_name)
        print(songs_id)
        for song_name, song_id in zip(songs_name, songs_id):
            try:
                self.structure_url(song_id)
                print('正在收集 %s 的评论' % song_name)
            except Exception as e:
                # A failure for one song should not stop the whole run:
                # refresh the proxy, report the error, move on.
                self.get_proxy()
                print('收集 %s 的评论出错' % song_name)
                print(e)
