import requests,time
from multiprocessing.managers import BaseManager





class htmldownloader(object):
    """Download toutiao pages with browser-like request headers.

    Note: the lowercase class name is kept for backward compatibility
    with existing callers (PEP 8 would prefer ``HtmlDownloader``).
    """

    def downloader(self, url):
        """Fetch *url* and return the response.

        :param url: page url to download, or None
        :return: ``None`` when *url* is None; the ``requests.Response``
                 (with encoding forced to utf-8) on HTTP 200; otherwise
                 the error string ``'服务器访问错误'``.
        """
        if url is None:  # fix: compare to None with `is`, not `==`
            return

        # Headers mimic a desktop Chrome browser; toutiao rejects
        # requests without a plausible User-Agent / Referer / Cookie.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
            'Host': 'www.toutiao.com',
            'Referer': 'https://www.toutiao.com/',
            'Cookie': '''UM_distinctid=15f32dd7ecca02-0ffd47e89664fe-3b3e5906-1fa400-15f32dd7ecdd9a; uuid="w:7547587fcc234d0e95fd3af0612114de"; tt_webid=6478472404099532302; CNZZDATA1259612802=556198009-1508382043-https%253A%252F%252Fwww.baidu.com%252F%7C1508457929;__tasessionId=tsv5875fx1508462747222; tt_webid=6478472404099532302; WEATHER_CITY=%E5%8C%97%E4%BA%AC'''}

        response = requests.get(url, headers=self.headers, timeout=10)

        if response.status_code == 200:
            # Force utf-8 so the body decodes correctly downstream.
            response.encoding = 'utf-8'
            return response

        return '服务器访问错误'

class htmlparser(object):
    """Parse toutiao AJAX responses into follow-up urls, article data
    and article comments."""

    def parser(self, page_url, page_cont):
        """Parse one downloaded page.

        :param page_url: url of the downloaded page
        :param page_cont: downloaded page data (a requests.Response whose
                          body is JSON)
        :return: ``(new_urls, new_data, data_group)`` on success, or the
                 error string ``'解析数据错误'`` on invalid input
        """
        if page_url is None or page_cont is None:
            return '解析数据错误'

        # Needed by _get_group_data to fetch comment pages; created only
        # after the inputs have been validated.
        self.down = htmldownloader()

        new_urls = self._get_new_urls(page_url, page_cont)
        new_data, data_group = self._get_new_data(page_url, page_cont)

        return new_urls, new_data, data_group

    def _get_new_urls(self, page_url, cont):
        """Extract article urls from an AJAX listing response.

        :param page_url: url of the downloaded page (unused, kept for
                         interface symmetry with _get_new_data)
        :param cont: downloaded page data; ``cont.json()['data']`` must be
                     a list of article dicts
        :return: set of article urls parsed from the page
        """
        new_urls = set()

        # Entries tagged label == '广告' are ads and carry no article url.
        for item in cont.json()['data']:
            if item.get('label', 'false') == '广告':
                print('采集到广告，自动跳过')
            else:
                new_urls.add('http://www.toutiao.com/a' + item['group_id'] + '/')

        return new_urls

    def _get_new_data(self, page_url, cont):
        """Extract article records (and their comments) from a listing page.

        :param page_url: url of the downloaded page
        :param cont: downloaded page data; ``cont.json()['data']`` must be
                     a list of article dicts
        :return: ``(data_set, group_data)`` — the list of article dicts and
                 the comments of the last successfully parsed article
        """
        data_set = []
        # Fix: was unbound (NameError at return) when the page contained
        # no parseable article — e.g. only ads/videos or an empty list.
        group_data = []

        for item in cont.json()['data']:
            data = {}
            label = item.get('label', 'false')

            if label == '广告':
                # Ads are tagged with label == '广告'.
                print('采集到广告，自动跳过')
            elif label == 'false':
                # No label at all: treated as a video entry and skipped.
                print('采集到视频，自动跳过')
            else:
                try:
                    data['label'] = item['label']          # category keyword
                    data['source'] = item['source']        # author
                    data['group_id'] = item['group_id']    # comment-thread id
                    data['group_id_url'] = 'http://www.toutiao.com/api/comment/list/?group_id=%s&item_id=%s&offset=0&count=20' % (
                        item['group_id'], item['group_id'])  # comment API url
                    data['url'] = 'http://www.toutiao.com/a' + item['group_id'] + '/'  # article page
                    data['title'] = item['title']
                    group_data = self._get_group_data(data['group_id_url'], data['title'])  # fetch comments
                except Exception:
                    # Broad on purpose: any missing field or comment-download
                    # failure falls back to a placeholder record (but no
                    # longer a bare `except:` that would also swallow
                    # KeyboardInterrupt/SystemExit). The duplicate
                    # 'group_id' key of the original literal is removed —
                    # the resulting dict value is identical.
                    data = {'label': 'label', 'source': 'source',
                            'group_id': 'group_id', 'group_id_url': 'group_id_url',
                            'url': 'url', 'title': 'title'}

                data_set.append(data)

        return data_set, group_data

    def _get_group_data(self, group_id_url, title):
        """Download and parse the comments of one article.

        :param group_id_url: comment API url for the article
        :param title: article title, copied into every comment record
        :return: list of comment dicts (title/text/userid/username)
        """
        data_group = []

        response = self.down.downloader(group_id_url)

        for comment in response.json()['data']['comments']:
            data_group.append({
                'title': title,
                'text': comment['text'],
                'userid': comment['user']['user_id'],
                'username': comment['user']['name'],
            })

        return data_group



class spider(object):
    """Worker node of the distributed crawler.

    Connects to the control node's queue manager, pulls urls from the
    task queue, downloads and parses each page, and pushes the results
    back onto the result queue.
    """

    def __init__(self):
        # Register the queue accessors under the exact names the control
        # node exported them with.
        BaseManager.register('get_task_queue')
        BaseManager.register('get_result_queue')

        host = '127.0.0.1'
        print('连接到服务器%s' % host)

        # Address, port and authkey must match the control node's setup.
        self.m = BaseManager(address=(host, 5000), authkey='abc'.encode('utf-8'))
        self.m.connect()

        # Proxies for the remote task/result queues.
        self.task = self.m.get_task_queue()
        self.result = self.m.get_result_queue()

        # Page downloader and parser used by crawl().
        self.down = htmldownloader()
        self.parse = htmlparser()

        print('配置成功')

    def crawl(self):
        """Poll the task queue until the 'end' sentinel arrives."""
        while True:
            try:
                time.sleep(0.5)

                if self.task.empty():
                    continue

                url = self.task.get()

                if url == 'end':
                    # Control node asked us to shut down; acknowledge and exit.
                    print('收到控制节点通知，停止工作')
                    self.result.put({'new_urls': 'end', 'data': 'end'})
                    return

                print('收到url，将该url进行解析:%s ' % url)

                content = self.down.downloader(url)
                new_urls, data, data_group = self.parse.parser(url, content)
                self.result.put({'new_urls': new_urls,
                                 'data': data,
                                 'data_group': data_group})

            except EOFError:
                # Lost the connection to the control node.
                print('链接工作节点失败')
                return
if __name__ == '__main__':
    # Use a distinct variable name so the `spider` class is not shadowed
    # by its own instance.
    worker = spider()
    worker.crawl()





































