import pickle ,hashlib,time
from get import get
from dataout import dataout
from multiprocessing.managers import BaseManager
from multiprocessing import Process, Queue
class urlmanger(object):
    """Crawl frontier: tracks URLs still to crawl and URLs already crawled.

    Both sets are persisted with pickle so a crawl can resume after a
    restart.
    """

    def __init__(self):
        # Restore both sets from disk; each falls back to an empty set
        # on a first run or unreadable file.
        self.old_urls = self.load_progress('oldurls.txt')  # already-crawled set
        self.new_urls = self.load_progress('newurls.txt')  # not-yet-crawled set

    def get_new_url(self):
        """Pop one pending URL, mark it as crawled, and return it.

        Raises KeyError when the pending set is empty; callers should
        check has_new_url() first.
        """
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url

    def new_urls_size(self):
        """Return the number of URLs still waiting to be crawled."""
        return len(self.new_urls)

    def old_urls_size(self):
        """Return the number of URLs already handed out for crawling."""
        return len(self.old_urls)

    def has_new_url(self):
        """Return True when at least one URL is waiting to be crawled."""
        return self.new_urls_size() != 0

    def add_new_url(self, url):
        """Add one URL to the pending set, skipping None and any URL that
        is already pending or already crawled."""
        if url is None:  # 'is None', not '== None'
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        """Add a batch of URLs with the same de-duplication as add_new_url.

        The original added blindly, which could re-queue already-crawled
        URLs; routing through add_new_url keeps the two sets consistent.
        """
        if not urls:  # None or empty collection
            return
        for url in urls:
            self.add_new_url(url)

    def save_progress(self, path, data):
        """Pickle *data* to *path* so progress survives a restart.

        :param path: destination file path
        :param data: object to persist (here: a set of URLs)
        """
        with open(path, 'wb') as f:
            pickle.dump(data, f)

    def load_progress(self, path):
        """Load previously saved progress from *path*.

        :param path: file to read; may be None
        :return: the unpickled object, or an empty set when the path is
                 None, missing, or unreadable (e.g. a first run).
        """
        if path is None:
            # Original returned None here, which would break the set
            # operations performed by every other method.
            return set()
        try:
            with open(path, 'rb') as f:
                return pickle.load(f)
        except (OSError, pickle.PickleError):
            # Narrowed from a bare except; the original also printed the
            # %s format string without supplying the path argument.
            print('在%s路径下未寻找到相关文件' % path)
        return set()

    # Queue roles used elsewhere in this file (kept from the original):
    #   result_q - crawler nodes -> data-extraction process
    #   conn_q   - crawler nodes -> URL manager (parsed URLs)
    #   store_q  - extraction process -> storage process
    #   url_q    - URL manager -> crawler nodes
class node_manger(object):
    """Control node of the distributed crawler.

    Runs the URL-manager, result-extraction and storage processes, and
    exposes the task/result queues to remote crawler nodes via
    multiprocessing.managers.BaseManager.
    """

    # Stop handing out work after this many URLs have been dispatched.
    TASK_LIMIT = 150
    # Pause feeding while this many URLs are already pending.
    PENDING_THROTTLE = 50

    def start_manger(self, url_q, result_q):
        """Register both queues on the network and return the manager.

        :param url_q: queue the URL-manager process feeds crawler nodes from
        :param result_q: queue crawler nodes push results into
        :return: a BaseManager bound to 127.0.0.1:5000 (authkey b'abc');
                 the caller is responsible for serving it.
        """
        # register() exposes each Queue through a callable so remote
        # clients obtain a proxy to the same object.
        BaseManager.register('get_task_queue', callable=lambda: url_q)
        BaseManager.register('get_result_queue', callable=lambda: result_q)
        # Bind port 5000 with token 'abc'; this only initialises the
        # manager object -- it does not start serving.
        manger = BaseManager(address=('127.0.0.1', 5000), authkey='abc'.encode('utf-8'))
        return manger

    def url_manger_proc(self, url_q, conn_q, root_url):
        """URL-manager process: feed pending URLs to crawler nodes.

        :param url_q: outbound queue of URLs for crawler nodes
        :param conn_q: inbound queue of URLs parsed by crawler nodes
                       (feedback path is disabled -- see note below)
        :param root_url: seed URL to start the crawl from
        """
        url_manger = urlmanger()
        url_manger.add_new_url(root_url)
        while True:
            while url_manger.has_new_url():
                # Hand one pending URL to a crawler node.
                new_url = url_manger.get_new_url()
                url_q.put(new_url)
                print('已经发送%s个任务' % url_manger.old_urls_size(), '任务链接：%s' % new_url)
                if url_manger.old_urls_size() > self.TASK_LIMIT:
                    # Enough tasks dispatched: notify crawler nodes to stop
                    # and persist both sets as a resume checkpoint.
                    url_q.put('end')
                    # BUG FIX: the original saved to c:/newurls.txt and
                    # c:/oldurls.txt while urlmanger.__init__ reloads from
                    # the relative paths below, so resume never worked.
                    url_manger.save_progress('newurls.txt', url_manger.new_urls)
                    url_manger.save_progress('oldurls.txt', url_manger.old_urls)
                    print('已经爬取任务队列超过10个，通知爬虫节点暂停')
                    return
                try:
                    # NOTE(review): the conn_q feedback branch was already
                    # commented out in the original; seeds come from get()
                    # instead -- confirm conn_q is intentionally unused.
                    time.sleep(1)
                    url_manger.add_new_url(get().get_as_cp())
                    # BUG FIX: the original compared has_new_url() (a bool)
                    # against 50, which is never true; throttle on the
                    # pending-set size as clearly intended.
                    while url_manger.new_urls_size() > self.PENDING_THROTTLE:
                        time.sleep(1)
                except Exception:
                    # Narrowed from BaseException so Ctrl-C still exits.
                    print('出现错误 延时休息')

    def result_solve_proc(self, result_q, conn_q, store_q):
        """Extraction process: move crawler results onto the storage queue.

        Terminates (re-queueing the sentinel for other consumers) when a
        result whose 'new_urls' field equals 'end' arrives.
        """
        while True:
            try:
                if not result_q.empty():
                    content = result_q.get()
                    if content['new_urls'] == 'end':
                        # Put the sentinel back so every consumer sees
                        # the shutdown notice.
                        result_q.put('end')
                        return
                    store_q.put(content)
                else:
                    time.sleep(0.1)
            except Exception:
                # Malformed result: back off briefly and keep serving.
                time.sleep(0.1)

    def store_proc(self, store_q):
        """Storage process: persist extracted data until 'end' arrives.

        :param store_q: queue of dicts carrying 'data' and 'data_group'
        """
        output = dataout()  # initialise the data sink
        while True:
            if not store_q.empty():
                data = store_q.get()
                if data['data'] == 'end':
                    print('存储进程接收end通知然后结束')
                    return
                print('存储数据%s' % data)
                output.output(data['data'], data['data_group'])
            else:
                time.sleep(0.1)


if __name__ == '__main__':
    # The four coordination queues (task, result, URL-feedback, storage).
    url_q, result_q = Queue(), Queue()
    conn_q, store_q = Queue(), Queue()

    # Build the distributed URL manager and register its queues.
    node = node_manger()
    manger = node.start_manger(url_q, result_q)

    # Seed URL for the crawl.
    root_url = get().get_as_cp()

    # URL-manager, data-extraction and data-storage processes.
    workers = [
        Process(target=node.url_manger_proc, args=(url_q, conn_q, root_url)),
        Process(target=node.result_solve_proc, args=(result_q, conn_q, store_q)),
        Process(target=node.store_proc, args=(store_q,)),
    ]
    for worker in workers:
        worker.start()

    # Serve the registered queues to remote crawler nodes (blocks).
    manger.get_server().serve_forever()
