"""
file:spiderwin_ctl.py

author:nxm

describe:

time:2017-8-2
"""
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
import gui.spiderwin_ui,time
import urllib.request,urllib.error,re
from multiprocessing.dummy import Pool as ThreadPool

# Spider window UI class.
class SpiderWindow_UI(QtGui.QMainWindow, gui.spiderwin_ui.Ui_spiderWindow):
    """Spider window: crawls Douban group listing pages on a small thread
    pool and renders the scraped posts in two text browsers — duplicate
    posters in one, the remaining posts (color-coded by keywords in the
    title) in the other.
    """
    # Emitted from pool worker threads so the actual widget updates run
    # on the GUI thread via the queued slot connections below.
    displaysignal = QtCore.pyqtSignal(int)   # one page finished scraping
    spiderfinishsignal = QtCore.pyqtSignal() # all requested pages finished

    def __init__(self):
        # One cooperative super() call initializes the whole MRO.
        # The original code called super(QtGui.QMainWindow, self) /
        # super(Ui_spiderWindow, self), which starts the MRO lookup
        # *after* those classes and therefore skipped
        # QMainWindow.__init__ entirely.
        super(SpiderWindow_UI, self).__init__()
        self.setupUi(self)

        # Window icon.
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icon/toolkit.ico"))
        self.setWindowIcon(icon)

        # New-style signal/slot connections — equivalent to the old
        # QtCore.SIGNAL("clicked()") string form but checked at connect
        # time (PyQt4 supports both).
        self.spidertestpushButton.clicked.connect(self.SpiderTestButtonProcess)
        self.spiderclrpushButton.clicked.connect(self.SpiderClrButtonProcess)

        self.displaysignal.connect(self.PageSpiderOver)
        self.spiderfinishsignal.connect(self.SpiderReadListDeal)

        self.spiderpagenum = 20  # number of pages to crawl (overwritten from the spin box)
        self.readlist = []       # scraped (url, title, authorurl, author, time) tuples
        self.spidernumcnt = 0    # count of pages completed so far
        self.revlist = []        # posts whose author appears only once

    def PageSpiderOver(self, index):
        """Slot: report in the log browser that page *index* was scraped."""
        self.spidertextBrowser.append("采集完Page:" + str(index) + '\n')

    def SpiderReadListDeal(self):
        """Slot: run after every page finishes.

        First pass prints posts whose author name occurs in more than one
        entry (those authors are collected in *judgelist*); second pass
        prints the remaining single-post entries into the other browser,
        color-coded by district/keyword found in the title.
        """
        judgelist = []  # author names already identified as duplicate posters
        self.spidertextBrowser.append('-------------------------------------------------------')
        for i in range(len(self.readlist)):
            if self.readlist[i][3] not in judgelist:
                flag = 0
                # Scan the remainder of the list for the same author name.
                for read in self.readlist[i + 1:]:
                    if self.readlist[i][3] in read:
                        judgelist.append(self.readlist[i][3])  # remember the duplicate poster
                        # "post_time" (not "time") so the `time` module is not shadowed.
                        (url, title, authorurl, author, post_time) = read
                        self.spidertextBrowser.append('帖子：'+"<a href=\"%s\"><span style=\" color:#BBFFFF;\">%s</span></a>"%(url,url)+" 作者："+author+" 时间："+post_time)
                        flag = 1
                if flag == 1:
                    # Also print the first occurrence, then a separator.
                    (url, title, authorurl, author, post_time) = self.readlist[i]
                    self.spidertextBrowser.append('帖子：'+"<a href=\"%s\"><span style=\" color:#BBFFFF;\">%s</span></a>"%(url,url)+" 作者："+author+" 时间："+post_time)
                    self.spidertextBrowser.append('-----------------------------------------------------------')

        for revread in self.readlist:
            if revread[3] not in judgelist:
                self.revlist.append(revread)
                (url, title, authorurl, author, post_time) = revread
                # Color-code the link by keywords in the title
                # (rent-wanted first, then district names).
                if "求租" in title:
                    color = '#C6E2FF'
                elif "西湖"in title or "庆春路" in title or "四季青" in title or "城站" in title or "市民中心" in title or "河坊街" in title:
                    color = '#FF83FA'
                elif "拱墅" in title:
                    color = '#EE0000'
                elif "滨江" in title:
                    color = '#CAFF70'
                elif "文一" in title or "文二" in title or "文三" in title or "天目山" in title or "古荡" in title or "黄龙" in title:
                    color = '#9400D3'
                elif "闸弄口" in title or "西湖文化广场" in title:
                    color = "#1E90FF"
                else:
                    color = '#BBFFFF'
                self.spiderBrowser.append('帖子：'+"<a href=\"%s\"><span style=\" color:%s;\">%s</span></a>"%(url,color,title)+" 作者："+author+" 时间："+post_time)
        self.spiderBrowser.append('---------------------------------------------------------------------')
        self.spiderBrowser.append('****************无重复的帖子：%s******重复的帖子：%s**********************'%(str(len(self.revlist)), str(len(self.readlist)-len(self.revlist))))

    def getPageHtml(self, index):
        """Fetch one listing page and return its decoded HTML.

        *index* is appended to the base URL taken from the URL line edit.
        Returns None on any failure (HTTP error, URL error, or other).
        """
        try:
            url = self.spiderurllineEdit.text() + str(index)
            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'}
            request = urllib.request.Request(url, headers=headers)
            # `with` closes the response even if read()/decode() raises
            # (the original leaked the connection).
            with urllib.request.urlopen(request) as response:
                html = response.read().decode('utf-8')
            return html
        except urllib.error.HTTPError as e:
            print(e.code)
            return None
        except urllib.error.URLError as e:
            print(e.reason)
            return None
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate; remains best-effort otherwise.
            print("采集异常")
            return None

    def getSpiderDouBan(self, index):
        """Worker (runs on a pool thread): scrape page *index*, accumulate
        results and emit progress/completion signals.
        """
        # Stagger the 5 pool threads so they don't all hit the site at once.
        time.sleep((index % 5) / 2)
        readpage = self.getPageHtml(index * 25)  # Douban lists 25 posts per page
        if readpage is not None:
            # Capture post link, title, author link, author name, time.
            pattern = re.compile(r'<tr class.*?<td class="title">.*?<a href="(.*?)".*?title="(.*?)".*?<td.*?href="(.*?)".*?class=.*?>(.*?)</a>.*?class="time">(.*?)</td>',re.S)
            testmatch = re.findall(pattern, readpage)
            # NOTE(review): readlist and spidernumcnt are mutated from
            # multiple pool threads without a lock; this mostly works
            # under the GIL but is worth guarding — confirm.
            self.readlist += testmatch
            self.displaysignal.emit(index)

            time.sleep(0.2)
            self.spidernumcnt += 1
            if self.spidernumcnt == self.spiderpagenum:
                self.spiderfinishsignal.emit()

    def SpiderTestButtonProcess(self):
        """Slot: start a crawl over the number of pages in the spin box."""
        self.spidertextBrowser.append("开始采集小组数据\n")
        self.spiderpagenum = self.spiderspinBox.value()
        self.readlist = []
        self.revlist = []
        self.spidernumcnt = 0
        # multiprocessing.dummy gives a *thread* pool for the page fetches.
        pool = ThreadPool(5)
        pages = range(self.spiderpagenum)  # renamed: don't shadow builtin `list`
        pool.map_async(self.getSpiderDouBan, pages)  # non-blocking dispatch
        pool.close()

    def SpiderClrButtonProcess(self):
        """Slot: clear both output browsers."""
        self.spidertextBrowser.clear()
        self.spiderBrowser.clear()

    def closeEvent(self, event):
        """Qt close hook: clear the browsers and drop collected data."""
        self.spiderBrowser.clear()
        self.spidertextBrowser.clear()
        self.readlist = []
        self.revlist = []


################################################################################################
################################################################################################

