import time
import os

import pymysql

from JobData.Job51_shezhao import html_parser,sql_connect
from JobData.Job51_shezhao import html_downloader
from Common import countTime as CT,sql_content,Fession_mini

class SpiderMain(object):
    """Scheduler for the 51job social-recruitment spider.

    Wires together the downloader, parser and SQL writer, tracks timing
    statistics, and publishes progress values through ``Fession_mini`` so
    a separate front end can poll crawl state while it runs.
    """

    def __init__(self):
        self.sql_content = sql_content.sql_content()  # DB connection factory
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.sql = sql_connect.Sql()
        self.all_data = []  # rolling window of the most recent page results

        self.totalCT = CT.get_time()  # start time of the whole crawl

        self.consume_time = []  # per-posting time-consumption samples
        self.type1pageCT = 0  # time spent downloading/parsing the current listing page

        self.thisStrList = []  # amount of data scraped per posting page
        self.totalStrSum = 0  # cumulative amount of data scraped

    # NOTE: interrupt/cease get/set methods were migrated to Fession_mini.

    def get_rate(self):
        # NOTE(review): identical to get_CT(); both kept for caller compatibility.
        return self.consume_time

    def get_CT(self):
        return self.consume_time

    def get_totalCT(self):
        """Return the time elapsed since this spider instance was created."""
        return CT.difference(CT.get_time(), self.totalCT)

    def get_data(self, min, max):
        """Fetch rows with Data_Id in [min, max] for the live back-end view.

        The original queried the database twice (once for the print, once
        for the return); the result is now computed once and reused.
        """
        result = self.into_sql(min, max)
        print('51job用于后台刷新实时数据', result)
        return result

    def _wait_while_interrupted(self):
        """Block, polling every 2 s, while the shared interrupt flag is -1."""
        if Fession_mini.getFessionMini('self.interrupt') == -1:
            time.sleep(2)
            while Fession_mini.getFessionMini('self.interrupt') == -1:
                time.sleep(2)

    # Spider scheduler
    def craw(self, root_url):
        """Walk listing pages 1..9 of *root_url* and scrape every posting.

        Character index 62 of *root_url* is assumed to be the page-number
        digit (matches the fixed URL built in get_all_url — TODO confirm
        for any other caller).  Progress is published via Fession_mini and
        each parsed posting is written to the database.
        """
        print("怎么回事啊，没过来吗？？")
        Fession_mini.setFessionMini('self.interrupt', 1)  # reset the interruption flag
        Fession_mini.setFessionMini('self.cease', 1)  # reset the stop flag
        # Listing pages (the site has ~2000; limited to 9 here).
        for page in range(1, 10):
            if Fession_mini.getFessionMini('self.cease') == -1:
                break
            self._wait_while_interrupted()

            type1_start = CT.get_time()

            chars = list(root_url)
            chars[62] = str(page)  # splice the page number into the URL
            now_url = ''.join(chars)
            root_html_cont = self.downloader.download(now_url)
            # Links to every posting found on this listing page.
            root_link = self.parser.get_all_state(root_html_cont)

            self.type1pageCT = CT.difference(type1_start, CT.get_time())
            Fession_mini.setFessionMini('type1pageCT', self.type1pageCT)

            # Scrape each posting.  A distinct loop variable avoids the
            # original's reuse of `i`, which shadowed the page counter.
            for idx in range(len(root_link)):
                print("\033[0;31m%s\033[0m" % "中断值为：" + str(Fession_mini.getFessionMini('self.interrupt')))
                if Fession_mini.getFessionMini('self.cease') == -1:
                    break
                self._wait_while_interrupted()

                type2_start = CT.get_time()
                try:
                    new_url = root_link[idx]  # next posting URL to fetch
                    print("new_url:", new_url)
                    html_cont = self.downloader.download(new_url)
                    new_data, this_str_sum = self.parser.parse(new_url, html_cont)

                    self.thisStrList.append(this_str_sum)
                    self.totalStrSum += this_str_sum

                    self.consume_time.append(
                        CT.difference(type2_start, CT.get_time()) + self.type1pageCT)

                    Fession_mini.setFessionMini('self.thisStrList', self.thisStrList)
                    Fession_mini.setFessionMini('self.totalStrSum', self.totalStrSum)
                    Fession_mini.setFessionMini('consume_time', self.consume_time)
                    Fession_mini.setFessionMini('nowDataText', new_data)

                    # Keep only the newest results for the live display.
                    # (The original's `len(...) is not None` guard was always
                    # true and its pop(15) left the oldest entry in place
                    # forever; trimming the tail keeps the 16 most recent.)
                    self.all_data.insert(0, new_data)
                    del self.all_data[16:]

                    Fession_mini.setFessionMini('nowAllDataText', self.all_data)

                    self.sql.output_sql(new_data)
                    # new_data holds every field parsed from the posting page.
                    print('a test point:', new_data, '\n', len(new_data))
                except Exception as e:
                    # Narrowed from a bare except (which also ate
                    # KeyboardInterrupt); log and keep crawling.
                    print('craw failed', e)
            # Reset downloader/parser state between listing pages.
            self.downloader.__init__()
            self.parser.__init__()
            print(self.consume_time)

    def get_all_url(self):
        """Build the fixed 51job search URL and start a full crawl.

        NOTE(review): creates a *new* SpiderMain rather than using self —
        preserved because the fresh instance resets all timing state.
        """
        root_url = "http://search.51job.com/list/000000,000000,0100,01,9,99,%2B,2,1.html?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare="
        obj_spider = SpiderMain()
        obj_spider.craw(root_url)

    def into_sql(self, min, max):
        """Read rows with Data_Id in [min, max] plus the table's column names.

        Returns (job51_all, topic): the data rows, with the two long text
        columns truncated for display, and the column-name rows.  The
        parameter names shadow builtins but are kept for compatibility.
        """
        print(min, max)
        job51_all = []
        topic = []
        con = self.sql_content.conten()
        # Parameterized query — the original interpolated the ids with %d.
        sql_into = "select * from data_51job_she WHERE %s <= Data_Id and Data_Id<= %s"
        sql_topic = ("select COLUMN_NAME from information_schema.COLUMNS "
                     "where table_name ='data_51job_she'")
        cursor = con.cursor()
        try:
            cursor.execute(sql_into, (min, max))
            for row in cursor.fetchall():
                list_row = list(row)
                # Truncate the two long text columns for display.
                list_row[6] = list_row[6][:150] + '....'
                list_row[7] = list_row[7][:150] + '....'
                job51_all.append(list_row)
            cursor.execute(sql_topic)
            for row in cursor.fetchall():
                topic.append(list(row))
        except Exception as e:
            print(e)
            con.rollback()
        cursor.close()
        con.close()
        print(topic)
        return job51_all, topic

if __name__ == "__main__":
    # Manual smoke test: pull rows 1-10 from the database for inspection.
    # (Removed the unused root_url assignment and the commented-out craw
    # call; run SpiderMain().get_all_url() to start a full crawl instead.)
    obj_spider = SpiderMain()
    obj_spider.get_data(1, 10)

