import datetime
import random

import pymysql
import time

from Common import sql_content
from JobData.xiaomi_xiaoyuan import html_downloader
from JobData.lagou import html_parser

class SpiderMain(object):
    """Crawler for lagou.com job listings.

    Pipeline (see craw()):
      target 1: the paginated summary (listing) pages;
      target 2: every page of the listing (30 pages total);
      target 3: the job-detail links extracted from each listing page.

    Parsed records are kept in ``self.return_data``; ``output_sql`` can write
    them to the ``data_lagou`` MySQL table, and ``into_sql``/``get_data`` read
    stored rows back for the dashboard.
    """

    def __init__(self):
        self.sql_content = sql_content.sql_content()        # DB connection factory
        self.downloader = html_downloader.HtmlDownloader()  # HTTP fetcher (disguised headers)
        self.parser = html_parser.HtmlParser()              # lagou-specific HTML parser
        self.new_data = []
        self.j = 0                 # index of the link currently being stored (used by output_sql)
        self.root_links = []       # all job-detail links collected from the listing pages
        self.return_data = []      # parsed job records accumulated by craw()

    def get_data(self, min, max):
        """Return (rows, column_names) for Data_Id in [min, max].

        Called by the dashboard backend to refresh live data.
        """
        # Query once and reuse the result -- the original called into_sql()
        # twice (once for the print, once for the return), doubling DB load.
        data = self.into_sql(min, max)
        print('lagou_main，用于后台刷新实时数据', data)
        return data

    def craw(self, root_url):
        """Crawl 30 listing pages under root_url, then every detail page found.

        Parsed records are appended to self.return_data; the MySQL write
        (output_sql) is currently disabled.
        """
        # Phase 1: collect detail links from the 30 paginated listing pages.
        page = 0
        while page <= 29:
            now_url = root_url + str(page + 1) + '/?filterOption=3'
            print(now_url)
            root_html_cont = self.downloader.disguiseDownloader(now_url)
            try:
                self.root_links.extend(self.parser.get_1_message(root_html_cont))
            except Exception:
                # Parse failed (presumably throttled by the site -- confirm):
                # back off, then retry the same page.
                self.sleep_now()
                print("error")
                continue
            page += 1
        print('all links of lagou:', len(self.root_links), self.root_links)

        # Phase 2: download and parse each detail page, retrying until it succeeds.
        for j in range(len(self.root_links)):
            self.j = j  # remembered so output_sql can attach the source link
            while True:
                try:
                    new_url = self.root_links[j]
                    print(new_url)
                    html_cont = self.downloader.disguiseDownloader(new_url)
                    this_data = self.parser.parse(new_url, html_cont, j)
                    # BUG FIX: was ``self.retun_data`` -- the resulting
                    # AttributeError was swallowed by the bare except and the
                    # loop retried the same page forever.
                    self.return_data.append(this_data)
                    # self.output_sql(this_data)  # DB write disabled
                    break
                except Exception:
                    print('craw failed')

    def output_sql(self, this_data):
        """Insert one parsed job record into the data_lagou table.

        Silently returns when this_data is None; on DB error the transaction
        is rolled back and the exception printed.
        """
        if this_data is None:
            return
        conn = pymysql.Connect(
            host="127.0.0.1",
            port=3306,
            user='root',
            passwd='123456',
            db='data_collection',
            charset='utf8'
        )
        cursor = conn.cursor()
        try:
            # Parameterized query: the original interpolated values with the
            # % operator, which breaks on quotes inside job descriptions and
            # is open to SQL injection.
            sql_insert = (
                "insert into data_lagou(Page_Link, Take_Time, Word_Num, jobTitle, "
                "jobPlace, jobSalary, jobExperience, jobEducation, jobType, "
                "jobPubTime, jobBenefits, jobDescription, jobKeyWord, companyName, "
                "companyManyLink, companyType) "
                "values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
            )
            cursor.execute(sql_insert, (
                self.root_links[self.j], this_data['Take_Time'], this_data['Word_Num'],
                this_data['jobTitle'], this_data['jobPlace'], this_data['jobSalary'],
                this_data['jobExperience'], this_data['jobEducation'], this_data['jobType'],
                this_data['jobPubTime'], this_data['jobBenefits'], this_data['jobDescription'],
                this_data['jobKeyWord'], this_data['companyName'], this_data['companyManyLink'],
                "unknow",
            ))
            conn.commit()
            # Row count is read by the data-chart caller.
            print(cursor.rowcount)
        except Exception as e:
            print(e)
            conn.rollback()
        finally:
            # Always release the cursor/connection, even on unexpected errors.
            cursor.close()
            conn.close()

    def sleep_now(self):
        """Back off for a random 3-9 seconds between requests.

        Presumably an anti-throttling measure -- confirm against the site's
        rate limits.  (The original also computed the current minute/second
        but never used them; that dead code is removed.)
        """
        sl = random.randint(1, 7)
        print(sl)
        time.sleep(sl)
        time.sleep(1)
        print(1)
        time.sleep(1)
        print(1)

    def get_all_url(self):
        """Crawl every lagou listing page.

        The entrance page looks like
        https://www.lagou.com/zhaopin/2/?filterOption=3 ;
        only the prefix is needed here because craw() appends the page number
        and the filter suffix itself.
        """
        root_url = "https://www.lagou.com/zhaopin/"
        # Crawl with THIS instance -- the original spawned a fresh SpiderMain
        # and threw away every link/record it collected.
        self.craw(root_url)

    def into_sql(self, min, max):
        """Fetch rows with min <= Data_Id <= max plus the table's column names.

        Returns (rows, column_names); each row has its Take_Time (column 2)
        formatted as a 'YYYY-mm-dd HH:MM:SS' string.
        """
        print(min, max)
        lagou_all = []
        topic = []
        con = self.sql_content.conten()
        cursor = con.cursor()
        try:
            # Parameterized to avoid interpolating caller input into SQL.
            cursor.execute(
                "select * from data_lagou WHERE %s <= Data_Id and Data_Id <= %s",
                (min, max),
            )
            for row in cursor.fetchall():
                list_row = list(row)
                # Column 2 is a datetime; serialise it for the frontend.
                list_row[2] = datetime.datetime.strftime(list_row[2], '%Y-%m-%d %H:%M:%S')
                lagou_all.append(list_row)
            cursor.execute(
                "select COLUMN_NAME from information_schema.COLUMNS "
                "where table_name ='data_lagou'"
            )
            topic = [list(row) for row in cursor.fetchall()]
        except Exception as e:
            print(e)
            con.rollback()
        finally:
            cursor.close()
            con.close()
        print(topic)
        return lagou_all, topic

if __name__ == "__main__":
    # Script entry point: fetch the stored row with Data_Id 1 (plus the
    # table's column names) from the database.
    root_url = "https://www.lagou.com/zhaopin/"
    spider = SpiderMain()
    # spider.craw(root_url)  # full re-crawl is left disabled
    spider.get_data(1, 1)
