import pymysql
from JobData.xiaomi_xiaoyuan import html_downloader

from JobData.xiaomi_xiaoyuan import html_parser


class SpiderMain(object):
    """Scheduler for the Xiaomi campus-recruitment crawler.

    Wires together the downloader and parser from
    ``JobData.xiaomi_xiaoyuan``, crawls the paginated listing pages,
    parses every job detail page, and persists the parsed records into
    the MySQL table ``data_xiaomi``.
    """

    def __init__(self):
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        # Parsed job records from the most recent craw() run.
        self.new_data = []

    def craw(self, root_url):
        """Crawl every campus-recruitment page reachable from *root_url*.

        The listing is paginated as ``<root_url>-1`` .. ``<root_url>-3``;
        each listing page is scanned for detail-page links, then every
        detail page is downloaded and parsed.  The collected records are
        written to MySQL and also returned.

        :param root_url: base listing URL without the ``-<page>`` suffix.
        :return: list of parsed job-record dicts.
        """
        root_links = []  # detail-page links from all Xiaomi campus listing pages
        # Collect detail links from the three paginated listing pages.
        for page in range(1, 4):
            page_url = root_url + '-' + str(page)
            page_html = self.downloader.download(page_url)
            # get_all_state() yields the set of detail-page URLs on one listing page.
            root_links.extend(self.parser.get_all_state(page_html))
        print('小米校招所有链接 : %s %d' % (root_links, len(root_links)))

        # Download and parse every detail page (best effort: one failing
        # page must not abort the whole crawl).
        for detail_url in root_links:
            try:
                html_cont = self.downloader.download(detail_url)
                # NOTE(review): assignment (not extend) is kept from the
                # original — its comment claims parse() returns the data of
                # *all* pages, i.e. the parser appears to accumulate across
                # calls.  Confirm against HtmlParser.parse before changing.
                self.new_data = self.parser.parse(detail_url, html_cont)
            except Exception as e:
                # Was a bare ``except``: still best-effort, but report which
                # URL failed and why instead of hiding the cause entirely.
                print('craw failed: %s (%s)' % (detail_url, e))
        self.output_sql(self.new_data)
        return self.new_data

    def output_sql(self, all_data):
        """Replace the contents of table ``data_xiaomi`` with *all_data*.

        Truncates the table first, then inserts each record in its own
        transaction.  A failed insert is rolled back and reported but does
        not abort the remaining inserts.

        :param all_data: iterable of dicts keyed by the table's columns.
        """
        conn = pymysql.Connect(
            host="127.0.0.1",
            port=3306,
            user='root',
            passwd='123456',
            db='data_collection',
            charset='utf8'
        )
        # Parameterized statement: the previous version interpolated the
        # scraped values directly into the SQL text, which breaks on quotes
        # in the data and is an SQL-injection hazard.
        sql_insert = (
            "insert into data_xiaomi"
            "(Job_Title, Job_Place, Job_Type, Recruit_Way, Recruit_Place,"
            " Job_Duties, Job_Requirement, Add_Time) "
            "values(%s, %s, %s, %s, %s, %s, %s, %s)"
        )
        try:
            cursor = conn.cursor()
            cursor.execute("truncate table data_xiaomi")
            for data in all_data:
                try:
                    cursor.execute(sql_insert, (
                        data['Job_Title'], data['Job_Place'],
                        data['Job_Type'], data['Recruit_Way'],
                        data['Recruit_Place'], data['Job_Duties'],
                        data['Job_Requirement'], data['Add_Time'],
                    ))
                    conn.commit()
                    print(cursor.rowcount)
                except Exception as e:
                    print(e)
                    # Roll back the failed insert and continue with the rest.
                    conn.rollback()
            cursor.close()
        finally:
            # Always release the connection, even if truncate/insert raised
            # (the original leaked it on any error before conn.close()).
            conn.close()

    def get_all_url(self):
        """Run a full crawl from the default root URL and return its data."""
        root_url = "http://hr.xiaomi.com/campus/list/0-0-0-0"
        # NOTE(review): constructs a fresh SpiderMain rather than using
        # ``self`` — kept from the original; only the returned data of the
        # throwaway instance is used.
        obj_spider = SpiderMain()
        return obj_spider.craw(root_url)
if __name__ == "__main__":
    # Script entry point: crawl the Xiaomi campus-recruitment listing.
    start_url = "http://hr.xiaomi.com/campus/list/0-0-0-0"
    SpiderMain().craw(start_url)
