# coding=utf-8
import pandas as pd

import clean_csv
import fill_csver
import get_page_number
import get_recorder
import html_downloader
import html_outputer
import html_parser
import to_mongor
import url_manager


class SpiderMain(object):
    """Orchestrate a full crawl of Douyu live rooms.

    Wires together the project's URL manager, downloader, parser and the
    various persistence helpers (CSV filler/cleaner, MariaDB, MongoDB),
    accumulating scraped room rows into a pandas DataFrame.
    """

    def __init__(self):
        # Paged listing URL for all Douyu rooms; {} is filled with the page number.
        self.dy_url = "https://www.douyu.com/directory/all?page={}&isAjax=0"
        # Platform label ("Douyu") stored alongside each persisted record.
        self.platform = "斗鱼"
        # Accumulator for scraped rooms; columns match what dy_parser fills in.
        self.dy_csv = pd.DataFrame(columns=['title', 'type', 'name', 'looks', 'picture_url', 'room_url'])
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()
        self.csver = fill_csver.FillCsv()
        self.clean = clean_csv.CleanCsv()
        self.pages = get_page_number.GetPages()
        self.mongor = to_mongor.ToMongo()
        self.recorder = get_recorder.GetRecords()

    def craw(self):
        """Crawl every listing page, persist the results, and return records.

        Downloads and parses each listing page into ``self.dy_csv``, then
        fills platform metadata, cleans the data, and writes it to MariaDB
        and MongoDB.

        Returns:
            Whatever ``GetRecords.get_records`` produces for the crawled
            DataFrame (project-defined record structure).
        """
        page_num = self.pages.get_pages()
        print('斗鱼当前页数：', page_num)
        # Seed the URL manager with one listing URL per page (1..page_num).
        self.urls.get_urls(self.dy_url, page_num + 1)
        while self.urls.has_new_url():
            new_url = self.urls.get_new_url()
            # dy_response: renamed from the copy-pasted "xm_response" to match
            # this module's consistent "dy_" (Douyu) prefix.
            dy_response = self.downloader.dy_soup(new_url)
            self.parser.dy_parser(dy_response, self.dy_csv)

        self.csver.fill_csv(self.dy_csv, self.platform)
        self.clean.clean_data(self.dy_csv)

        self.outputer.to_maria(self.dy_csv, db_name='lives', table_name='desc_rooms')
        self.mongor.to_mongo(self.dy_csv, self.platform)

        dy_records = self.recorder.get_records(self.dy_csv)
        return dy_records


if __name__ == '__main__':
    # Run a full Douyu crawl when executed as a script.
    # dy_spider: renamed from the copy-pasted "xm_spider" for consistency
    # with this module's "dy_" prefix.
    dy_spider = SpiderMain()
    dy_spider.craw()
