# coding=utf-8
import pandas as pd

import clean_csv
import fill_csver
import get_recorder
import html_downloader
import html_outputer
import html_parser
import to_mongor
import url_manager


class SpiderMain(object):
    """Crawler driver for Panda TV (熊猫) live-room listings.

    Wires together the URL queue, downloader, parser and the output
    sinks (CSV fill/clean, MariaDB, MongoDB) and exposes a single
    :meth:`craw` entry point.
    """

    def __init__(self):
        # Paginated API endpoint template; ``pageno`` placeholder is
        # filled in by UrlManager (presumably via str.format — confirm).
        self.xm_url = 'http://www.panda.tv/live_lists?status=2&order=person_num&token=c1b1d58d07b02707daaabf7272090600&pageno={}&pagenum=120'
        self.platform = "熊猫"
        # Accumulator DataFrame for all parsed rooms across pages.
        self.xm_csv = pd.DataFrame(columns=['title', 'type', 'name', 'looks', 'picture_url', 'room_url'])
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()
        self.csver = fill_csver.FillCsv()
        self.clean = clean_csv.CleanCsv()
        self.mongor = to_mongor.ToMongo()
        self.recorder = get_recorder.GetRecords()

    def craw(self):
        """Crawl every queued page, persist the results, and return records.

        Returns:
            Whatever ``GetRecords.get_records`` produces for the
            accumulated DataFrame.
        """
        # Enqueue 200 paginated API URLs built from the template.
        self.urls.get_urls(self.xm_url, 200)
        while self.urls.has_new_url():
            new_url = self.urls.get_new_url()
            xm_dict = self.downloader.xm_dict(new_url)
            if xm_dict == 1:
                # Downloader sentinel ``1``: skip this page and keep going
                # (presumably a recoverable fetch error — confirm in
                # HtmlDownloader).
                continue
            elif xm_dict:
                self.parser.xm_parser(xm_dict, self.xm_csv)
            else:
                # Empty/falsy payload: no more data, stop crawling.
                break
            # FIX: removed an unconditional ``break`` that sat here and
            # aborted the loop after the first successful page, so only
            # one of the 200 queued URLs was ever crawled (debug leftover).

        self.csver.fill_csv(self.xm_csv, self.platform)
        self.clean.clean_data(self.xm_csv)

        # Persist the cleaned dataset to both stores.
        self.outputer.to_maria(self.xm_csv, db_name='lives', table_name='desc_rooms2')
        self.mongor.to_mongo(self.xm_csv, self.platform)

        xm_records = self.recorder.get_records(self.xm_csv)
        return xm_records


if __name__ == '__main__':
    # Script entry point: build the spider and run one full crawl.
    spider = SpiderMain()
    spider.craw()
