import csv

import pandas as pd
import re
import time

from selenium import webdriver
from pymysql import connect
from lxml import etree


class FilmBoxSpider:
    """Scrape yearly box-office tables from cbooo.cn, persist them to MySQL,
    and export flattened datasets for downstream visualisation.

    The heavyweight resources (Selenium driver, MySQL connection) stay
    commented out in ``__init__`` so that instantiating the spider just to
    run the offline preparation steps has no side effects; uncomment them
    before running crawl()/save()/prepare().
    """

    def __init__(self):
        # Records collected by crawl(); save() reads from here. Without this
        # the old code raised AttributeError inside crawl().
        self.films = []
        # Uncomment to enable the crawl/save/prepare pipeline:
        # self.driver = webdriver.Chrome()
        # self.conn = connect(host='localhost', port=3306, user='root', password='root', database='boss_spider', charset='utf8')
        # self.cursor = self.conn.cursor()

    @staticmethod
    def _cell(tr, query, default):
        """Return the first match of *query* under *tr*, or *default* when
        the XPath yields nothing.

        Fixes the IndexError the old inline ``xpath(...)[0]`` condition
        raised for films whose first cell had no title attribute.
        """
        values = tr.xpath(query)
        return values[0] if values else default

    def crawl(self):
        """Fetch the 2008-2019 yearly box-office tables and accumulate one
        dict per film into ``self.films``. Requires ``self.driver``."""
        url = 'http://www.cbooo.cn/year?year={}'
        for year in range(2008, 2020):
            self.driver.get(url.format(year))
            element = etree.HTML(self.driver.page_source)
            # position()>1 skips the table's header row.
            for tr in element.xpath('//table[@id="tbContent"]/tbody/tr[position()>1]'):
                item = dict(
                    name=self._cell(tr, './td[1]/a/@title', ''),
                    type=self._cell(tr, './td[2]/text()', ''),
                    box=self._cell(tr, './td[3]/text()', 0),
                    film_price=self._cell(tr, './td[4]/text()', 0),
                    avg_views=self._cell(tr, './td[5]/text()', 0),
                    area=self._cell(tr, './td[6]/text()', ''),
                    time=self._cell(tr, './td[7]/text()', '1970-01-01'),
                )
                self.films.append(item)
            time.sleep(1)  # throttle: one page per second
        self.driver.quit()

    def save(self):
        """Insert the crawled films into the ``films`` MySQL table.

        Uses a parameterized query instead of string interpolation, which
        removes the SQL-injection hole and makes the old manual
        single-quote escaping unnecessary.
        """
        sql = ("insert into films (name,type,box,film_price,avg_views,area,time) "
               "values (%s,%s,%s,%s,%s,%s,%s)")
        for film in self.films:
            self.cursor.execute(sql, (
                film['name'],
                film['type'],
                int(film['box']),
                int(film['film_price']),
                int(film['avg_views']),
                film['area'],
                film['time'],
            ))
        # A single commit covers the whole batch.
        self.conn.commit()

    def prepare(self):
        """Dump the films table (ordered by year asc, box desc) to
        ``film.xls`` for charting."""
        self.cursor.execute("SELECT * FROM `films` ORDER BY year asc,box desc")
        rows = []
        for film in self.cursor.fetchall():
            # Box office is stored in units of 10,000 yuan; convert to
            # 100-million yuan (yi) for display.
            box = round(film[3] / 10000, 2)
            # NOTE(review): assumes column index 7 holds the year the query
            # sorts on — confirm against the table schema.
            rows.append((film[1], film[2], box, str(film[7])))
        df = pd.DataFrame(rows, columns=['name', 'type', 'value', 'date'])
        df.to_excel('film.xls')

    def prepare_hurun(self):
        """Normalise the Hurun 2018 rich-list CSV.

        When a "<name>家族" (family) entry exists, rename matching bare
        "<name>" rows to the family form so both variants aggregate
        together, then write the result to ``hurun.csv``.
        """
        with open('hurun2018.csv') as csvfile:
            data = list(csv.reader(csvfile))
        # Base names of every "...家族" entry; a set gives O(1) membership.
        family_names = {row[0][:-2] for row in data if row[0].endswith('家族')}
        for idx, row in enumerate(data):
            if row[0] in family_names:
                data[idx][0] = '{}家族'.format(row[0])
        df = pd.DataFrame(data, columns=['name', 'value', 'company', 'type', 'date'])
        df.to_csv('hurun.csv')


if __name__ == '__main__':
    spider = FilmBoxSpider()
    # Full pipeline (disabled; needs a running browser and MySQL):
    # spider.crawl()
    # spider.save()
    spider.prepare_hurun()
