#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from bs4 import BeautifulSoup
from logs.Logger import Logger
from crawler import config
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import re
import numpy as np
import pandas as pd

# Paths and shared logger, read from the project-wide config module.
# NOTE: the original also assigned path = little_red_book_output_path first,
# but that value was immediately shadowed below — the dead assignment is removed.
path = config.generalConfig.dongchedi_path
log_path = config.generalConfig.log_path
urls_path = config.generalConfig.feed_file
# Browser-like User-Agent sent with HTTP requests.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
# Log initialisation: crawler log file under the configured log directory.
log = Logger(log_path + "/crawler.log", level='info')


# def getContext(url):
#     source_code = requests.get(url, headers=headers)
#     source_code.encoding = 'utf-8'
#     plain_text = source_code.text
#     soup = BeautifulSoup(plain_text,  'lxml')
#     return soup

class DongchediCrawler:
    """Crawler for dongchedi.com car-configuration pages.

    Parses a saved HTML page with BeautifulSoup (lxml), extracts the car
    title(s) and the configuration tables, and prints them as Markdown.
    """

    def __init__(self):
        # Per-instance state. These used to be shared *class* attributes,
        # which leaked parsed data between crawler instances.
        self.title = ""           # title of the (single) car being parsed
        self.titles = ["指标"]    # comparison-table header; first column is the metric name
        self.df = pd.DataFrame()  # collected rows for the multi-car comparison

    def readSeedFile(self, path):
        """Return the full text of the file at *path* (UTF-8, decode errors ignored)."""
        with open(path, 'r', encoding="utf-8", errors="ignore") as f:
            return f.read()

    def analysis_content(self, context):
        """Parse a single-car page: print the car title and each config group as Markdown.

        :param context: raw HTML text of the page.
        """
        tree = BeautifulSoup(context, "lxml")
        # The car title lives in the header cell's anchor.
        head = tree.find("div", class_=re.compile('^cell_header-car'))
        self.title = head.find("a").get("title")
        # Each configuration group is an element whose name attribute
        # starts with "config-body-".
        config_main = tree.find("div", class_=re.compile('configuration_main'))
        config_bodys = config_main.find_all(attrs={"name": re.compile("config-body-")})
        print("# " + self.title)
        for config_body in config_bodys:
            self.analysis_group(config_body)

    def analysis_multiple_content(self, context):
        """Parse a multi-car comparison page and print one combined Markdown table.

        :param context: raw HTML text of the page.
        """
        tree = BeautifulSoup(context, "lxml")
        # One header cell per car; each car title becomes a table column.
        heads = tree.find_all("div", class_=re.compile('^cell_header-car'))
        for head in heads:
            self.titles.append(head.find("a").get("title"))
        self.df = pd.DataFrame(columns=self.titles)
        self.analysis_multiple_group(tree)
        self.md_output()

    def analysis_group(self, context):
        """Print one configuration group of a single-car page as a Markdown table."""
        title = context.find("h3").text
        # Strip the trailing "●…" legend the site appends to group titles.
        offset = title.find("●")
        if offset > 0:
            title = title[:offset]
        kvs = context.find_all(attrs={"data-row-anchor": True})
        print("## " + title)
        print("| 指标 | 取值 |")
        print("|----|----|")
        for kv in kvs:
            group_name = kv.find("label").text
            value_div = kv.find_all(attrs={"class": re.compile("table_col__")})
            value = value_div[1].text
            # Raw-string pattern; \s already covers tabs and newlines
            # (the old '\s|\t|\n' literal relied on a deprecated escape).
            value = re.sub(r'\s', '', value)
            print("|" + group_name + "|" + value + "|")

    def analysis_multiple_group(self, context):
        """Fill self.df with one row per config metric and one column per car."""
        kvs = context.find_all(attrs={"data-row-anchor": True})
        index = 1
        for kv in kvs:
            group_name = kv.find("label").text
            steps = len(self.titles)
            values = [group_name]
            for step in range(steps - 1):
                # Column cells are addressed via their CSS custom property.
                # The original call also passed a class-filter dict, but it
                # landed in the positional `recursive` argument and never
                # filtered anything — only the style attribute is effective.
                value_div = kv.find_all("div", {"style": "--config-col-index:" + str(step + 1)})
                row_divs = value_div[0].find_all(attrs={"class": re.compile("table_row__")})
                if len(row_divs) > 0:
                    # Multi-valued cell (sub-rows): join the sub-row texts.
                    value_str = self.get_column_value_by_step(value_div, step)
                    values.append(value_str)
                else:
                    values.append(value_div[0].text)
            self.df.loc[index] = values
            index = index + 1

    def get_column_value_by_step(self, row_content, step):
        """Return the joined text of every table_row__ sub-cell of the first column div.

        :param row_content: list of column divs returned by ``find_all``.
        :param step: kept for interface compatibility; not needed here.
        :return: sub-row texts (whitespace stripped) joined with " / ".

        The previous implementation called ``.find_all()`` on the *list*
        itself, which raised AttributeError and implicitly returned None.
        """
        row_divs = row_content[0].find_all(attrs={"class": re.compile("table_row__")})
        parts = [re.sub(r'\s', '', div.text) for div in row_divs]
        return " / ".join(parts)

    def md_output(self):
        """Print self.df as a Markdown table and return the Markdown string.

        Fixes the old off-by-one: ``itertuples()`` rows start with the Index
        at position 0, so the previous loop silently dropped the last column
        of every data row.
        """
        columns = list(self.df.columns)
        lines = ["|".join(str(c) for c in columns),
                 "|".join(["----"] * len(columns))]
        for row in self.df.itertuples(index=False):
            lines.append("|".join(str(v) for v in row))
        md_str = "\n".join(lines) + "\n"
        print(md_str)
        return md_str

if __name__ == "__main__":
    url = "https://www.dongchedi.com/auto/params-carIds-77876"
    crawler = DongchediCrawler()
    context = crawler.readSeedFile("D://data//懂车帝.txt")
    crawler.analysis_content(context)
    print("finished!")
