import json
import time

from pages.basePage import BasePage
from selenium.webdriver.common.action_chains import ActionChains
from config.gp_config import GpConfig
from tools.request import Base
from tools.utils import log_time
from tools.logs import Logs

# Module-level logger named after this module (wrapper from tools.logs).
logger = Logs(__name__).get_logger()


class GpHomePage(BasePage):
    """Page object for a stock's home page.

    Scrapes price, sector, financial and index data by loading the page
    through the Selenium driver inherited from BasePage and extracting
    values with the XPath expressions defined in ``__init__``.
    """

    def __init__(self, sj_type):
        """
        :param sj_type: key into ``GpConfig.gp_home_xpath`` selecting the
            URL/XPath configuration for this page type (e.g. ``"gp"``).
        """
        super().__init__()
        # NOTE(review): .get() returns None for an unknown sj_type; the
        # methods below would then fail on ``self.type_data.get(...)``.
        self.type_data = GpConfig.gp_home_xpath.get(sj_type)
        self.request = Base()
        # XPath expressions for the various page fragments (kept verbatim).
        self.jlr_title_xpath = '//*[@class="finance4 afinance4"]/table/thead/tr/th/text()'
        self.jlr_xpath = '//*[@class="finance4 afinance4"]/table/tbody/tr[1]/td/text()'
        self.sz_xpath = '//*[@class="brief_info_c"]//*[@class="price_draw blinkblue"]/text()'
        # Row index is filled in via str.format before use.
        self.zj_xpath = '//*[@id="table_ls"]/table/tbody/tr[{}]/td/text()'
        self.n_xpath = '//*[@class="qcgi_t"]/text()'
        self.lr_xpath = '//*[@class="qcgi_b"]/div/text()'
        self.bk_id_xpath = '//*[@class="xgbk"]/table/tbody/tr/td/a[1]/@href'
        self.bks_xpath = '//*[@class="xgbk"]/table/tbody/tr/td[1]/a/@href'
        self.gp_top_xpath = '//*[@class="table1 pl5 lh_5"]//tr[1]/td/*/text()'
        self.gj = '//*[@class="zxj"]/span/span/text()'

    def get_gp_base_data(self, dm: str) -> dict:
        """Fetch the stock's financial-summary table (net profit etc.).

        :param dm: stock code used to format the page URL.
        :return: dict mapping table header text to the first data row.
        """
        url = self.type_data.get("url").format(dm)
        html = self.get_page_source(url)
        titles = self.request.parse(html, self.jlr_title_xpath)
        values = self.request.parse(html, self.jlr_xpath)
        return dict(zip(titles, values))

    @log_time
    def get_gp_jiage(self, dm: str):
        """Fetch the stock's current price.

        :param dm: stock code used to format the page URL.
        :return: list of text nodes matched by the price XPath.
        """
        url = self.type_data.get("url").format(dm)
        html = self.get_page_source(url)
        price = self.request.parse(html, self.gj)
        return price

    def get_gp_bk(self, dm: str) -> list:
        """Fetch the industry-sector (板块) codes the stock belongs to.

        :param dm: stock code used to format the page URL.
        :return: list of sector ids normalized to the form ``BK<digits>``.
        """
        html = self.get_page_source(self.type_data.get("url").format(dm))
        hrefs = self.request.parse(html, self.bks_xpath)
        # Each href embeds the sector id after the literal "BK".
        return ['BK' + href.split('BK')[-1] for href in hrefs]

    def get_gp_top_one(self, dm: str):
        """Fetch the first data row of the stock's summary table.

        :param dm: stock code used to format the page URL.
        :return: list of cell text values from the table's first row.
        """
        # 跳转到首页
        url = self.type_data.get("url").format(dm)
        html = self.get_page_source(url)
        row = self.request.parse(html, self.gp_top_xpath)
        return row

    def get_gp_zs(self, dm: str, day: int) -> list:
        """Collect daily index/K-line data for the last ``day`` days.

        Hovers the mouse over the chart once per day; the page renders a
        tooltip whose fields are scraped after each move.
        x=155 corresponds to day 10; each day is 7.4px further right.

        :param dm: exchange prefix plus stock code, e.g. ``sh00001``.
        :param day: number of days to read (0 yields an empty list).
        :return: list of dicts, one per day, each including a ``date`` key.
        :raises ValueError: if ``dm`` lacks a known exchange prefix.
        """
        if dm[:2] not in ("sh", "sz", "BK"):
            # ValueError is still caught by callers using ``except Exception``.
            raise ValueError("代码错误，缺少交易所前缀")
        # 跳转到首页
        url = self.type_data.get("url").format(dm)
        xpath = self.type_data.get("main_xpath")
        self.get_page_source(url)
        x = 155 + (10 - day) * 7.4
        element = self.get_element_id('emchartk')
        new_d = []
        for i in range(day):
            ActionChains(self.driver).move_to_element_with_offset(element, x + i * 7.4, 0).perform()
            html = self.get_page()
            date_el = self.request.parse(html, xpath.get("date"))[0]
            logger.debug("%s", date_el)  # was a stray print(); keep visibility via logging
            k_el = self.request.parse(html, xpath.get("title"))
            v_el = self.request.parse(html, xpath.get("result"))
            zs_d = dict(zip(k_el, v_el))
            zs_d["date"] = date_el
            new_d.append(zs_d)

            # Give the chart tooltip time to refresh before the next hover.
            time.sleep(0.5)
        return new_d



if __name__ == '__main__':
    # Ad-hoc driver: annotate every stock in gp_c.json with its sector codes.
    gh = GpHomePage("gp")
    with open("D:\\PycharmProjects\\test\\stock-analysis-project\\data\\gp_c.json", "r", encoding="utf-8") as f:
        gp_list = json.load(f)  # idiomatic replacement for json.loads(f.read())
    for gp in gp_list:
        gp["bks"] = gh.get_gp_bk(gp.get("dm"))
        print(gp, ",")
