#!/usr/bin/env python3

from datetime import datetime
import os
import re
from typing import Dict, List

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.by import By
import json
from time import sleep


class Concept(object):
    """One concept board (name + index URL) and the shares that belong to it."""

    def __init__(self, name: str, url: str) -> None:
        self.name = name
        self.url = url
        # Populated by getSharesList()/readFromFile(); start empty so that
        # writeToFile() never hits an AttributeError on a fresh instance.
        self.sharesList: List = []

    def getSharesList(self):
        """Scrape this board's share list using a throwaway WebDriver."""
        driver = initDriver()
        try:
            self.sharesList = getSharesOfConcept(self.url, driver)
        finally:
            # Always release the browser, even if scraping raises.
            driver.quit()

    def writeToFile(self):
        """Persist this concept as JSON under the 概念板块 directory."""
        _writeConceptToFile(self)

    def readFromFile(self, filePath: str) -> bool:
        """Load name/url/sharesList from a JSON file.

        Returns True when the file exists and was loaded, False otherwise.
        """
        ret = False
        if os.path.exists(filePath):
            with open(filePath, "r", encoding='utf-8') as f:
                data = json.load(f)
                self.name = data['name']
                self.url = data['url']
                self.sharesList = data['sharesList']
                myLog(f"data: {str(data)}")
                myLog(f"object: {self}")
            ret = True
        else:
            # Fixed typo in the original log message ("exsits").
            myLog(f"filePath: {filePath} not exists")
        return ret


def myLog(msg: str):
    """Append *msg* to log.txt, prefixed with a dd-mm-YYYY HH:MM:SS timestamp."""
    stamp = datetime.now()
    line = f"{stamp:%d-%m-%Y} {stamp:%H:%M:%S}: {msg}\n"
    with open("log.txt", "a", encoding='utf-8') as logfile:
        logfile.write(line)


def initDriver() -> WebDriver:
    """Build a headless Chrome WebDriver using the bundled browser/driver binaries."""
    opts = Options()
    # All simple command-line switches, applied in one pass.
    for flag in ("--headless",
                 "--disable-extensions",
                 "--disable-notifications",
                 "--disable-Advertisement",
                 "--disable-popup-blocking"):
        opts.add_argument(flag)
    opts.binary_location = r"E:\practice\myshare2\myshare2\bin\chrome-win64\chrome.exe"
    # Hide the "Chrome is being controlled by automated software" banner.
    opts.add_experimental_option("excludeSwitches", ["enable-automation"])
    svc = Service(
        executable_path=r"E:\practice\myshare2\myshare2\bin\chromedriver.exe")
    return webdriver.Chrome(service=svc, options=opts)


def _getConceptsList(url: str) -> Dict:
    """Scrape the concept-board index page and return a name -> href mapping.

    Opens *url* in a fresh headless browser, parses the second <tbody> of the
    rendered page, and maps each row's first link text to its href.
    """
    driver = initDriver()
    try:
        driver.get(url)
        page_source = driver.page_source
    finally:
        # Always release the browser, even if navigation raises.
        driver.quit()
    soup = BeautifulSoup(page_source, 'html.parser')
    tbody = soup.find_all("tbody")[1]
    # tr.find("a") returns None for rows without a link; skip those instead of
    # crashing on a.text later.
    anchors = [a for a in (tr.find("a") for tr in tbody.find_all("tr"))
               if a is not None]
    myLog(str(anchors))
    concepts = {}
    for a in anchors:
        myLog(a.text)
        myLog(a.get("href"))
        concepts[a.text] = a.get("href")
    myLog(str(concepts))
    return concepts


def getSharesOfConcept(url: str,  driver: WebDriver) -> List:
    """Collect all shares of one concept board, paging through every result page.

    Navigates *driver* to *url*, switches the list to 80 rows per page, then
    scrapes each page until the "下一页" (next page) link disappears.
    Returns a de-duplicated list of share dicts (see __getSharesFromSource).
    """
    ret = []
    driver.get(url)
    driver.implicitly_wait(10)
    # Click the "80 rows per page" control; the page renders asynchronously,
    # so retry on missing/stale elements until the click succeeds.
    # NOTE(review): this loop never gives up — if the control never appears it
    # spins forever; consider a retry cap.
    while True:
        try:
            driver.implicitly_wait(10)
            driver.find_element(By.ID, "list_amount_ctrl").find_element(
                    By.LINK_TEXT, "80").click()
            break
        except NoSuchElementException as e:
            myLog(str(e))
            continue
        except StaleElementReferenceException as e:
            myLog(str(e))
            continue
    # xz counts pages, purely for the log ("第 N 页" = "page N").
    xz = 0
    while True:
        xz = xz + 1
        myLog(f"第 {xz} 页")
        # Scrape the current page, then click "next page" and scrape again.
        # The second scrape may capture a partially-updated page; any
        # duplicates this produces are removed by the dedup pass below.
        ret = ret + __getSharesFromSource(driver.page_source)
        try:
            driver.find_element(By.LINK_TEXT, "下一页").click()
            ret = ret + __getSharesFromSource(driver.page_source)
        except NoSuchElementException as e:
            # No "next page" link: we are on the last page.
            myLog(f'except: {e}')
            break
        except StaleElementReferenceException as e:
            myLog(str(e))
            continue
    # Order-preserving de-duplication; the entries are dicts (unhashable),
    # so a set cannot be used here.
    ret = [x for i, x in enumerate(ret) if x not in ret[:i]]
    myLog(f"length conept: {len(ret)}")
    myLog(str(ret))
    return ret


def _writeConceptToFile(concept: Concept):
    """Serialize a Concept (name, url, sharesList) to 概念板块/<name>.json."""
    if not os.path.exists("概念板块"):
        os.mkdir("概念板块")
    payload = {
        'name': concept.name,
        'url': concept.url,
        'sharesList': concept.sharesList,
    }
    path = "概念板块/" + concept.name + ".json"
    myLog(path)
    with open(path, "w", encoding="utf-8") as f:
        # Keep CJK characters readable in the output file.
        json.dump(payload, f, ensure_ascii=False)


def __getSharesFromSource(page_source: str) -> list:
    """Extract share rows from rendered HTML.

    Parses the fourth <tbody> of *page_source* and, for each row, pulls a
    share code (two letters + six digits) and a share name (CJK text,
    optionally prefixed with s/S/t/T/*) from the row's links. Rows missing
    either field are skipped. Returns a list of {'code', 'name'} dicts.
    """
    code_pat = re.compile(r'[a-zA-Z]{2}\d{6}')
    name_pat = re.compile(r'[sStT\*\u4e00-\u9fff]+')
    shares = []
    soup = BeautifulSoup(page_source, "html.parser")
    for row in soup.find_all("tbody")[3].find_all("tr"):
        links = row.find_all("a")
        entry = {}
        # Last matching link wins for each field, as in the original scan order.
        for link in links:
            if code_pat.match(link.text):
                entry['code'] = link.text
        for link in links:
            cleaned = link.text.replace(" ", "")
            if name_pat.match(cleaned):
                entry['name'] = cleaned
        if 'name' in entry and 'code' in entry:
            shares.append(entry)
    return shares


class Concepts(object):
    """Registry of every concept board listed on the Sina index page."""

    def __init__(self):
        # Landing page that lists all concept boards.
        self.ConceptsUrl = "https://finance.sina.com.cn/stock/sl/#concept_1"

    def getConceptsInfos(self):
        """Fetch and cache the board-name -> board-URL mapping."""
        self.ConceptsInfos = _getConceptsList(self.ConceptsUrl)
        return self.ConceptsInfos

    def updateConceptsList(self):
        """Scrape and persist every known board; return the Concept objects.

        Requires getConceptsInfos() to have been called first.
        """
        self.ConceptsList = []
        for board_name, board_url in self.ConceptsInfos.items():
            concept = Concept(board_name, board_url)
            concept.getSharesList()
            concept.writeToFile()
            self.ConceptsList.append(concept)
            # Throttle between boards to stay polite to the server.
            sleep(20)
        return self.ConceptsList
