from selenium import webdriver
from time import sleep
from config import liuyao_config
from selenium.webdriver.common.by import By
from selenium.common import exceptions
from logs.set_logfile import logger
from exception.scraping_exception import scraping_exception
from config import (file_url_config,
                    query_params_config)
from os import listdir, getcwd
from utils import file_utils, yzm_utils
from clean_data.liuyao_clean import clean_data
import traceback

def get_liuyao_data():
    """Log in to the Liuyao portal with Selenium and save the sales report.

    Drives a Chrome session: logs in (retrying the OCR'd captcha up to 5
    times), then fetches the sales-report URL and writes its page source to
    the configured Excel/HTML file paths.

    Raises:
        scraping_exception: if login still fails after 5 attempts.
    """
    options = webdriver.ChromeOptions()
    options.add_experimental_option("prefs", {
        # Explicit '\\' — the original '\d' is an invalid escape sequence
        # (DeprecationWarning); this keeps the same Windows path separator.
        "download.default_directory": getcwd() + '\\download_selenium_excel_file',
        "download.prompt_for_download": False,
        "download.directory_upgrade": True,
        "safebrowsing.enabled": True})
    options.add_experimental_option('excludeSwitches', ['enable-automation'])
    options.add_argument('disable-javascript')
    driver = webdriver.Chrome(options=options)
    driver.get(liuyao_config.login_url)
    count = 1
    while True:
        sleep(5)
        logger.info(f"柳药{count}次尝试登陆")
        # Clear each field before typing: on a retry, send_keys would
        # otherwise append to the previous attempt's text.
        username_input = driver.find_element(By.XPATH, '//*[@id="code1"]')
        username_input.clear()
        username_input.send_keys(liuyao_config.username)
        password_input = driver.find_element(By.XPATH, '//*[@id="password"]')
        password_input.clear()
        password_input.send_keys(liuyao_config.password)
        # Solve the captcha image via OCR and type the recognized text.
        captcha_text = yzm_utils.handle_yzm(driver, driver.find_element(By.XPATH, '//*[@id="Table_01"]/tbody/tr[4]/td/table/tbody/tr[4]/td[2]/img'))
        sleep(10)
        captcha_input = driver.find_element(By.XPATH, '//*[@id="authinput"]')
        captcha_input.clear()
        captcha_input.send_keys(captcha_text)
        sleep(5)
        driver.find_element(By.XPATH, '//*[@id="button"]').click()
        try:
            alert = driver.switch_to.alert
            if alert.text == "验证码不正确！":
                alert.accept()
                logger.warning("柳药验证码识别失败")
                # BUGFIX: no `continue` here — the original skipped the
                # `count += 1` below on captcha failure, so a repeatedly
                # failing captcha looped forever without ever hitting the
                # 5-attempt cap. Falling through counts the failed attempt.
        except exceptions.NoAlertPresentException:
            # No alert means the login was accepted.
            break
        count += 1
        if count > 5:
            msg = "柳药登陆失败！"
            logger.error(msg)
            raise scraping_exception(msg)
    sleep(5)
    start_time = liuyao_config.query_start_time
    end_time = liuyao_config.query_end_time
    mid_str = day_month_year(start_time) + '-' + day_month_year(end_time)
    # Report URL with the date range and URL-encoded GBK title baked in.
    liuxiang_url = ('http://www.lzyy.cn:6119/usrweb/lzyy/htdocs/srcnew/report/report.jsp?heigh=-1&tj=a_cxrq1='
                    + start_time + ';a_cxrq2='
                    + end_time + ';a_code1=h440;a_gcode=;a_mark=cust_xs;titl=%B2%E9%D1%AF%C8%D5%C6%DA%A3%BA'
                    + mid_str + '('
                    + end_time + '%A3%ACh440%D6%DA%D6%BE%B7%C9%BE%C8%D2%BD%C1%C6%BF%C6%BC%BC%A3%A8%C9%EE%DB%DA%A3%A9%D3%D0%CF%DE%B9%AB%CB%BE)&raq=lzmis/cust_xs.raq%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20&data=mis&scrollWidth=NaN&scrollHeight=NaN')
  #  kucun_url = 'http://www.lzyy.cn:6004/usrweb/lzyy/htdocs/srcnew2_old/report/report.jsp?heigh=-1&tj=a_cxrq1=;a_cxrq2=;a_code1=h440;a_gcode=;a_mark=cust_kc;a_cxtj=;a_bno=&raq=lzmis/cust_kc.raq%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20&data=mis&scrollWidth=NaN&scrollHeight=NaN'
    try:
        # Dismiss any lingering dialog before navigating to the report.
        alert = driver.switch_to.alert
        alert.accept()
    except exceptions.NoAlertPresentException:
        pass
    finally:
        sleep(5)
        driver.get(liuxiang_url)
        # The same page source is saved twice under both target paths
        # (Excel and HTML) for the downstream cleaning step.
        with open(liuyao_config.liuyao_goods_excel_url, 'w', encoding='utf-8') as f1:
            f1.write(driver.page_source)
        sleep(2)
        with open(liuyao_config.liuyao_goods_html_url, 'w', encoding='utf-8') as f1:
            f1.write(driver.page_source)
        sleep(2)
       # driver.get(kucun_url)
       #  with open(liuyao_config.liuyao_goods_storge_excel_url, 'w', encoding='utf-8') as f2:
       #      f2.write(driver.page_source)
       #  sleep(2)
       #  with open(liuyao_config.liuyao_goods_storge_html_url, 'w', encoding='utf-8') as f2:
       #      f2.write(driver.page_source)
        driver.quit()


def day_month_year(date):
    """Reformat a 'YYYY-MM-DD' date string as 'MM/DD/YYYY'."""
    return f"{date[5:7]}/{date[8:11]}/{date[0:4]}"


def scraping():
    """Run the full Liuyao pipeline: purge stale files, scrape, clean.

    Any failure is logged with its traceback and recorded into the
    configured error file; exceptions are not re-raised (best-effort job).
    """
    try:
        # Remove artifacts from a previous run so stale data is never mixed in.
        file_utils.delete_file(liuyao_config.liuyao_error_file_url)
        file_utils.delete_file(liuyao_config.liuyao_goods_html_url)
        file_utils.delete_file(liuyao_config.liuyao_goods_excel_url)
        file_utils.delete_file(liuyao_config.excel_file_url)
        logger.info("开始获取柳药销售数据！")
        get_liuyao_data()
        logger.info("柳药销售数据获取成功！")
        logger.info("柳药开始数据清洗!")
        clean_data()
        logger.info("柳药数据清洗成功!")
    except Exception as e:
        # logger.exception logs the message plus the full traceback in one
        # call, replacing the print / traceback.print_exc / double
        # logger.error combination of the original.
        logger.exception("柳药导出报错!")
        # file_utils.delete_file(liuyao_config.liuyao_goods_html_url)
        # file_utils.delete_file(liuyao_config.liuyao_goods_excel_url)
        # file_utils.delete_file(liuyao_config.excel_file_url)
        file_utils.scraping_error(file_url=liuyao_config.liuyao_error_file_url, log_msg=e)


if __name__ == '__main__':
    # Only call this when today's working directory has not been created yet.
    file_utils.create_today_dir()
    scraping()



