from utils import file_utils
import timeit
from scraping import guoyao_scraping, liuyao_scraping, shanlian_scraping,jinbo_scraping
from logs.set_logfile import logger
from clean_data import merge_excel, shangyao_clean
from h3yun.service import sales_data_service


# If any upload step fails, simply re-running this script recovers the run.
def start():
    """Run the full daily sales pipeline: scrape, merge, and upload.

    Steps, in order: create today's data directory, scrape each supplier
    site, merge the day's Excel files, then upload the merged data to
    H3 Yun (氚云). If an upload step fails, re-running the whole script
    is sufficient to recover.
    """
    start_time = timeit.default_timer()
    print("开始计时")
    logger.info("开始抓取数据")

    # Create today's directory for the scraped files.
    file_utils.create_today_dir()

    # Jinbo (金博)
    jinbo_scraping.scraping()

    # Liuyao (柳药)
    liuyao_scraping.scraping()

    # Guoyao (国药) — Guangxi and Liuzhou branches
    guoyao_scraping.scraping()

    # Shanlian (闪链)
    shanlian_scraping.scraping()

    # NOTE(review): the Hedan (合丹) cleaning step is currently disabled —
    # confirm whether it should be re-enabled or removed along with its import.
    # shangyao_clean.clean_data()

    # Merge all of today's Excel files into one daily sheet.
    merge_excel.merge_excel_a_day()

    # NOTE(review): merging the daily sheet into the master sheet is
    # currently disabled — confirm before re-enabling.
    # merge_excel.merge_total_excel()

    logger.info("开始上传到氚云")
    # Upload the merged daily data to H3 Yun (氚云).
    sales_data_service.upload_day_sales_data()
    logger.info("上传到氚云成功")

    end_time = timeit.default_timer()
    elapsed_minutes = (end_time - start_time) / 60.0
    # Fix: format to two decimal places instead of printing the raw float.
    msg = f"计时时间: {elapsed_minutes:.2f} 分钟"
    print("计时结束")
    print(msg)
    print("数据获取完毕")
    logger.info("数据获取完毕")
    logger.info(msg)

