#记录开始运行时间
import datetime
with open('record.txt', 'a+', encoding='utf-8') as f:
    # The with-statement closes the file automatically; the original
    # explicit f.close() inside the block was redundant and is removed.
    f.write("程序于" + str(datetime.datetime.now()) + "开始运行\n")

import os

# Discover data-source modules: every entry in the working directory whose
# name contains "source" (e.g. "news_source.py" -> "news_source").
dir_list = os.listdir()
source_list_0 = []
for dir_elem in dir_list:
    if "source" in dir_elem:
        # NOTE(review): str.replace strips EVERY ".py" occurrence (not just a
        # trailing suffix), and entries without ".py" are kept unchanged —
        # assumed intended; confirm against the actual source filenames.
        source_list_0.append(dir_elem.replace(".py", ""))

# Select which sources to crawl. para_1 acts as a debug switch:
#   0 -> crawl only the fourth discovered source (single-source testing)
#   1 -> crawl every discovered source (normal operation)
source_list = []
para_1 = 1
if para_1 == 0:
    # NOTE(review): assumes at least four sources were discovered;
    # raises IndexError otherwise. Dead branch while para_1 == 1.
    source_list.append(source_list_0[3])
elif para_1 == 1:
    # A fresh copy (not an alias) is required: the crawl loop below appends
    # retry entries to source_list while iterating over it.
    source_list.extend(source_list_0)
print(source_list)
from shutil import copyfile
import importlib
import requests

j = 0
k = 1
# Crawl once per entry in source_list, driving the generic crawler in
# frame.py with the constants defined by each source_* module.
# j counts completed sources (controls import vs. reload); k counts
# retry attempts for the current failing source.
for source_name in source_list:
    if j == 0:
        source_module = importlib.import_module(name=source_name)
    elif j > 0:
        # Reload on later iterations so stale module state from the
        # previous source is discarded.
        source_module = importlib.reload(importlib.import_module(name=source_name))
    i = 0
    # NOTE(review): i is assigned but never used below.
    # Copy every non-dunder attribute of the source module into this
    # script's namespace and append it to a temporary source.py as a
    # raw-string assignment, so frame.py can import the same constants.
    for elem in vars(source_module).items():
        if "__" not in elem[0]:
            vars()[elem[0]] = elem[1]
            with open('source.py', 'a+', encoding='utf-8') as f:
                f.write(str(elem[0]) + " = r\'" + str(elem[1]) + "\'\n")
                f.close()
    os.system('python .\source.py')
    from frame import *
    print("正在爬取", source)
    # Start the browser.
    # NOTE(review): if getDriver() raises, the error is only printed and
    # driver may be unbound/stale on the next use — confirm intended.
    try:
        driver = getDriver()
    except Exception as e:
        print(e)
    # Fetch all news links for this source (target_list).
    try:
        target_list = getBasicInfo(url=url, driver=driver)#target_list is a list of objects whose class is a tree of basic info
        print("已获取", len(target_list), "条基本信息！")
    # Because of an import quirk in the Python 3 version in use, modules in
    # the same folder sometimes fail to import; on failure the same source
    # is re-queued at the end of source_list for another attempt.
    except Exception as e:
        print(e)
        print("无法获取基本信息，即将重来..")
        with open('record.txt', 'a+', encoding='utf-8') as f:
            f.write("因为" + str(source) + "在" + str(datetime.datetime.now()) + "重新爬取了" + str(k) + "次" + "\n")
            f.close()
        os.remove('source.py')
    # Cap retries so one persistently broken source cannot delay the next
    # scheduled crawl (low-probability safeguard).
        if k > 14:
            print("重复尝试爬取次数过多，即将跳过重复爬取for just this period.")
            continue
        elif k <= 14:
            # Re-queue this source at the end of the list being iterated.
            source_list.insert(len(source_list), source_name)
            k += 1
            continue
    # Fetch title, contents, etc. from each article link.
    content_list = getContent(input_list=target_list, driver=driver) #content_list is a list of objects whose class is a tree of contents
    print("已获取全部信息！")
    # Persist everything crawled into the database.
    sql_id_list = insertSql(input_list=content_list)
    print("已将所有信息导入Sql！")
    # The remaining steps are described by their printed messages.
    enterpriseRelation(input_list=content_list)
    print("已关联相关企业新闻！")
    insertImages(content_list=content_list, sql_id_list=sql_id_list)
    print("已将图片导入图片库！")
    quitFirefox(driver=driver)
    print("已关闭浏览器")
    # Image de-duplication; failures follow the same retry/re-queue
    # pattern as getBasicInfo above.
    try:
        checkImg()
        print("已完成图片查重！")
    except Exception as e:
        print(e)
        print("无法获取基本信息，即将重来..")
        with open('record.txt', 'a+', encoding='utf-8') as f:
            f.write("因为" + str(source) + "在" + str(datetime.datetime.now()) + "重新爬取了" + str(k) + "次" + "\n")
            f.close()
        os.remove('source.py')
        if k > 14:
            print("重复尝试爬取次数过多，即将跳过重复爬取for just this period.")
            continue
        elif k <= 14:
            source_list.insert(len(source_list), source_name)
            k += 1
            continue
    randomizeNews()
    print("已完成乱序显示！")
    print("已完成", source, "爬取！")
    # Remove the temporary constants file so the next source starts clean.
    os.remove('source.py')
    print("已删除临时数据源")
    j += 1

print("\n\n\n", "已完成所有爬取！")


# Record the finish time of this run.
with open('record.txt', 'a+', encoding='utf-8') as f:
    # The with-statement closes the file automatically; the original
    # explicit f.close() inside the block was redundant and is removed.
    f.write("程序于" + str(datetime.datetime.now()) + "结束运行\n")









        