# -*- coding:utf-8 -*-
from database.db_business import DbBusiness
from common.my_time import MyTime
from common.my_baidu import MyBaidu
from common.my_file import MyFile
from common.attr_v3 import AttrV3
from bs4 import BeautifulSoup
import glob
import jieba
import re
import threading
import hashlib
import urllib.parse
import time
import os

'''
Web data collection: search Baidu news for listed companies combined with
lawsuit-related keywords, and store matching results.
'''


class BaiduStockInfo:
    """Collect Baidu news search results for listed companies and persist
    lawsuit-related items.

    NOTE(review): depends on project helpers (DbBusiness, MyBaidu, MyTime,
    MyFile, AttrV3) whose exact contracts are not visible in this file.
    """

    def __init__(self):
        self.attr_v3 = AttrV3()
        self.business = DbBusiness()
        # Append-mode log file named after this script: ./out/<script>.log
        self.log_file = open('./out/' + os.path.split(__file__)[-1].split(".")[0] + ".log", 'a', encoding='utf-8')
        MyFile.wrtie_log(self.log_file, "开始")

    def __del__(self):
        # Guard: __init__ may have raised before self.log_file was assigned,
        # in which case an unguarded access here would raise AttributeError.
        log_file = getattr(self, 'log_file', None)
        if log_file is not None and not log_file.closed:
            MyFile.wrtie_log(log_file, "结束")
            log_file.close()

    def check_site(self, site_name):
        """Return True if ``site_name`` matches one of the whitelisted
        news-site names (substring/regex match), else False.

        NOTE(review): this method appears unused — get_data() calls
        MyBaidu.check_site instead; kept for backward compatibility.
        """
        site_lists = ["站长之家", "中关村在线", "新浪", "搜狐", "东方财富网", "同花顺", "网易", "金融界", "证券之星", "每日经济新闻", "中国财经信息网", "凤凰", "和讯", "中国经济网", "格隆汇", "中国网", "人民资讯", "中证网", "第一财经", "证券时报", "华夏时报", "中华网", "经济观察报", "人民网", "中国民航网"]
        for pattern in site_lists:
            if re.search(pattern, site_name):
                return True
        print(site_name)  # debug aid: surface sites not in the whitelist
        return False

    def get_data(self, word):
        """For every stock (code -> name), search Baidu news for
        '"<name>" <word>' ordered by time, filter results from the last 7
        days whose title matches a lawsuit-related keyword, and store them
        via AttrV3 (table id 3003).

        :param word: extra search keyword (e.g. "诉讼").
        """
        datas = self.business.query_stock_code_name()
        end_date = MyTime.forward_relative_date(7)
        # Lawsuit-related title keywords (duplicates from the original list
        # — "状告", "败诉", "胜诉" — removed; matching semantics unchanged).
        check_title = ["状告", "败诉", "胜诉", "涉案", "执行金额", "宣判", "受贿", "上诉", "法院", "诉讼", "纠纷", "侵犯", "侵权", "商标", "仲裁", "判决", "驳回", "冻结", "索赔", "被执行人", "撤诉", "起诉", "违约", "一审", "二审", "终审", "重审", "获赔", "索偿", "官司", "判赔", "造假", "违法", "重罚", "行政处罚"]
        for code, name in datas.items():
            next_page = True
            # Quote the company name so Baidu matches it as an exact phrase.
            words = ["\"" + name + "\"", word]
            for page in range(1, 2):  # currently only the first result page
                if not next_page:
                    break
                url = MyBaidu.get_url_order_by_time(words, page)
                try:
                    proxy = self.business.query_proxy()
                    res = MyBaidu.get_baidu_data_by_proxy(url, proxy)
                    for r in res:
                        data_date = MyBaidu.calc_date(r["date"])
                        # Results are ordered by time: once one is older than
                        # the 7-day cutoff, stop paging for this stock.
                        if data_date < end_date:
                            next_page = False
                            break
                        if not MyBaidu.check_site(r["site"]):
                            continue
                        if not MyBaidu.check_title(r["title"], [[name], check_title]):
                            continue
                        print(r["date"] + data_date)  # debug output
                        table_name = self.attr_v3.get_table_name(3003)
                        self.attr_v3.add_attr_data(table_name, code, r["title"], data_date, r["url"], r["site"])
                except Exception as e:
                    # Previously swallowed silently (`pass`), hiding proxy and
                    # parsing failures; log so problems become visible.
                    MyFile.wrtie_log(self.log_file, "get_data error: %s" % e)


if __name__ == '__main__':
    # Single-instance guard: lock file named after this script,
    # ./out/<script>.lock. Intentionally left open for process lifetime.
    lock_file = open('./out/' + os.path.split(__file__)[-1].split(".")[0] + ".lock", 'a', encoding='utf-8')
    # NOTE(review): assumes MyFile.lock_file returns a bool — confirm.
    if not MyFile.lock_file(lock_file):
        quit()
    d = BaiduStockInfo()
    # One full crawl pass per lawsuit-related search keyword.
    for word in ["诉讼", "官司", "纠纷"]:
        d.get_data(word)

# NOTE(review): disabled multi-threaded variant of the __main__ entry point,
# kept as dead code inside a module-level string literal. Consider deleting
# or moving to version control history.
'''
def my_thread(word):
    d = BaiduStockInfo()
    d.get_data(word)

if __name__ == '__main__':
    threads = []
    for k in ["诉讼","官司","纠纷"]:
        t = threading.Thread(target=my_thread, args=(k,))
        t.daemon = True
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
'''