# -*- coding:utf-8 -*-
from database.db_handler import MysqlHander
from common.my_http import MyHttp
import urllib.request
from bs4 import BeautifulSoup
import glob
import jieba
import re
import threading
import hashlib
import urllib.parse
import time
import timedelta
import datetime
import requests
from database.db_business import DbBusiness

'''
巨潮资讯网 (cninfo) — crawler for stock announcements from www.cninfo.com.cn
and Baidu news, categorized into per-topic MySQL tables.
'''

# Module-level output file for dumping generated SQL statements
# (currently only referenced from commented-out debug code below).
file = open('./out/juchao_data.txt', 'w', encoding = 'utf-8')
# Shared form-encoded header for every cninfo POST request.
headers={'content-type':'application/x-www-form-urlencoded'}
class JuchaoData:
    """Crawler that collects stock-related headlines/announcements and stores
    them into MySQL, one destination table per headline category.

    Sources:
      * Baidu news search        -> get_online_data()
      * cninfo announcement API  -> query_page_data() / query_stock_page_data()

    Each instance is responsible for the stock rows whose Fid lies in
    [start_id, end_id); the __main__ section spawns one worker thread per slice.

    NOTE(review): SQL and URLs are built by plain string concatenation
    (self.db.execute_not_safe), so quote characters in titles can break or
    inject into statements; get_code() filters a few dangerous characters but
    parameterised queries would be the real fix.
    """

    def __init__(self, start_id=0, end_id=999999):
        self.db = MysqlHander("config.ini")   # raw MySQL handle (project class)
        self.stock_name = {}                  # stock name  -> stock code
        self.stock_exchange = {}              # stock code  -> exchange id
        self.table = ""                       # destination table, set by get_code()
        self.business = DbBusiness()          # supplies rotating HTTP proxies
        self.start_id = start_id              # first Fid (inclusive) of this worker's slice
        self.end_id = end_id                  # last Fid (exclusive)

    def get_baidu_url(self, code_name, info_name, i):
        """Build the Baidu news-search URL for result page *i* (10 hits/page)
        of the query "<code_name> <info_name>"."""
        return ("https://www.baidu.com/s?rtt=1&bsst=1&cl=2&tn=news&ie=utf-8&word="
                + urllib.parse.quote(code_name) + "+" + urllib.parse.quote(info_name)
                + "&x_bfe_rqs=03E80&x_bfe_tjscore=0.100000&tngroupname=organic_news"
                "&newVideo=12&goods_entry_switch=1&rsv_dl=news_b_pn&pn=" + str(10 * i))

    def add_attr_data_temp(self, code, relation_new, data_title, data_date, data_url, data_site):
        """Insert an uncategorised item (relation code 3999) into the temp table.

        Errors are logged and swallowed so one bad row never stops the crawl.
        """
        insert = ("insert into relation_data_03_temp (Fsrc_code, Fsrc_type, "
                  "Frelation_id, Ftitle, Fdate, Furl, Fsite, Fstatus, "
                  "Fcreate_time, Fmodify_time) values "
                  "('" + code + "', 0, " + str(relation_new) + ",'" + data_title
                  + "','" + data_date + "','" + data_url + "','" + data_site
                  + "',0, now(), now());")
        print(insert)
        try:
            self.db.execute_not_safe(insert)
        except Exception as e:
            # best-effort insert: log and continue with the next item
            print(str(e))

    def add_attr_data(self, code, data_title, data_date, data_url, data_site):
        """Insert a categorised item into self.table (chosen by get_code())."""
        insert = ("insert into " + self.table + " (Fsrc_code, Fsrc_type, Ftitle, "
                  "Fdate, Furl, Fsite, Fcreate_time, Fmodify_time) values "
                  "('" + code + "', 0,'" + data_title + "','" + data_date
                  + "','" + data_url + "','" + data_site + "', now(), now());")
        print(insert)
        try:
            self.db.execute_not_safe(insert)
        except Exception as e:
            # best-effort insert: log and continue with the next item
            print(str(e))

    def check_site(self, site_name):
        """Return True when *site_name* matches a whitelisted news outlet;
        otherwise print it (useful for tuning the whitelist) and return False."""
        site_lists = ["新浪","搜狐","腾讯","东方财富网","同花顺","网易","金融界","证券之星","每日经济新闻","中国财经信息网","凤凰","和讯","中国经济网","格隆汇","中国网","人民资讯","中证网","第一财经","证券时报","华夏时报","中华网","经济观察报","人民网","中国民航网"]
        for d in site_lists:
            if re.search(d, site_name):
                return True
        print(site_name)
        return False

    def get_code(self, data_title):
        """Classify *data_title*, set self.table to the destination table and
        return the relation code.

        Returns 0 when the title must be skipped entirely, 3001-3007 for a
        known category, or 3999 ("other" -> temp table) when nothing matches.
        Keyword lists are tried in order; the first match wins.  Keywords are
        used as regex patterns, so e.g. "(系列)" actually matches "系列"
        (the parentheses form a group).
        """
        # Reject characters that would confuse the hand-concatenated SQL/URLs.
        for bad_char in ("|", "?", ";"):
            if data_title.find(bad_char) >= 0:
                print(bad_char)
                return 0
        # Market-noise headlines (price moves, rankings, investor Q&A, ...).
        deny_list = ["首次公开发行", "证券代码","股价回撤","股东户数","开启申购","交易异常波动","新股申购","丨","偏离值达到","户均持股","盘中跌幅","盘中涨幅","(系列)","互动平台","异常波动","封板","快速反弹","集体接待日","量价齐升","强势特征","短线","每日追踪","利空","利好","董秘回复","考察","交流","收盘价","开盘价","暴跌","暴涨","新高","新低","涨停","跌停","概念","现报","异动","大跌","大涨","走弱","早盘","净买入","大宗交易","公告精选","每日收评","点评","快速回调","快速上涨","股价异动","融资融券","消费参考","投资者提问","跑输大盘","跑赢大盘","净偿还","融资余额","累计涨幅","净流入","净流出","净卖出","集锦","看点","周报","加速下跌","加速上涨","点赞","净卖出","龙虎榜","精选","今日","快讯","盘前","盘后","午盘","跑输","大盘","沪股通","深股通","大学","快报"]
        for d in deny_list:
            if re.search(d, data_title):
                return 0
        # (keyword patterns, destination table, relation code), checked in order.
        categories = [
            (["提价","涨价","价格调整","价格上调","价格下调","调价函","投产","增产","扩产","生产线","产量"],
             "relation_data_03_tiaojia", 3001),      # price / capacity changes
            (["状告","败诉","胜诉","造假","监管","违法","罚款","被罚","问询","违法","警示函","遭罚","违规","重罚","行政处罚","重罚","处罚"],
             "relation_data_03_chufa", 3002),        # regulatory action / penalties
            (["关注函","涉案","执行金额","宣判","受贿","上诉","法院","诉讼","纠纷","侵犯","侵权","商标","仲裁","判决","驳回","冻结","索赔","被执行人","撤诉","起诉","违约","一审","二审","终审","重审","获赔","索偿","官司","判赔"],
             "relation_data_03_susong", 3003),       # lawsuits / arbitration
            (["评级","目标价","审计报告"],
             "relation_data_03_diaoyan", 3004),      # analyst research
            (["议价","生产成本","行业","解禁","基金","中标","担保","产业","上游","下游","关联子公司","关联公司","子公司","原材料","系列产品"],
             "relation_data_03_chanyelian", 3005),   # supply chain / industry
            (["10派","分配方案","资产重组","质押","派息","注册会计师","独立董事","股东大会","董事","公司高管","增发","募资","回购","激励","辞职","会议","摘牌","收购","决议","发行","入股","定增","公司章程","增资","扩股","拆股","董事会","员工持股","增持","续聘","澄清","赎回","减持"],
             "relation_data_03_guanli", 3006),       # corporate governance
            (["政府补助","净利润","净利","营收","年度报告","预减","预增","季度报告","季报","年报","预亏","业绩","预盈","并购","亏损","盈利"],
             "relation_data_03_yeji", 3007),         # earnings / results
        ]
        for keywords, table, relation in categories:
            for key in keywords:
                if re.search(key, data_title):
                    self.table = table
                    return relation
        self.table = "relation_data_03_other"
        return 3999

    def get_online_data(self, code, code_name, info_name):
        """Scrape up to 16 Baidu news result pages for one stock and store
        every acceptable headline.

        code:       stock code written into the DB row
        code_name:  stock name; must literally appear in the headline
        info_name:  extra search keyword joined to the query
        """
        for page_no in range(0, 16):
            url = self.get_baidu_url(code_name, info_name, page_no)
            soup = MyHttp.bs4_utf8_data_v2(url)
            # Each organic news hit sits in one of these containers
            # (class names are Baidu-internal and may change without notice).
            datas = soup.find_all(name="div", attrs={'class':'result-op c-container xpath-log new-pmd'})
            print(len(datas))
            for d in datas:
                try:
                    data_site = d.find(name="span", attrs={'class':'c-color-gray c-font-normal c-gap-right'}).text
                    if False == self.check_site(data_site):
                        continue
                    # "aria-label" carries the untruncated title, prefixed with "标题：".
                    data_title = d.find(name="a", attrs={'class':'news-title-font_1xS-F'}).attrs["aria-label"].replace("标题：","")

                    print(info_name)
                    if not re.search(code_name, data_title):
                        # headline does not mention the stock itself: skip
                        print(data_title)
                        continue

                    relation_new = self.get_code(data_title)
                    print("relation_new:" + str(relation_new))
                    if 0 == relation_new:
                        continue

                    data_url = d.find(name="a", attrs={'class':'news-title-font_1xS-F'}).attrs["href"]
                    data_date = d.find(name="span", attrs={'class':'c-color-gray2 c-font-normal'}).text
                    # Replace runs of CJK chars ("年"/"月"/"日") with "|" so
                    # "2022年1月2日" becomes ["2022", "1", "2"].
                    data_split = re.sub("[\u4E00-\u9FFF]+", '|', data_date).split('|')
                    data_split = [p for p in data_split if p != '']
                    print(data_split)
                    if len(data_split) == 3:
                        if int(data_split[0]) < 2018:
                            continue   # too old to be useful
                        date_res = "{:4d}-{:02d}-{:02d}".format(int(data_split[0]), int(data_split[1]), int(data_split[2]))
                    elif len(data_split) == 2:
                        # Baidu omits the year for current-year dates.
                        # Fix: the year was hard-coded as "2022"; use the
                        # current year so the crawler does not go stale.
                        date_res = "{:4d}-{:02d}-{:02d}".format(datetime.datetime.now().year, int(data_split[0]), int(data_split[1]))
                    else:
                        # relative dates ("3天前", ...) collapse to one token
                        continue

                    if relation_new == 3999:
                        self.add_attr_data_temp(code, relation_new, data_title, date_res, data_url, data_site)
                    else:
                        self.add_attr_data(code, data_title, date_res, data_url, data_site)
                except Exception as e:
                    # Baidu DOM change / parse failure: skip this result
                    print(str(e))

    def query_page_data(self, date_str, page):
        """Fetch one page of cninfo announcements for *date_str* (YYYY-MM-DD)
        and store them.

        Returns True when a following page exists, False when done or on any
        error, so callers can loop `while query_page_data(date_str, page):`.
        """
        try:
            param = ("pageNum=" + str(page) + "&pageSize=30&column=szse&tabName=fulltext&plate=&stock="
                     "&searchkey=&secid=&category=&trade=&seDate=" + date_str + "~" + date_str
                     + "&sortName=&sortType=&isHLtitle=true")
            proxy = self.business.query_proxy()
            my_proxy = {
                "http": proxy,
                "https": proxy
            }
            r = requests.post("http://www.cninfo.com.cn/new/hisAnnouncement/query",
                              data=param, headers=headers, proxies=my_proxy)
            json_data = r.json()
            keys = json_data.keys()
            if "totalpages" not in keys:
                return False

            page_max = int(json_data["totalpages"])
            if 0 == page_max:
                return False

            if "announcements" not in keys:
                return False

            for d in json_data["announcements"]:
                d_code = d["secCode"]
                # NOTE(review): the "6" glued onto orgId looks like a stray
                # character — confirm against a working cninfo detail URL.
                d_url = ("http://www.cninfo.com.cn/new/disclosure/detail?stockCode=" + d_code
                         + "&announcementId=" + d["announcementId"] + "&orgId=" + d["orgId"]
                         + "6&announcementTime=" + date_str)
                d_title = d["announcementTitle"]
                d_site = "巨潮资讯网"
                d_relation = self.get_code(d_title)
                if d_relation == 3999:
                    self.add_attr_data_temp(d_code, d_relation, d_title, date_str, d_url, d_site)
                else:
                    self.add_attr_data(d_code, d_title, date_str, d_url, d_site)

            # Fix: the original returned True on both branches, forcing one
            # extra empty request past the last page on every date.
            return page_max > page
        except Exception as e:
            print(str(e))
            return False

    def query_data_by_time(self):
        """Walk backwards one day at a time over the last 180 days and pull
        every announcement page for each date."""
        for offset in range(0, 180):
            date_new = datetime.datetime.now() - datetime.timedelta(days=offset)
            date_str = str(date_new)[0:10]   # "YYYY-MM-DD" prefix of the timestamp
            print(date_str)
            page = 1
            while self.query_page_data(date_str, page):
                print(page)
                page = page + 1

    def query_stock_page_data(self, code, page, exchange):
        """Fetch one page of cninfo announcements for a single stock.

        exchange: 1 -> "gssh0", 2 -> "gssz0", 3 -> "gfbj0" (presumably
        Shanghai / Shenzhen / Beijing boards — confirm against cninfo).
        Returns True when more pages remain, False when done or on error.
        """
        try:
            exchange_tag = {1: 'gssh0', 2: 'gssz0', 3: 'gfbj0'}.get(exchange, '')

            param = ("pageNum=" + str(page) + "&pageSize=30&column=szse&tabName=fulltext&plate=&stock="
                     + code + "," + exchange_tag + code
                     + "&searchkey=&secid=&category=&trade=&seDate=&sortName=&sortType=&isHLtitle=true")
            proxy = self.business.query_proxy()
            my_proxy = {
                "http": proxy,
                "https": proxy
            }
            r = requests.post("http://www.cninfo.com.cn/new/hisAnnouncement/query",
                              data=param, headers=headers, proxies=my_proxy)
            json_data = r.json()
            keys = json_data.keys()
            if "totalRecordNum" not in keys:
                print("no data\n")
                return False

            max_count = int(json_data["totalRecordNum"])
            if 0 == max_count:
                print("max:" + str(max_count))
                return False

            if "announcements" not in keys:
                print("no data-1\n")
                return False

            for d in json_data["announcements"]:
                d_code = d["secCode"]
                # Fix: derive the announcement date BEFORE building the URL.
                # The original interpolated date_str into the URL while it was
                # still empty (first item) or stale (subsequent items).
                time_stamp = d["announcementTime"] / 1000   # epoch millis -> seconds
                date_str = time.strftime("%Y-%m-%d", time.localtime(time_stamp))
                # NOTE(review): stray "6" after orgId — see query_page_data.
                d_url = ("http://www.cninfo.com.cn/new/disclosure/detail?stockCode=" + d_code
                         + "&announcementId=" + d["announcementId"] + "&orgId=" + d["orgId"]
                         + "6&announcementTime=" + date_str)
                d_title = d["announcementTitle"]
                d_site = "巨潮资讯网"
                d_relation = self.get_code(d_title)
                if d_relation == 3999:
                    self.add_attr_data_temp(d_code, d_relation, d_title, date_str, d_url, d_site)
                else:
                    self.add_attr_data(d_code, d_title, date_str, d_url, d_site)

            # Fix: originally this returned True on both branches AND in the
            # except handler, so a paginating caller could loop forever.
            return (max_count / 30 + 1) > page
        except Exception as e:
            print(str(e))
            return False

    def query_data_by_stock(self):
        """Pull the first announcement page for every stock assigned to this
        worker (populated by get_data())."""
        for stock_code, exchange_id in self.stock_exchange.items():
            print(exchange_id)
            self.query_stock_page_data(stock_code, 1, exchange_id)

    def query_stock(self):
        """Return (Fcode, Fname) rows for this worker's Fid slice.
        start_id/end_id are ints, so the interpolation is injection-safe."""
        sql = ("select Fcode,Fname from entity_gupiao where Fid>=" + str(self.start_id)
               + " and Fid < " + str(self.end_id) + ";")
        return self.db.query_sql(sql)

    def query_stock_exchange(self):
        """Return (Fcode, Fexchange_id) rows for this worker's Fid slice."""
        sql = ("select Fcode,Fexchange_id from entity_gupiao where Fid>=" + str(self.start_id)
               + " and Fid < " + str(self.end_id) + ";")
        return self.db.query_sql(sql)

    def get_data(self):
        """Entry point: load this worker's stock slice, then crawl cninfo
        per stock.  (The by-date crawl is available via query_data_by_time.)"""
        for row in self.query_stock():
            # row = (Fcode, Fname); map name -> code (used by the Baidu path)
            self.stock_name[row[1]] = row[0]
        for row in self.query_stock_exchange():
            # row = (Fcode, Fexchange_id)
            self.stock_exchange[row[0]] = row[1]
        self.query_data_by_stock()

def my_thread(start, end):
    """Worker entry point: crawl the [start, end) slice of stock Fids."""
    JuchaoData(start, end).get_data()

if __name__ == '__main__':
    # Partition the stock-id space into 430 slices of 137 ids each and
    # hand every slice to its own worker thread.
    workers = [
        threading.Thread(target=my_thread, args=(idx * 137, (idx + 1) * 137))
        for idx in range(430)
    ]
    for worker in workers:
        worker.daemon = True
        worker.start()
    # Wait for every worker to finish before exiting.
    for worker in workers:
        worker.join()
