import re
import time
from urllib.parse import quote_plus

from bs4 import BeautifulSoup

from common.my_http import MyHttp
from common.my_time import MyTime


class MyBaidu:
    """Helpers for querying Baidu News search and filtering the results.

    All methods are stateless static helpers:
      * URL construction for time-ordered news search,
      * normalization of Baidu's relative/absolute date strings,
      * result-page scraping (via MyHttp/BeautifulSoup),
      * whitelist/blacklist filters for sites and titles.
    """

    @staticmethod
    def get_url_order_by_time(words, page):
        """Build a Baidu News search URL sorted by publish time (rtt=4).

        words: iterable of keyword strings, joined with spaces into one query.
        page:  1-based page number; Baidu paginates 10 results per page.
        Returns the full URL string (also printed for debugging).
        """
        # URL-encode the joined query so spaces and CJK characters are safe
        # in the query string (previously injected raw).
        word_all = quote_plus(' '.join(words))
        url = ("https://www.baidu.com/s?tn=news&rtt=4&bsst=1&cl=2&wd="
               + word_all
               + "&medium=0&x_bfe_rqs=03E80&x_bfe_tjscore=0.100000&tngroupname=organic_news&newVideo=12&goods_entry_switch=1&rsv_dl=news_b_pn&pn="
               + str(10 * (page - 1)))
        print(url)
        return url

    @staticmethod
    def calc_date(baidu_date):
        """Normalize a Baidu date string to "YYYY-MM-DD".

        Handles "YYYY年MM月DD日", "MM月DD日" (current year assumed),
        "今天"/"N分钟前" (today), and "昨天"/"前天"/"N天前"/"N小时前"
        (delegated to MyTime helpers). Anything unrecognized is returned
        unchanged.
        """
        # Replace runs of CJK characters with '|' and split, leaving only the
        # numeric fields (e.g. "2021年5月3日" -> ['2021', '5', '3']).
        data_split = re.sub("[\u4E00-\u9FFF]+", '|', baidu_date).split('|')
        data_split = [d for d in data_split if d != '']
        print(data_split)
        date_res = baidu_date
        # "YYYY年MM月DD日"
        if len(data_split) == 3:
            date_res = "{:4d}-{:02d}-{:02d}".format(int(data_split[0]), int(data_split[1]), int(data_split[2]))
        # "MM月DD日" — no year given; use the current year (was hard-coded
        # to 2022, which went stale).
        elif len(data_split) == 2:
            date_res = "{}-{:02d}-{:02d}".format(time.strftime("%Y"), int(data_split[0]), int(data_split[1]))
        elif re.search("今天", baidu_date):
            date_res = time.strftime("%Y-%m-%d", time.localtime())
        elif re.search("分钟前", baidu_date):
            date_res = time.strftime("%Y-%m-%d", time.localtime())
        elif re.search("昨天", baidu_date):
            date_res = MyTime.forward_relative_date(1)
        elif re.search("前天", baidu_date):
            date_res = MyTime.forward_relative_date(2)
        elif re.search("天前", baidu_date):
            date_res = MyTime.forward_relative_date(int(data_split[0]))
        elif re.search("小时前", baidu_date):
            date_res = MyTime.forward_relative_date_by_hour(int(data_split[0]))
        # NOTE: a second, unreachable "前天" branch that existed here was
        # removed (the earlier branch always matches first).
        return date_res

    @staticmethod
    def get_baidu_data(url):
        """Fetch a Baidu News result page and extract its result entries.

        Returns a list of dicts with keys: site, title, url, date.
        Result cards missing the expected markup are logged and skipped
        (best-effort scraping).
        """
        soup = MyHttp.bs4_utf8_data_v2(url)
        datas = soup.find_all(name="div", attrs={'class': 'result-op c-container xpath-log new-pmd'})
        res = []
        for d in datas:
            try:
                # Look up the title anchor once instead of twice.
                title_a = d.find(name="a", attrs={'class': 'news-title-font_1xS-F'})
                data = {}
                data["site"] = d.find(name="span", attrs={'class': 'c-color-gray c-font-normal c-gap-right'}).text
                data["title"] = title_a.attrs["aria-label"].replace("标题：", "")
                data["url"] = title_a.attrs["href"]
                data["date"] = d.find(name="span", attrs={'class': 'c-color-gray2 c-font-normal'}).text
                res.append(data)
            except Exception as e:
                # Best-effort: skip malformed cards, keep the rest.
                print(str(e))
        return res

    @staticmethod
    def get_baidu_data_by_proxy(url, proxy):
        """Same as get_baidu_data, but fetches through the given proxy.

        NOTE(review): the CSS classes for site/date differ slightly from
        get_baidu_data — presumably Baidu serves different markup on this
        path; confirm before unifying.
        """
        soup = MyHttp.bs4_utf8_data_v2_with_proxy(url, proxy)
        datas = soup.find_all(name="div", attrs={'class': 'result-op c-container xpath-log new-pmd'})
        print(len(datas))
        res = []
        for d in datas:
            try:
                # Look up the title anchor once instead of twice.
                title_a = d.find(name="a", attrs={'class': 'news-title-font_1xS-F'})
                data = {}
                data["site"] = d.find(name="span", attrs={'class': 'c-color-gray'}).text
                data["title"] = title_a.attrs["aria-label"].replace("标题：", "")
                data["url"] = title_a.attrs["href"]
                data["date"] = d.find(name="span", attrs={'class': 'c-color-gray2 c-font-normal c-gap-right-xsmall'}).text
                res.append(data)
            except Exception as e:
                # Best-effort: skip malformed cards, keep the rest.
                print(str(e))
        return res

    @staticmethod
    def check_site(site_name):
        """Return True if site_name matches any whitelisted news outlet."""
        site_lists = ["中国新闻网","新浪","新华","东方财富网","同花顺","网易","金融界","证券之星","每日经济新闻","中国财经信息网","和讯","中国经济网","格隆汇","中国网","人民资讯","中证网","第一财经","证券时报","华夏时报","中华网","经济观察报","人民网","中国民航网"]
        return any(re.search(d, site_name) for d in site_lists)

    @staticmethod
    def not_support_site(site_name):
        """Return True if site_name matches any blacklisted keyword."""
        site_lists = ["回收", "老师", "公司", "哥", "说", "二手", "代理"]
        return any(re.search(d, site_name) for d in site_lists)

    @staticmethod
    def check_title(data_title, req_para, deny=()):
        """Filter a news title.

        Rejects titles that start with "...", contain any deny tag
        (caller-supplied `deny`, or a default punctuation set when `deny`
        is empty), or match any entry of the built-in stock-noise
        blacklist. Finally requires every group in `req_para` (a
        two-level list of regex patterns, [[...], [...]]) to have at
        least one pattern matching the title.

        Returns True only if all checks pass. An empty `req_para` now
        passes vacuously (previously raised NameError on an unbound
        local).
        """
        if data_title[0:3] == "...":
            return False
        deny_tags = deny if len(deny) > 0 else ["|", "!", "?", ";", "-"]
        for tag in deny_tags:
            if data_title.find(tag) >= 0:
                return False

        deny_list = ["吗","灵异","阴魂","为何","如何","历史","什么","一周看天下","宫廷","话题","7x","—","，","_","。","提问","角度","证券代码","股价回撤","股东户数","开启申购","交易异常波动","新股申购","丨","偏离值达到","户均持股","盘中跌幅","盘中涨幅","(系列)","互动平台","异常波动","封板","快速反弹","集体接待日","量价齐升","强势特征","短线","每日追踪","利空","利好","董秘回复","考察","交流","收盘价","开盘价","暴跌","暴涨","新高","新低","涨停","跌停","概念","现报","异动","大跌","大涨","走弱","早盘","净买入","大宗交易","公告精选","每日收评","点评","快速回调","快速上涨","股价异动","融资融券","消费参考","投资者提问","跑输大盘","跑赢大盘","净偿还","融资余额","累计涨幅","净流入","净流出","净卖出","集锦","看点","周报","加速下跌","加速上涨","点赞","净卖出","龙虎榜","精选","今日","快讯","盘前","盘后","午盘","跑输","大盘","沪股通","深股通","大学","快报","【观点】"]
        for d in deny_list:
            if re.search(d, data_title):
                return False

        # req_para is two-level: [[...], [...]] — each inner group must have
        # at least one matching pattern.
        for group in req_para:
            if not any(re.search(pat, data_title) for pat in group):
                return False
        return True

    @staticmethod
    def check_title_dingzhi(data_title, req_para, deny=()):
        """Custom ("dingzhi") variant of check_title.

        Differences from check_title:
          * the built-in stock-noise blacklist is intentionally disabled,
          * a title exactly equal to any required pattern is rejected.

        Returns True only if all checks pass. An empty `req_para` now
        passes vacuously (previously raised NameError on an unbound
        local).
        """
        if data_title[0:3] == "...":
            return False
        deny_tags = deny if len(deny) > 0 else ["|", "!", "?", ";", "-"]
        for tag in deny_tags:
            if data_title.find(tag) >= 0:
                return False

        # Unlike check_title, the noise blacklist is deliberately skipped here
        # (the original carried it as a dead triple-quoted block).

        # req_para is two-level: [[...], [...]] — each inner group must have
        # at least one matching pattern, but an exact pattern==title hit
        # rejects the title outright.
        for group in req_para:
            matched = False
            for pat in group:
                if pat == data_title:
                    return False
                if re.search(pat, data_title):
                    matched = True
            if not matched:
                return False
        return True
