from bilibili_api import comment, sync, video, Credential

from urllib.error import HTTPError, URLError
import urllib.request as urlreq
from IP_Pool import GetRandProxy
from bs4 import BeautifulSoup
from datetime import datetime
from typing import List, Dict
from http import cookiejar
from tqdm import tqdm
import baiduspider
import pandas as pd
import MySQLdb
import json
import time
import os
import re


def SQLExec(sql_cmd) -> str:
    '''
    Execute one or more SQL commands against the MySQL server configured
    in ./mysql_settings.json.

    @param:
        sql_cmd: str or list of str, sql commands
    @function:
        execute the given sql commands in order; commit after each
        success, roll back the failing statement on error
    @return:
        concatenated string form of every fetched result set
    @raise:
        ValueError if sql_cmd is neither a str nor a list
    '''
    if isinstance(sql_cmd, str):
        sql_cmd = [sql_cmd]
    elif not isinstance(sql_cmd, list):
        # the original compared against typing.List, which is never the
        # runtime type of a list, so every list argument raised here
        raise ValueError("sql_cmd must be a str or a list of str")
    with open('./mysql_settings.json', 'r', encoding='utf-8') as f:
        db_info = json.load(f)
    ret = ''
    with MySQLdb.connect(db_info['host_address'], db_info['user_name'], db_info['passwd']) as db:
        cursor = db.cursor()
        for sql in sql_cmd:
            try:
                cursor.execute(sql)
                db.commit()
                # fetchall() returns a tuple of rows; stringify it before
                # appending (the original str += tuple raised TypeError)
                ret += str(cursor.fetchall())
            except MySQLdb.Error:
                # best-effort: undo the failed statement, keep going
                db.rollback()
    return ret


class TextSpider_urllib(object):
    '''
    Web spider based on urllib with cookie, auth and optional proxy support.

    Attributes:
        rootURL:  base URL all derived spiders start from
        proxy_on: whether SetProxy calls take effect
        proxy:    current proxy as an "ip_address:port" string, or None
    '''
    def __init__(self, rootURL: str) -> None:
        self.rootURL = rootURL
        self.proxy_on = False
        self.proxy = None
        self.Cookie = cookiejar.CookieJar()
        self.cookie_handler = urlreq.HTTPCookieProcessor(self.Cookie)
        self.http_handler = urlreq.HTTPHandler()
        self.https_handler = urlreq.HTTPSHandler()
        self.auth = urlreq.HTTPBasicAuthHandler()
        self._rebuild_opener()

    def _rebuild_opener(self) -> None:
        '''(Re)create self.proxy_handler and self.opener from self.proxy.'''
        # ProxyHandler requires a {scheme: url} mapping; the original passed
        # the raw "ip:port" string, which fails inside ProxyHandler.
        # None means "use the environment's proxy settings".
        proxies = None if self.proxy is None else {'http': self.proxy,
                                                   'https': self.proxy}
        self.proxy_handler = urlreq.ProxyHandler(proxies)
        self.opener = urlreq.build_opener(self.proxy_handler, self.auth,
                                          self.http_handler,
                                          self.https_handler,
                                          self.cookie_handler)

    def ProxyOn(self) -> None:
        '''Enable proxy usage (a proxy still has to be set via SetProxy).'''
        self.proxy_on = True

    def ProxyOff(self) -> None:
        '''Disable proxy usage and drop the current proxy from the opener.'''
        self.proxy_on = False
        self.proxy = None
        # the original left the stale proxy inside self.opener
        self._rebuild_opener()

    def SetProxy(self, proxy: str) -> None:
        '''
        @param:
            proxy: string like "ip_address:port"
        @function:
            set proxy IP to access Internet (no-op unless ProxyOn was called)
        @return:
            None
        '''
        if self.proxy_on:
            self.proxy = proxy
            self._rebuild_opener()

    def get_page(self, URL: str) -> str:
        '''
        Fetch a URL through the configured opener.

        @param:
            URL: target URL
        @return:
            decoded page text, or '' on any fetch/decode failure
        '''
        try:
            req = urlreq.Request(URL)
            rsb = self.opener.open(req)
            return rsb.read().decode(errors='ignore')
        except Exception:
            # deliberate best-effort: callers treat '' as "page unavailable"
            return ""


class BaiduForumSpider_urllib(TextSpider_urllib):
    '''
    Web spider for a Baidu Tieba forum (defaults to the "jingju" forum),
    based on urllib. Topics are written one file each under
    ./input_data/BaiduForums/page<N>/, and progress is checkpointed to
    ./BaiduForumSpider_urllib_Record.log so a crawl can be resumed.
    '''
    def __init__(
        self, 
        ForumMainUrl: str = r'https://tieba.baidu.com/f?kw=%E4%BA%AC%E5%89%A7&ie=utf-8&tp=0&pn='
        ) -> None:
        super().__init__(ForumMainUrl)
        self.CurPageNum = 1    # 1-based forum page currently being crawled
        self.CurURLIndex = 0   # index of the next topic to read on that page
        self.HtmlText = ''
        self.UrlBuffer: List = []    # topic URLs of the current page
        self.TitleBuffer: List = []  # topic titles of the current page
        if not os.path.exists('./input_data'):
            os.mkdir('./input_data')
        if not os.path.exists('./input_data/BaiduForums'):
            os.mkdir('./input_data/BaiduForums')

    def WriteLog(self) -> None:
        '''Checkpoint crawl progress so an interrupted run can resume.'''
        with open('./BaiduForumSpider_urllib_Record.log',
                  'w', encoding='utf-8') as f:
            f.write('%d\n%d\n' % (self.CurPageNum, self.CurURLIndex))
            f.write('\n'.join(self.TitleBuffer))
            f.writelines('\n'+'\n'.join(self.UrlBuffer))

    def ReadLog(self) -> tuple:
        '''
        Restore crawl progress written by WriteLog().

        @return:
            (CurPageNum, CurURLIndex, TitleBuffer, UrlBuffer) on success,
            None when the log is absent, empty or malformed
        '''
        if os.path.exists('./BaiduForumSpider_urllib_Record.log'):
            with open('./BaiduForumSpider_urllib_Record.log',
                      'r', encoding='utf-8') as f:
                try:
                    tmp = f.readline()
                    self.CurPageNum = int(tmp)
                except ValueError as err:
                    print(err)
                    return None
                try:
                    tmp = f.readline()
                    self.CurURLIndex = int(tmp)
                except ValueError as err:
                    print(err)
                    return None
                buf = f.readlines()
                buf_len = len(buf)
                if buf_len == 0:
                    return None
                # first half of the remaining lines are titles, second half
                # URLs (the original sliced with `len/2` — a TypeError on
                # the builtin `len` — instead of `buf_len // 2`)
                half = buf_len // 2
                self.TitleBuffer = [i.strip(' \n') for i in buf[:half]]
                self.UrlBuffer = [i.strip(' \n') for i in buf[half:]]
            return self.CurPageNum, self.CurURLIndex, self.TitleBuffer, self.UrlBuffer
        return None

    def ReadTopic(self, topic_url: str):
        '''
        @param:
            topic_url: URL str of target topic
        @function:
            read the page from the given url and extract the post texts
        @return:
            (date string of the first 'YYYY-MM-DD' timestamp found,
             list of post texts)
        @raise:
            IndexError when the page contains no timestamp (e.g. an empty
            or blocked page) — callers use this to rotate the proxy
        '''
        html_text = self.get_page(topic_url)

        soup = BeautifulSoup(html_text, "html.parser")
        texts = [str(i) for i in soup.find_all(name='div', attrs={'class': 'd_post_content j_d_post_content'})]
        # strip HTML tags and runs of spaces
        texts = [re.sub(r'<[^><]*>|[ ]{2,}', "", i) for i in texts]
        Time = [str(i) for i in soup.find_all(name='span', attrs={'class': 'tail-info'})]
        Time = [re.sub(r'<[^><]*>|[ ]{2,}', "", i) for i in Time]
        # keep only entries that start with a 'YYYY-MM-DD' date
        Time = [i for i in Time if re.match('[0-9]{4,4}-[0-9]{2,2}-[0-9]{2,2}', i) is not None]
        return Time[0], texts

    def GetTopic(self, recover: bool = True) -> None:
        '''
        @param:
            recover: try to recover from the last accidental quit
        @function:
            read all the topics on the current page and store each one in
            ./input_data/BaiduForums/page<N>/<title>.txt
        @return:
            None
        '''
        if not recover or (recover and self.ReadLog() is None):
            URL = self.rootURL+'%d' % ((self.CurPageNum-1)*50)
            req = urlreq.Request(URL)
            try:
                rsb = self.opener.open(req)
            except URLError as error:
                print(error)
                if self.proxy_on:
                    self.SetProxy(GetRandProxy(self.proxy))
                else:
                    time.sleep(2)
                rsb = self.opener.open(req)

            self.HtmlText = rsb.read().decode(errors='ignore')
            soup = BeautifulSoup(self.HtmlText, "html.parser")

            items = [str(i) for i in soup.find_all(name='a', attrs={'class': 'j_th_tit'})]
            titles = [re.findall('title=\"([^\"]*)\"', i)[0] for i in items]
            TopicUrls = [r'https://tieba.baidu.com'+re.findall('href=\"([^\"]*)\"', i)[0] for i in items]
            self.TitleBuffer = titles.copy()
            self.UrlBuffer = TopicUrls.copy()
            range_begin = 0
        else:
            URL = self.rootURL+'%d' % ((self.CurPageNum-1)*50)
            range_begin = self.CurURLIndex
            titles = self.TitleBuffer.copy()
            TopicUrls = self.UrlBuffer.copy()

        print('Fetching topic from page %2d(%s) ...' % (self.CurPageNum, URL))

        if not os.path.exists('./input_data/BaiduForums/page%d' % self.CurPageNum):
            os.mkdir('./input_data/BaiduForums/page%d' % self.CurPageNum)

        for i in tqdm(range(range_begin, len(titles))):
            print('  Reading\"%s\"(%s)...' % (titles[i], TopicUrls[i]))
            self.CurURLIndex = i
            self.WriteLog()
            try:
                Time, texts = self.ReadTopic(TopicUrls[i])
            except HTTPError as error:
                print(error)
                time.sleep(1)
                Time, texts = self.ReadTopic(TopicUrls[i])
            except IndexError as error:
                # no timestamp found usually means we were blocked: rotate
                # the proxy and retry once
                print(error)
                self.SetProxy(GetRandProxy(self.proxy))
                time.sleep(2)
                Time, texts = self.ReadTopic(TopicUrls[i])

            # use `Time` from ReadTopic, not the `time` module (the original
            # wrote `time+' '+...`, a TypeError at runtime)
            res = Time+' '+TopicUrls[i]+'\n'+'\n'.join(texts)
            # sanitize the title so it is a valid file name
            titles[i] = titles[i].replace(' ', '_')
            titles[i] = titles[i].replace('\n', '_')
            titles[i] = re.sub(r'[\\\*/><\?\""]', '', titles[i])
            with open('./input_data/BaiduForums/page%d/%s.txt' % (self.CurPageNum, titles[i]), 'w', encoding='utf-8') as f:
                f.write(res)
            time.sleep(1)
        self.CurPageNum += 1


class jingju_spider(TextSpider_urllib):
    '''
    Spider for http://jingju.com that scrapes the audio repertory list
    (aria, performer, play) into ./repertory.csv.
    '''
    def __init__(self) -> None:
        super().__init__(r'http://jingju.com/audio.php?p=')
        self.cur_page = 1  # kept for compatibility; run() tracks its own pages

    def run(self, start_page=1, end_page=534) -> None:
        '''
        Scrape pages [start_page, end_page] of the repertory listing.

        @param:
            start_page: first page to scrape (clamped into 1..534)
            end_page: last page to scrape (clamped into 1..534)
        @function:
            appends rows to a DataFrame and rewrites ./repertory.csv after
            every page so progress survives an interruption
        @return:
            None
        '''
        start_page = 1 if start_page <= 0 or start_page > 534 else start_page
        end_page = 534 if end_page <= 0 or end_page > 534 else end_page
        df = pd.DataFrame(columns=['唱段', '演员', '剧目'])
        # the original ignored start_page/end_page and always crawled 1..534
        for page_num in tqdm(range(start_page, end_page + 1)):
            full_url = self.rootURL+str(page_num)
            html_text = self.get_page(full_url)
            soup = BeautifulSoup(html_text, "html.parser")
            parts = soup.find_all(name='span', attrs={'class':'line'})
            parts = [re.sub(r'<[^><]*>|[ ]{2,}', "", str(i)) for i in parts]
            # performer names sit in every second class-less <td>
            artists = soup.find_all(name='td', class_=False)[1::2]
            artists = [re.sub(r'<[^><]*>|[ ]{2,}', "", str(i)) for i in artists]
            repertory = soup.find_all(name='td', attrs={'class':'text-navy'})
            repertory = [re.sub(r'<[^><]*>|[ ]{2,}', "", str(i)) for i in repertory]

            data = pd.DataFrame([[i,j,k] for i,j,k in zip(parts, artists, repertory)], columns=['唱段', '演员', '剧目'])
            # DataFrame.append was removed in pandas 2.0; concat is the
            # supported equivalent
            df = pd.concat([df, data], ignore_index=True)
            df.to_csv('./repertory.csv', index=None)
            time.sleep(0.1)


def url_utf8_str(s: str) -> str:
    '''
    Percent-encode a string as UTF-8 for use in a URL path.

    @param:
        s: text to encode (e.g. a Chinese keyword)
    @return:
        percent-encoded string, e.g. '京剧' -> '%E4%BA%AC%E5%89%A7'
    '''
    from urllib.parse import quote
    # quote() produces the same uppercase %XX escapes as the old
    # str(bytes)-slicing trick for non-ASCII input, but no longer
    # upper-cases plain ASCII letters (the old code turned 'abc' into 'ABC',
    # corrupting mixed-script keywords)
    return quote(s.encode('utf-8'))


class BaikeSpider(TextSpider_urllib):
    '''
    Spider that fetches Baidu Baike (encyclopedia) entries and collects
    their basic-info tables into a DataFrame, one row per entry, saved
    under ./input_data/BaiduBaike/.
    '''
    def __init__(self, rootURL : str = 'https://baike.baidu.com/item/') -> None:
        super().__init__(rootURL)
        self.df = pd.DataFrame()  # one row per fetched entry
        self.n_data = 0           # number of rows currently in self.df
        if not os.path.exists('./input_data'):
            os.mkdir('./input_data')
        if not os.path.exists('./input_data/BaiduBaike'):
            os.mkdir('./input_data/BaiduBaike')

    def fetch(self, key_word):
        '''
        Fetch a single Baike entry and merge its basic-info table into self.df.

        @param:
            key_word: entry name to look up
        @raise:
            IndexError when the page has neither a 'desc' nor a
            'lemma-summary' section (e.g. the entry does not exist)
        '''
        full_url = self.rootURL + url_utf8_str(key_word)
        req = urlreq.Request(full_url)
        rsb = self.opener.open(req)
        HtmlText = rsb.read().decode("UTF-8", errors='ignore')
        soup = BeautifulSoup(HtmlText, "html.parser")

        # entry summary: strip tags, whitespace and citation marks like [1]
        try:
            desc = re.sub(
                '<[^><]*>|[ \n\xa0]*|\[[0-9]*\]', 
                '', 
                str(soup.find_all(name='dd', attrs={'class':'desc'})[0])
            )
        except IndexError:
            desc = re.sub(
                '<[^><]*>|[ \n\xa0]*|\[[0-9]*\]', 
                '', 
                str(soup.find_all(name='div', attrs={'class':'lemma-summary'})[0])
            )
        # crude gender guess from the summary text, used as a fallback below
        gender = '女' if '女' in desc else ('男' if '男' in desc else '')

        keys = soup.find_all(name='dt', attrs={'class':'basicInfo-item'})
        keys = [re.sub('<[^><]*>|[ \n]*|\\xa0','',str(i)) for i in keys]

        values = soup.find_all(name='dd', attrs={'class':'basicInfo-item'})
        values = [re.sub('<[^><]*>|[ \n]*|\\xa0','',str(i)) for i in values]

        # guard against a column-less DataFrame: the original indexed
        # self.df['词条名'] directly and raised KeyError on the first fetch
        if '词条名' in self.df.columns and key_word in list(self.df['词条名']):
            # entry already present: update its row in place
            row_num = list(self.df[self.df['词条名'] == key_word].index)[0]
            incr = 0
        else:
            row_num = self.n_data
            incr = 1
            self.df.loc[self.n_data, '词条名'] = key_word

        for key, value in zip(keys, values):
            self.df.loc[row_num, key] = value
        # fall back to the guessed gender when the info table lacks one
        # (also guards the KeyError the original hit before the column existed)
        if '性别' not in self.df.columns or self.df.loc[row_num, '性别'] not in ['男', '女']:
            self.df.loc[row_num, '性别'] = gender
        self.n_data += incr

    def fetch_list(self, key_word_list, file_name:str, append:bool=True):
        '''
        Fetch every entry in key_word_list, saving the accumulated table to
        ./input_data/BaiduBaike/<file_name>.csv after each entry so an
        interrupted run loses nothing.

        @param:
            key_word_list: iterable of entry names
            file_name: target CSV name (with or without '.csv')
            append: resume from an existing CSV instead of starting fresh
        '''
        if file_name[-4:] == '.csv':
            file_name = file_name[:-4]
        csv_path = './input_data/BaiduBaike/%s.csv' % file_name
        if os.path.exists(csv_path) and append:
            self.df = pd.read_csv(csv_path)
            self.n_data = len(self.df)
        proc_bar = tqdm(key_word_list)
        for key_word in proc_bar:
            self.fetch(key_word)
            self.df.to_csv(csv_path, index=False)
            proc_bar.set_description('Fetching baike of "%s"...' % key_word)
        print('%d pages has been successfully fetched.' % self.n_data)


def ZhihuResumeGenderParse(html_text: str) -> str:
    '''
    Extract the gender declared on a Zhihu profile page.

    @param:
        html_text: raw HTML of the profile page ('' when the fetch failed)
    @return:
        the 'content' of the <meta itemprop="gender"> tag, or 'Unknown'
        when the page is empty or the tag is missing/malformed
    '''
    if html_text == '':
        return 'Unknown'
    soup = BeautifulSoup(html_text, 'html.parser')
    try:
        gender = soup.find_all(name='meta', attrs={'itemprop':'gender'})[0].attrs['content']
    except (IndexError, KeyError):
        # no gender meta tag (or one without 'content') on this profile;
        # narrowed from the original bare except so real errors propagate
        gender = 'Unknown'
    return gender


def ZhihuQuestionPageParse(html_text: str) -> pd.DataFrame:
    '''
    Parse one Zhihu question page into a DataFrame.

    @param:
        html_text: raw HTML of the question page
    @return:
        DataFrame with columns author, author_URL, n_approve, time, answer
        (column lengths may differ; concat pads the shorter ones with NaN)
    '''
    soup = BeautifulSoup(html_text, 'html.parser')

    # answer bodies, with HTML tags stripped
    answer_nodes = soup.find_all(name='span', attrs={'class': 'RichText ztext CopyrightRichText-richText css-14bz7qe'})
    answers = [re.sub('<[^><]*>', '', str(node)).strip() for node in answer_nodes]

    # author names (zero-width spaces removed) and profile URLs;
    # anonymous users get an empty URL
    author_nodes = soup.find_all(name='span', attrs={'class': 'UserLink AuthorInfo-name'})
    authors = [re.sub('<[^><]*>|\u200b', '', str(node)).strip() for node in author_nodes]
    resume_URLs = []
    for author_name, node in zip(authors, author_nodes):
        url = '' if author_name == '匿名用户' else node.find_all('a')[0].attrs['href']
        resume_URLs.append(url)

    # upvote counts: '3.2万' -> 32000, plain digits -> int
    vote_nodes = soup.find_all(name='button', attrs={'class': 'Button VoteButton VoteButton--up'})
    raw_counts = [''.join(node.attrs['aria-label'].strip().split(' ')[1:]) for node in vote_nodes]
    n_approve = [int(float(c[:-1]) * 10000) if c[-1] == '万' else int(c) for c in raw_counts]

    # answer timestamps (the tooltip text minus its 4-char prefix)
    time_nodes = soup.find_all(name='div', attrs={'class': 'ContentItem-time'})
    times = [node.find_all('span')[0].attrs['data-tooltip'][4:] for node in time_nodes]

    columns = {'author': authors, 'author_URL': resume_URLs,
               'n_approve': n_approve, 'time': times, 'answer': answers}
    frames = [pd.DataFrame(data, columns=[label]) for label, data in columns.items()]
    return pd.concat(frames, axis=1)


# Bilibili session cookies used to build the Credential in VideoInfo.
# SECURITY NOTE(review): credentials are hard-coded in source control;
# they should be loaded from an environment variable or a config file,
# and these cookie values also expire periodically.
SESSDATA = 'dcdc765f%2C1660019805%2Cb824b%2A21'
BILI_JCT = '01c94ec3dad99856eb7a2d96217d2a61'
BUVID3 = 'D162A3D8-90D0-4CE9-9229-6C64A7D2E430143108infoc'


async def VideoInfo(bvid: str) -> Dict:
    '''
    Fetch the metadata of a bilibili video.

    @param:
        bvid: bvid identifier of the bilibili video
    @function:
        query the bilibili API using the module-level cookie credentials
    @return:
        dict of video info as returned by the API
    '''
    cred = Credential(sessdata=SESSDATA, bili_jct=BILI_JCT, buvid3=BUVID3)
    target = video.Video(bvid=bvid, credential=cred)
    return await target.get_info()


async def VideoComments(oid: str) -> Dict:
    '''
    Fetch all comments of a bilibili resource, page by page.

    @param:
        oid: resource id (avid, cvid, etc.)
    @function:
        page through the comment API until the reported total is reached
        or the API stops returning replies
    @return:
        list of raw comment dicts
    '''
    comments = []
    # next page number to request
    page = 1
    # number of comments fetched so far
    count = 0
    while True:
        c = await comment.get_comments(oid, comment.ResourceType.VIDEO, page)
        replies = c.get('replies') or []
        if not replies:
            # defensive: the API can return None/[] replies while the
            # reported total is still higher — the original would either
            # crash on extend(None) or loop forever; stop instead
            break
        comments.extend(replies)
        count += c['page']['size']
        page += 1

        if count >= c['page']['count']:
            # reached the reported total number of comments
            break

    return comments


def GetBilibiliVideoComments(bvid: str) -> pd.DataFrame:
    '''
    Read the comments of a bilibili video into a table.

    @param:
        bvid: bvid of the target video
    @function:
        resolve the video's aid via VideoInfo, then download every comment
        via VideoComments (both run synchronously through sync())
    @return:
        DataFrame with columns ctime, name, gender, content
    '''
    video_info = sync(VideoInfo(bvid))
    raw_comments = sync(VideoComments(video_info['aid']))
    rows = []
    for item in raw_comments:
        rows.append({
            'ctime': item['ctime'],
            'name': item['member']['uname'],
            'gender': item['member']['sex'],
            'content': item['content']['message'],
        })
    return pd.DataFrame(rows)


# def SpiderRead(n_pages: int = 10) -> None:
#     spider = BaiduForumSpider_urllib()
#     for i in range(n_pages):
#         try:
#             spider.GetTopic()
#         except HTTPError as error:
#             print(error)
#             time.sleep(1)
#             spider.GetTopic()
