# data backend

import asyncio
from io import StringIO

import pandas as pd
import requests
# from requests.adapters import HTTPAdapter, Retry
from bs4 import BeautifulSoup
from datefinder import find_dates
from requests_html import HTMLSession, AsyncHTMLSession
from tqdm import tqdm

class CallForPaperSource:
    """Abstract base for call-for-papers providers.

    Concrete subclasses fetch announcements from a specific publisher
    endpoint and return them via :meth:`extract_calls`.
    """

    def __init__(self, url):
        # Endpoint the subclass scrapes or queries.
        self.url = url

    def extract_calls(self, self_research=None):
        """Fetch calls for papers; must be overridden by subclasses."""
        raise NotImplementedError("Subclasses should implement this method")

class ScienceDirectSource(CallForPaperSource):
    """Scrape open calls for papers from ScienceDirect's browse page.

    The listing page yields title/link/deadline per call; each call's
    detail page is then fetched to recover the full description and,
    when the listing lacks one, the submission deadline.
    """

    def __init__(self, url='https://www.sciencedirect.com/browse/calls-for-papers?subject=computers-in-earth-sciences'):
        super().__init__(url)

    def extract_calls(self, self_research='earth'):
        """Return a DataFrame of calls for papers.

        Parameters
        ----------
        self_research : str, optional
            Case-insensitive keyword; calls whose title or description
            mention it are flagged in the ``related`` column.

        Returns
        -------
        pandas.DataFrame
            Columns ``['title', 'journal', 'deadline', 'related', 'link',
            'content']``.
        """
        return asyncio.run(self._extract_calls(self_research))

    async def _extract_calls(self, self_research='earth'):
        session = AsyncHTMLSession()
        response = await session.get(self.url)
        soup = BeautifulSoup(response.html.html, 'html.parser')

        # Site-specific layout: the list of calls is an <ol> of
        # <li class="publication"> inside this grid container.  This will
        # break if ScienceDirect changes its markup.
        div_element = soup.find('div', class_='col-xs-22 col-lg-16')
        ol_element = div_element.find('ol')
        calls_list = ol_element.find_all('li', class_='publication')

        base_url = "https://www.sciencedirect.com"
        calls_data = []
        for call in tqdm(calls_list):
            title = call.find('span').text if call.find('span') else ''
            link = base_url + call.find('a')['href'] if call.find('a') else ''
            description = call.find('p').text if call.find('p') else ''
            # The italic footer reads e.g. "Submission deadline: 30 June 2024".
            split_ddl = call.find('div',
                                  class_='text-s u-text-italic').text.split('deadline:')
            if len(split_ddl) == 2:
                ddl = next(find_dates(split_ddl[1].strip()))
            else:
                ddl = None  # resolved later from the detail page text
            calls_data.append({
                'title': title,
                'link': link,
                'description': description,
                'content': None,
                'deadline': ddl,
                'related': False,
            })
        calls_data = pd.DataFrame(calls_data)

        keyword = (self_research or '').lower()
        for si_i in tqdm(range(calls_data.shape[0])):
            si_link = calls_data.iloc[si_i, :]['link']
            # The URL fragment after '#' is the id of the <h3> heading that
            # anchors this call on the journal's call-for-papers page.
            si_id = si_link.split('#')[1]
            response2 = await session.get(si_link)
            soup2 = BeautifulSoup(response2.html.html, 'html.parser')
            result_set = soup2.find('h3', id=si_id).parent.find_all('p')
            # Join the text of *all* paragraphs.  (Bug fix: the original
            # iterated only the children of result_set[0], dropping every
            # paragraph after the first.)
            text_content = ''.join(element.get_text() for element in result_set)
            calls_data.loc[si_i, 'content'] = text_content

            if calls_data.loc[si_i, 'deadline'] is pd.NaT:
                try:
                    # Fall back to mining the free text for a deadline marker.
                    txt_ddl = text_content.split('deadline:')
                    if len(txt_ddl) < 2:
                        txt_ddl = text_content.split('Deadline:')
                    if len(txt_ddl) < 2:
                        txt_ddl = text_content.split('by')
                    if len(txt_ddl) > 1:
                        calls_data.loc[si_i, 'deadline'] = next(find_dates(txt_ddl[1].strip()))
                    else:
                        # No marker at all: take the first date in the text.
                        calls_data.loc[si_i, 'deadline'] = next(find_dates(text_content.strip()))
                        print("get first date as ddl " + str(calls_data.loc[si_i, 'deadline']))
                        print(si_i)
                except Exception as e:
                    # Best effort: leave the deadline missing and log context.
                    print("deadline not found:", e)
                    print(si_i)
                    print(calls_data.loc[si_i, 'deadline'])
                    print(text_content)
                else:
                    print(calls_data.loc[si_i, 'deadline'])

            # Flag calls that mention the research keyword in title or body.
            # (Replaces a hard-coded ``si_i == 3`` placeholder.)
            if keyword:
                haystack = (str(calls_data.loc[si_i, 'title']) + ' ' + text_content).lower()
                calls_data.loc[si_i, 'related'] = keyword in haystack

        # Description reads "Journal\xa0•\xa0…" — keep only the journal name.
        calls_data['journal'] = calls_data['description'].apply(lambda x: x.split('\xa0•\xa0')[0])
        calls_data = calls_data.drop('description', axis=1)
        return calls_data[['title', 'journal', 'deadline', 'related', 'link', 'content']]

class TaylorFrancisSource(CallForPaperSource):
    """Fetch open calls for papers from the Taylor & Francis WordPress API.

    Queries both the ``special_issues`` and ``article_collections``
    endpoints and stacks them into one DataFrame.
    """

    def __init__(self, url='https://think.taylorandfrancis.com/wp-json/wp/v2/'):
        super().__init__(url)

    def extract_calls(self, self_research='earth'):
        """Return a DataFrame of calls for papers.

        Parameters
        ----------
        self_research : str, optional
            Accepted for interface parity with other sources; currently
            unused by this backend.

        Returns
        -------
        pandas.DataFrame
            Columns ``['title', 'journal', 'deadline', 'related', 'link',
            'content']``.
        """
        from datetime import datetime

        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }

        def build_url(endpoint):
            # Subject area 1690 is the site-specific Earth-sciences taxonomy
            # id; only the first 100 entries of page 1 are fetched.
            return (self.url + endpoint
                    + "?_fields=id,date,link,title,special_issues_tax_subject_areas,"
                    + endpoint
                    + "&per_page=100&special_issues_tax_subject_areas=1690&page=1")

        def get_tf(url):
            # GET one endpoint and parse the JSON array into a DataFrame.
            response = requests.get(url, headers=headers)
            response.raise_for_status()  # fail loudly instead of parsing an error page
            return pd.read_json(StringIO(response.text))

        df_si = get_tf(build_url("special_issues"))
        df_article = get_tf(build_url("article_collections"))
        # Rename the payload-specific last column ('special_issues' /
        # 'article_collections') to a common name so the frames can stack.
        df_si.columns = ['id', 'date', 'link', 'title', 'special_issues_tax_subject_areas', 'collection']
        df_article.columns = df_si.columns
        res_tf = pd.concat([df_si, df_article], ignore_index=True)

        # Titles arrive as {'rendered': ...}; default to '' on a missing key
        # (the original yielded None here and crashed on the ':' check below).
        res_tf['title'] = res_tf['title'].apply(
            lambda x: x.get('rendered', '') if isinstance(x, dict) else '')
        # Strip a leading "Call for papers:"-style prefix if present.
        res_tf['title'] = res_tf['title'].apply(
            lambda x: ':'.join(x.split(':')[1:]).strip() if ':' in x else x)

        def pick(collection, si_key, ac_key):
            # Each collection dict carries either special-issue or
            # article-collection keys; take whichever is present.
            return collection[si_key][0] if si_key in collection else collection[ac_key][0]

        res_tf['deadline'] = res_tf['collection'].apply(
            lambda x: pick(x, '_special_issues_deadline', '_article_collections_deadline'))
        res_tf['journal'] = res_tf['collection'].apply(
            lambda x: pick(x, '_special_issues_journal_title', '_article_collections_journal_title'))
        res_tf['content'] = res_tf['collection'].apply(
            lambda x: pick(x, '_special_issues_copy', '_article_collections_copy'))
        # Deadlines look like "30 June 2024"; %B needs the full month name.
        res_tf['deadline'] = res_tf['deadline'].apply(lambda x: datetime.strptime(x, '%d %B %Y'))
        res_tf['related'] = False
        res_tf = res_tf.rename(columns={'special_issues_tax_subject_areas': 'subject'}).drop('collection', axis=1)
        return res_tf[['title', 'journal', 'deadline', 'related', 'link', 'content']]


if __name__ == '__main__':
    # Manual smoke test: fetch one source and show a preview.  The original
    # discarded the result, so running the script produced no visible output.
    ds = TaylorFrancisSource()
    # ds = ScienceDirectSource()
    calls = ds.extract_calls()
    print(calls.head())
