import json
import random
import time
from collections.abc import Iterator
from urllib.parse import urljoin

from lxml import etree
import pandas as pd
import requests


class CSSCIFactorCrawler:
    """Scrape journal impact factors from CNKI's journal-navigation search.

    Replays the XHR POST that the site's search page issues, then parses the
    returned HTML fragment page by page until an empty page is reached, and
    finally writes the collected rows to ``CSSCI_IF.csv``.
    """

    def __init__(self):
        # XHR endpoint behind https://navi.cnki.net/knavi/journals/search
        self.info_url = 'https://navi.cnki.net/knavi/journals/searchbaseinfo'

        self.headers = {
            'Accept': 'text/plain, */*; q=0.01',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Host': 'navi.cnki.net',
            'Origin': 'https://navi.cnki.net',
            'Referer': 'https://navi.cnki.net/knavi/journals/search',
            'User-Agent': (
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                'AppleWebKit/537.36 (KHTML, like Gecko) '
                'Chrome/108.0.0.0 Safari/537.36 Edg/108.0.1462.46'
            ),
            'X-Requested-With': 'XMLHttpRequest',
        }

        # Opaque search-state payload copied verbatim from the browser's
        # request body; the server expects it serialized as a JSON string.
        # Typos like "Platfrom" are the server's own field names — keep them.
        search_state_json = {
            "StateID": "",
            "Platfrom": "",
            "QueryTime": "",
            "Account": "knavi",
            "ClientToken": "",
            "Language": "",
            "CNode": {
                "PCode": "JOURNAL",
                "SMode": "", "OperateT": ""
            },
            "QNode": {
                "SelectT": "",
                "Select_Fields": "",
                "S_DBCodes": "",
                "QGroup": [
                    {"Key": "Navi",
                     "Logic": 1,
                     "Items": [],
                     "ChildItems": [
                         {
                             "Key": "journals",
                             "Logic": 1,
                             "Items": [{
                                 "Key": "datasource",
                                 "Title": "",
                                 "Logic": 1,
                                 "Name": "EI",
                                 "Operate": "",
                                 "Value": "0009?",
                                 "ExtendType": 0,
                                 "ExtendValue": "",
                                 "Value2": ""
                             }],
                             "ChildItems": []
                         }
                     ]
                     }
                ],
                "OrderBy": "OTA|DESC",
                "GroupBy": "",
                "Additon": ""
            }
        }
        self.form_data = {
            'searchStateJson': json.dumps(search_state_json),
            'displaymode': 1,
            # overwritten per request in fetch_single_page
            'pageindex': 2,
            'pagecount': 100,
            'index': 'datasource',
            'searchType': '刊名(曾用刊名)',
            'switchdata': '',
        }

        # One Session so cookies and the connection are reused across pages.
        self.session = requests.Session()

    def fetch_if(self) -> None:
        """Fetch all CSSCI journals' impact factors and save them to CSV.

        Walks result pages starting at 1 until a page yields no journals,
        then writes the accumulated rows to ``CSSCI_IF.csv`` with the name
        and impact-factor columns first.
        """
        result = []

        index = 1
        while True:
            current_result = list(self.fetch_single_page(index))
            if not current_result:
                break

            print(f'fetched information contained in page {index}!')
            result += current_result

            index += 1
            # polite delay between requests so we don't hammer the server
            time.sleep(random.randint(1, 2))

        if not result:
            # Nothing scraped (blocked, network failure, or page-layout
            # change): bail out instead of raising a KeyError below when
            # selecting columns from an empty DataFrame.
            print('no journal information fetched; nothing written')
            return

        df = pd.DataFrame(result)

        # Put the headline columns first (only those actually present, in
        # case no journal on any page carried a given factor), then keep the
        # remaining columns in their original order.
        front = [c for c in ('期刊名称', '复合影响因子', '综合影响因子')
                 if c in df.columns]
        cols = front + df.columns[~df.columns.isin(front)].tolist()
        df = df[cols]
        df.to_csv('CSSCI_IF.csv', index=False)

    def fetch_single_page(self, index_num: int) -> Iterator[dict]:
        """Fetch a single result page from self.info_url.

        Args:
            index_num: 1-based page number to request.

        Yields:
            A dict with the journal's name, detail-page link, tags and
            publisher, plus whichever of the impact-factor / CN / ISSN
            fields appear on the page.
        """
        self.form_data['pageindex'] = index_num

        resp = self.session.post(
            url=self.info_url, headers=self.headers, data=self.form_data).text
        html = etree.HTML(resp)

        for item in html.xpath('//ul[@class="list_tup"]/li'):
            # Yes, the site really names this div class 'detials' (sic).
            # The bare-string predicate is always true, so this selects all
            # child divs and [1] picks the second one — the detail panel.
            item_detail = item.xpath('./a/div["detials"]')[1]

            result = {
                '期刊名称': item_detail.xpath('.//h1/text()')[0].strip(),
                '访问链接': urljoin(self.info_url, item.xpath('./a/@href')[0]),
                '标签': ','.join(item_detail.xpath('./span/text()')),
                '主办单位': item_detail.xpath('.//p/@title')[0],
            }

            # Each <p> reads like '复合影响因子：1.234'; drop the label and
            # the one-character separator that follows it.
            for p in item_detail.xpath('./p/text()'):
                for col in ('复合影响因子', '综合影响因子', 'CN', 'ISSN'):
                    if col in p:
                        result[col] = p.strip()[len(col) + 1:]

            yield result


def main() -> None:
    """Run the crawler end to end and report completion."""
    CSSCIFactorCrawler().fetch_if()
    print('job done!')


if __name__ == '__main__':
    main()
