import requests
import re
import csv

class EPS:
    '''
        Minimal client for fetching and exporting statistical tables from the
        EPS (epsnet.com.cn) OLAP service.
    '''

    def __init__(self):
        self.session = requests.Session()
        self.data_url = "http://olap.epsnet.com.cn/center.do"

    def query(self, param):
        '''
            Fetch the table described by *param* from the server.
            :params param: the "param" payload copied from the browser dev
                tools; wrap it in r'' so the embedded backslashes survive
            :return: the raw table (list of rows of plain cell values) on success
            :raises ValueError: if *param* carries no sid or the server
                response does not contain the expected table structure
        '''
        # The sid is embedded at the tail of the param JSON; without it the
        # request cannot be built, so fail fast with a real exception.
        sid_match = re.search(r'"sid":"(.*?)"}', param)
        if sid_match is None:
            raise ValueError("Wrong param! check it!")
        sid = sid_match.group(1)

        headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8;",
            "Host": "olap.epsnet.com.cn",
            "Origin": "http://olap.epsnet.com.cn",
            "Pragma": "no-cache",
            "Referer": f"http://olap.epsnet.com.cn/auth/platform.html?sid={sid}",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36 Edg/80.0.361.69",
        }

        form_data = {
            "param": param,
            "sid": sid
        }

        try:
            resp = self.session.post(url=self.data_url, headers=headers, data=form_data)
            # Expected response shape: [{"tableVO": {"values": [...]}}, ...]
            values = resp.json()[0].get("tableVO").get("values")
            return self._raw(values)
        except (requests.RequestException, ValueError, LookupError,
                AttributeError, TypeError) as exc:
            # The original code raised a bare string here, which is itself a
            # TypeError in Python 3; raise a real exception and keep the cause.
            raise ValueError("Wrong param! check it!") from exc

    def write_csv(self, raw_data: list, file_path: str, raw=False):
        '''
            Write a queried raw data table to a csv file.
            :params raw_data: the raw data table returned by query
            :params file_path: path of the csv file to write
            :params raw: if True, write the table in the same layout as the
                EPS database; otherwise (default) write formatted panel data
        '''
        # newline="" lets the csv module control line endings itself
        # (otherwise every record is followed by a blank line on Windows).
        with open(file_path, "w", encoding="utf-8", newline="") as f:
            if raw:
                csv.writer(f).writerows(raw_data)
            else:
                formatted_data = self._formatted(raw_data)
                if not formatted_data:
                    return  # empty table: no header row can be derived
                writer = csv.DictWriter(f, formatted_data[0].keys())
                writer.writeheader()
                writer.writerows(formatted_data)

    def _formatted(self, table):
        '''
            Reshape the raw EPS table into ordinary panel data.
            :params table: raw table produced by query
            :return: list of dicts with keys region, year, value1, value2 ...
        '''
        # The first row holds the year labels: everything from the first
        # truthy cell to the end of the row.
        first_year_col = 0
        for first_year_col, cell in enumerate(table[0]):
            if cell:
                break
        year_lst = table[0][first_year_col:]
        gap = len(table[0]) - len(year_lst)

        result = []
        # (region, year) -> record already appended to result; gives O(1)
        # lookup instead of the original O(n) scan per cell.
        by_region_year = {}
        key = ""
        for row in table[1:]:
            if row[0]:
                key = row[0]  # a new indicator name starts here
            region = row[1]
            for index, col in enumerate(row[gap:]):
                record = by_region_year.get((region, year_lst[index]))
                if record is None:
                    record = {"region": region, "year": year_lst[index]}
                    by_region_year[(region, year_lst[index])] = record
                    result.append(record)
                record[key] = col  # add/overwrite this indicator's value
        return result

    def _check(self, item: dict, dict_lst: list) -> int:
        '''
            Locate an entry with the same region and year as *item*.
            No longer used internally (kept for backward compatibility).
            :return: 1-based index of the match in dict_lst, or False when
                no entry matches (the original annotation said bool, but a
                positive int is returned on success)
        '''
        for index, entry in enumerate(dict_lst):
            # Same region AND same time means the element already exists.
            if entry["region"] == item["region"] and entry["year"] == item["year"]:
                return index + 1
        return False  # empty list or no match

    def _raw(self, table):
        '''
            Return the table exactly as laid out in the EPS database,
            with each cell reduced to its plain value.
        '''
        return [[self._parse(cell) for cell in row] for row in table]

    def _parse(self, item):
        # A cell is either a dict carrying a "value" key, or empty/None.
        if item:
            return item.get("value")
        return None


# form_data = { # none of the fields inside param may be removed
#     "param": '{"newSheetId":"' + str(int(time.time())) + '000s0.9841280428782073",' + # random string, do not remove
#         r'"s1":{"dealStr":"empty"},"s2":{"method":"","type":""},"s3":{"index8020":"0","fontStyle8020":"","bgColor8020":""},"s4":{"orderStr":"","index":""},"s5":{"dealStr":"","showBackColor":"","showFontStyle":""},' + # content that never seems to change, do not remove
#         '"cubeId":' + cubeID + # id of the database being queried
#         ',"sheetId":"D84041D1AF9B4A5881AA562A21F9B11D",' + # looks optional, but do not remove
#         r'"dims":"[{\"codeName\":\"regionCode\",\"codes\":[\"1503\",\"1504\",\"1505\",\"1506\",\"1507\",\"1508\",\"1509\",\"1511\",\"1510\",\"1512\",\"1513\"]},' + # clearly the field listing the table's regions
#         r'{\"codeName\":\"timeCode\",\"codes\":[\"2016010101\",\"2017010101\",\"2015010101\",\"2014010101\",\"2013010101\",\"2012010101\",\"2011010101\",\"2010010101\",\"2009010101\",\"2008010101\",\"2007010101\",\"2006010101\",\"2005010101\",\"2004010101\",\"2003010101\",\"2002010101\",\"2001010101\",\"2000010101\"]},' + # clearly the field listing the table's time periods
#         r'{\"codeName\":\"indicatorCode\",\"codes\":[\"0401\",\"0403a\"]},' + # indicator ids
#         r'{\"codeName\":\"classify_code\",\"codes\":[\"01\"]}]","metaColumns":"indicatorCode-regionCode-classify_code","metaRows":"timeCode",' + 
#         '"sid":"' + sid + '"}', # sid
#     "sid": sid,
# }


if __name__ == "__main__":
    # Example usage: query with a hard-coded param captured from the browser
    # dev tools and dump the formatted panel data to data.csv.
    eps = EPS()
    # NOTE(review): the embedded sid/sheetId are session-specific and have
    # presumably expired — capture fresh values before running.
    param = r'{"newSheetId":"1584719804000s1.3214255499472105","s1":{"dealStr":"empty"},"s2":{"method":"","type":""},"s3":{"index8020":"0","fontStyle8020":"","bgColor8020":""},"s4":{"orderStr":"","index":""},"s5":{"dealStr":"","showBackColor":"","showFontStyle":""},"cubeId":627,"sheetId":"8E496B9CBE214ED49D55DDF1359150A0","dims":"[{\"codeName\":\"regionCode\",\"codes\":[\"0904\",\"0905\",\"0906\",\"0907\",\"0908\",\"0909\"]},{\"codeName\":\"timeCode\",\"codes\":[\"2003010101\",\"2004010101\",\"2005010101\",\"2007010101\",\"2008010101\",\"2009010101\",\"2006010101\",\"2010010101\"]},{\"codeName\":\"indicatorCode\",\"codes\":[\"0401\",\"0701\",\"130101\",\"130102\"]},{\"codeName\":\"classify_code\",\"codes\":[\"01\"]}]","metaColumns":"indicatorCode-regionCode-classify_code","metaRows":"timeCode","sid":"6D67CD23E0D767EC8F0315CAF7696BD2"}'
    raw_data = eps.query(param)  # performs a live HTTP POST
    eps.write_csv(raw_data, "data.csv")