import pandas as pd
import numpy as np
import json
from tqdm import tqdm

"""
API and compatibility
"""
# API usage-count table produced by step 1-2.
apiUsedTimes = pd.read_csv("./output/1-2API被使用的次数.csv", sep=",")
# API node info table (tab-separated); position 1 holds the url and
# position 6 the category code ("c").
apiNode = pd.read_csv("./data/api_nodes_estimator.csv", sep="\t")
# Join usage counts with each API's category, drop APIs without a match,
# and sort by category then usage count (both descending).
df1 = (
    pd.merge(
        apiUsedTimes, apiNode.iloc[:, [1, 6]], left_on="api", right_on="url", how="left"
    )
    .drop(columns="url")  # join key duplicated by the merge; keep "api" only
    .dropna(how="any")
    .rename(columns={"c": "category"})
    .sort_values(by=["category", "usedTimes"], ascending=False, ignore_index=True)
)
# Usage-count + category table. index=False (not the truthy-abuse 0) is the
# idiomatic way to omit the row index.
df1.to_csv("./output/3-1API使用次数和Categories表.csv", index=False)


# Table of co-invocation pairs between APIs (one row per pair).
apiPairs = pd.read_csv("./data/raw/all_pairs.csv", sep=",")
# Keep only categories that contain more than one API.
df1 = df1[df1.groupby("category").category.transform(len) > 1]
df1.index = range(len(df1))
# Compatibility of an API = number of pair rows whose "r" column equals the
# API url. One value_counts + map replaces the original per-API
# apiPairs.query("'%s'==r" % api) loop, which was O(n*m), broke on urls
# containing a quote character, and relied on deprecated positional
# Series indexing (.count()[0]).
# NOTE(review): the old code counted non-nulls in apiPairs' first column of
# the matched rows; this assumes that column has no NaNs — verify.
pair_counts = apiPairs["r"].value_counts()
df1["compatibility"] = df1["api"].map(pair_counts).fillna(0).astype(int)
# Only categories with more than three APIs are reported on.
df2 = df1[df1.groupby("category").category.transform(len) > 3]
categories = df2["category"].drop_duplicates(keep="first")
categories.index = range(len(categories))
# For each such category: the correlation between usage count and
# compatibility, plus the number of APIs in that category.
rows = []
for c in categories:
    # Select the two numeric columns explicitly: DataFrame.corr() on a frame
    # still containing the non-numeric "category" column raises on
    # pandas >= 2.0 (it no longer silently drops non-numeric columns).
    sub = df1.loc[df1["category"] == c, ["usedTimes", "compatibility"]]
    rows.append((c, sub["usedTimes"].corr(sub["compatibility"]), len(sub)))
dfccc = pd.DataFrame(rows, columns=["category", "correlation", "categoryCount"])
dfccc.dropna(how="any", inplace=True)  # drop categories with undefined corr
dfccc.sort_values(by=["categoryCount"], ascending=False, inplace=True)
dfccc.index = range(len(dfccc))
dfccc.to_csv("./output/3-2同一cate下兼容性与共同调用相关系数表.csv", index=False)




"""
API and service protocol (style)
"""
# Service-protocol info for the active API nodes. Use a context manager so
# the file handle is closed (the original never closed it).
with open("./data/raw/api_mashup/active_apis_data.json") as f:
    activeApiInfo = json.load(f)

ApiInfo = dict()
# For every API, concatenate the protocol style of each of its versions
# into a single ";"-terminated string, keyed by the (stripped) API url.
for api in activeApiInfo:
    if api is None:  # `is None`, not `== None` (PEP 8)
        continue
    RC = "".join(version["style"] + ";" for version in api["versions"])
    ApiInfo[api["url"].strip()] = RC
ApiInfo1 = pd.DataFrame(list(ApiInfo.items()), columns=["url", "style"])
# Attach the style string to the usage/category table, dropping APIs with
# no style information or an "Unspecified" style.
df3 = pd.merge(df1, ApiInfo1, left_on="api", right_on="url", how="left")
df3.drop(["url"], axis=1, inplace=True)
df3.dropna(how="any", inplace=True)
df3.drop(df3[df3["style"] == "Unspecified;"].index, inplace=True)
df3.index = range(len(df3))
df3.to_csv("./output/3-4API与服务协议表.csv", index=False)
# Frequency of each style combination.
df3["style"].value_counts().reset_index().to_csv(
    "./output/3-5API服务协议频次表.csv", index=False, header=["style", "count"]
)


# Categories with at least two APIs.
df2 = df1[df1.groupby("category").category.transform(len) > 1]
categories = df2["category"].drop_duplicates(keep="first")
categories.index = range(len(categories))
# (A bare `categories` expression sat here in the original — a notebook
# leftover with no effect in a script; removed.)

# Protocol keyword (substring of the style string) -> output column name.
# A table replaces the original five hand-maintained parallel counters.
STYLE_COLUMNS = [
    ("REST", "RESTCount"),
    ("RPC", "RPCCount"),
    ("FEED", "FEEDCount"),
    ("Indirect", "IndirectCount"),
    ("Streaming", "StreamingCount"),
]
Interdata = []
# For each category, sum the usage counts of APIs whose style string
# contains each protocol keyword. The substring test means a style such as
# "XML-RPC" would also count toward "RPC" — same behavior as the original.
for c in tqdm(categories):
    df = df3[df3["category"] == c]
    counts = {col: 0 for _, col in STYLE_COLUMNS}
    for i in df.index:
        times = df.loc[i, "usedTimes"]
        styles = df.loc[i, "style"]
        for keyword, col in STYLE_COLUMNS:
            if keyword in styles:
                counts[col] += times
    Interdata.append([c] + [counts[col] for _, col in STYLE_COLUMNS])

df4 = pd.DataFrame(
    columns=["category"] + [col for _, col in STYLE_COLUMNS],
    data=Interdata,
)
df4.to_csv("./output/3-6category与服务协议表.csv", index=False)
print(df4)
