from pyspark.sql import SparkSession
import pandas as pd

# Build a Hive-enabled Spark session and pull the full sample table
# down into a local pandas DataFrame for analysis.
spark = (
    SparkSession.builder
    .appName('bs')
    .enableHiveSupport()
    .getOrCreate()
)

spark.sql("use liz")

# A positive-only pre-filter (.filter('label==1')) was considered here;
# the full table is loaded so both positive and overall counts are available.
df = spark.sql("select * from 0526_jz_1020_lbs_sample").repartition(1000)
pddf = df.toPandas()


# Distinct (geohash, label) pairs and distinct base-station identities.
# NOTE(review): neither frame is referenced by the metric computation
# below — they appear to be kept for ad-hoc inspection.
geohash = pddf[['geohash', 'label']].drop_duplicates()
bs = pddf[['mcc', 'mnc', 'lac', 'main_cellid']].drop_duplicates()

# BUG FIX: the original script reassigned `pddf = A` here, but `A` is
# never defined anywhere in the file — a guaranteed NameError at runtime
# (almost certainly a leftover placeholder from interactive editing).
# `pddf` already holds the table loaded above, so the reassignment is
# simply removed.
# Per-base-station precision / recall / F1 over geohash coverage.
#
# For every base station keyed by (mcc, mnc, lac, main_cellid):
#   bs1    = number of distinct geohashes seen with label == '1'
#   bs10   = number of distinct geohashes seen overall
#   acc    = bs1 / bs10            (precision of the station's coverage)
#   recall = bs1 / TOTAL_POSITIVE_GEOHASHES
KEY_COLS = ['mcc', 'mnc', 'lac', 'main_cellid']

# Total count of distinct positive geohashes in the full dataset.
# NOTE(review): hard-coded from a prior count — re-derive if the source
# table changes, e.g. pddf.loc[pddf['label'] == '1', 'geohash'].nunique().
TOTAL_POSITIVE_GEOHASHES = 6322184

# Distinct geohash count per station among positive rows.
# NOTE(review): label is compared as the STRING '1' — confirm the column
# dtype really is string, not int.
bs1 = pddf.loc[pddf['label'] == '1', :].groupby(KEY_COLS)['geohash'].nunique()

# Distinct geohash count per station over all rows.
bs10 = pddf.groupby(KEY_COLS)['geohash'].nunique()

# Inner merge keeps only stations with at least one positive geohash.
# Default suffixes apply: geohash_x = positive count, geohash_y = total.
bsall = pd.merge(bs1.reset_index(), bs10.reset_index(), on=KEY_COLS)
bsall['acc'] = bsall['geohash_x'] / bsall['geohash_y']
bsall['recall'] = bsall['geohash_x'] / TOTAL_POSITIVE_GEOHASHES

# F1 = harmonic mean of precision and recall. The inner merge guarantees
# acc > 0, so the denominator can never be zero.
bsall['F1'] = 2 * bsall['acc'] * bsall['recall'] / (bsall['acc'] + bsall['recall'])
bsall.sort_values(['F1'], ascending=False, inplace=True)
