import datetime
import json

import pyspark

# Spark driver script (legacy Spark 1.x API): `sc` (SparkContext) is assumed
# to be provided by the shell / submit environment.
sqlContext = pyspark.sql.HiveContext(sc)

# Pre-filtered cohort pickled by an earlier job; keys are patient EMPI ids.
final_empis = sc.pickleFile('/home/admin/malgo-empis-18-months-10-events')
# Membership set of cohort ids, used below to filter events on the driver side.
final_empi_ids = set(final_empis.keys().collect())

# Outpatient visits (门诊): diagnosis name + visit start time per patient.
dc1001 = sqlContext.sql(
    'select t_qb_dc1001_3.empi as empi, t_qb_dc1001_3.icd_name as value, t_qb_dc1001_3.jzkssj as event_time, "门诊" as event_type from t_qb_dc1001_3')
# Medications (药品): drug name joined to the order date from dc1003.
dc1004 = sqlContext.sql(
    'select dc1004.empi, dc1004.ypmc as value, dc1003.kdrq as event_time, "药品" as event_type from dc1004 join dc1003 on dc1004.djtxh = dc1003.jlxh')
# Surgeries (手术): operation name (code kept as `extra`) and operation date.
dc5002 = sqlContext.sql('select empi, ssmc as value, ssdm as extra, czrq as event_time, "手术" as event_type from dc5002')
# Imaging exams (影像学检查).
dc3001 = sqlContext.sql('select empi, jcsj as event_time, jcmc as value, "影像学检查" as event_type from dc3001')
# Other exam reports (检查报告).
dc4001 = sqlContext.sql(
    'select empi, cjrq as event_time, bgdmc as value, "检查报告" as event_type, jlxh as id, lbmc as lbmc from dc4001')
# Exam results (检查结果): indicator name joined back to the report date.
dc4002 = sqlContext.sql(
    'select dc4002.empi as empi, dc4002.jczbmc as value, dc4002.ycts as ycts, "检查结果" as event_type, dc4001.cjrq as event_time from dc4002 join dc4001 on dc4002.djtxh = dc4001.jlxh')
# Inpatient admissions (住院): discharge diagnosis and admission date.
dc7004 = sqlContext.sql('select empi, cyzd as value, "住院" as event_type, ryrq as event_time, ryzztz, zlgc from dc7004')

# Merge all event sources, drop rows with empty fields or placeholder /
# out-of-window timestamps, keep only cohort patients, then group the event
# dicts per patient and persist as (empi, [event dict, ...]).
# NOTE(review): calling .map on a SQL result and unioning the results via
# sc.union only works on the legacy Spark 1.x API this script targets.
sc.union(list(
    map(
        lambda df: df.filter(
            "event_time != '' and value != '' and empi != '' and event_time != \"1900-01-01 00:00:00\" and event_time < \"2018-10-10 00:00:00\"").map(
            lambda r: r.asDict()).filter(lambda r: r['empi'] in final_empi_ids),
        [dc1001, dc1004, dc5002, dc3001, dc4001, dc4002, dc7004]
    )
)).groupBy(lambda r: r['empi']).saveAsPickleFile('/home/admin/malgo/empi-events', 100000)

# Reload the grouped events and join them with patient demographics
# (sex, birthday), restricted to the cohort ids.
final_empi_events = sc.pickleFile('/home/admin/malgo/empi-events')
patients = sqlContext.sql('select empi, sex, birthday from dc_patient').map(
    lambda r: (r.empi, {'sex': r.sex, 'birthday': r.birthday})).filter(lambda r: r[0] in final_empi_ids)

# After the join the value is (events_iterable, patient_dict); flatten it into
# one dict per patient: {'sex', 'birthday', 'events': [...]}.
empi_with_data = final_empi_events.join(patients).mapValues(lambda e: {
    'sex': e[1]['sex'],
    'birthday': e[1]['birthday'],
    'events': list(e[0])
})


def analyze_empi(p):
    """Normalize one patient's joined record.

    Parameters
    ----------
    p : dict with keys 'events' (list of dicts carrying 'event_type',
        'event_time' string and 'value'), 'birthday' ('%Y%m%d' string or
        falsy) and 'sex'.

    Returns a dict with time-sorted parsed events, approximate age in
    years (0 if birthday is missing/unparseable), sex, and a flag for any
    cerebral-infarction ("脑梗") diagnosis.
    """
    # Outpatient (门诊) timestamps are date-only; every other source
    # carries a full datetime.
    events = sorted([{
        'event_type': e['event_type'],
        'event_time': datetime.datetime.strptime(e['event_time'],
                                                 '%Y-%m-%d' if e['event_type'] == '门诊' else '%Y-%m-%d %H:%M:%S'),
        'value': e['value']
    } for e in p['events']], key=lambda e: e['event_time'])

    # Flag cerebral infarction from outpatient or inpatient diagnoses only.
    has_naogeng = any('脑梗' in e['value'] for e in events
                      if e['event_type'] == '门诊' or e['event_type'] == '住院')

    # Malformed birthday strings are treated the same as a missing one.
    # (Was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt;
    # strptime raises ValueError on bad format, TypeError on non-strings.)
    try:
        birthday = datetime.datetime.strptime(p['birthday'], '%Y%m%d') if p['birthday'] else None
    except (ValueError, TypeError):
        birthday = None

    return {
        'birthday': p['birthday'],
        'age': ((datetime.datetime.now() - birthday).days if birthday else 0) / 365,
        'sex': p['sex'],
        'events': events,
        'has_naogeng': has_naogeng,
    }


# Enrich every patient record and persist the final per-patient dataset.
empi_with_data.mapValues(analyze_empi).repartition(100).saveAsPickleFile('/home/admin/malgo/empis', 100000)

# Sanity checks: patient count, total event count, and one sample record.
all_empis = sc.pickleFile('/home/admin/malgo/empis')
all_empis.count()
all_empis.map(lambda p: len(p[1]['events'])).sum()
all_empis.take(1)


def cal_values(rdd, event_type):
    """Count distinct event values of `event_type` across all patients.

    Parameters
    ----------
    rdd : RDD of (empi, patient_dict) pairs where patient_dict['events'] is
        a list of event dicts (the output of analyze_empi).
    event_type : event category string (e.g. '住院', '门诊').

    Writes the {value: count} table to /home/admin/malgo/<event_type>.json
    and returns it (backward-compatible: callers previously ignored the
    implicit None).
    """
    values = rdd.flatMap(
        lambda p: [e['value'] for e in p[1]['events'] if e['event_type'] == event_type]).countByValue()
    print('count: %s' % len(values))

    # ensure_ascii=False keeps the Chinese value strings readable on disk.
    # (json was used here without ever being imported — fixed at file top.)
    with open('/home/admin/malgo/%s.json' % event_type, 'w') as f:
        json.dump(values, f, ensure_ascii=False)

    return values


# Export frequency tables for inpatient (住院) and outpatient (门诊) diagnoses.
cal_values(all_empis, '住院')

cal_values(all_empis, '门诊')

# Ad-hoc check: sample how ICD names map onto free-text diagnosis strings.
sqlContext.sql(
    'select t_qb_dc1001_3.icd_name as icd_name, dc1001.zdms as zdms from t_qb_dc1001_3 join dc1001 on t_qb_dc1001_3.jlxh = dc1001.jlxh where t_qb_dc1001_3.icd_name is not null').map(
    lambda r: r.asDict()).groupBy(lambda r: r['zdms']).take(10)