# -*- coding: utf8 -*-

import sys
import xlrd
import logging
import glob
import datetime
import re
import os
import csv
import getopt
import onebox_feed_writer
import app_engine_writer
import re

# Encoding of the source spreadsheets/CSV files and of emitted log text.
SRC_ENC = "utf8"
LOG_ENC = "utf8"

# Maps raw (utf8 byte string) column headers found in input files to the
# canonical field names used throughout this module.  Several alternate
# spellings intentionally map to the same canonical field.
FIELDS_MAP = {
  "姓名" : "name",
  "性别" : "sex",
  "年龄" : "age",
  "联系电话" : "contact",
  "家庭住址" : "address",
  "单位/学校" : "unit",
  "学校/单位" : "unit",
  "其他" : "other",
  "其它" : "other",
  "医院" : "hospital",
  "科室" : "section",
  "伤情" : "status",
  "受伤情况" : "status",
  "医院联系方式" : "hospital_contact",
  "医院联系电话" : "hospital_contact",
  "来源" : "source",
  "link" : "source",
  "shard" : "shard",
  "pid" : "pid"
}

# Canonical field names; name here should match base.Person fields.
FIELDS = ("name", "sex", "age", "contact", "address", "unit", "other",
          "hospital", "section", "status", "hospital_contact", "source", "shard", "pid")

# Per-field completeness weight, parallel to FIELDS; a populated field
# contributes its weight to Person.score (see Person.calc_score).
FIELDS_SCORE = (1, 1, 1, 1, 1, 1, 0,
                2, 2, 2, 2, 1, 0, 0)

def FileModifiedTime(fname):
  """Return the last-modification timestamp of *fname* (seconds since epoch)."""
  return os.path.getmtime(fname)

class Person:
  """One record parsed from a single row of an input file.

  Every name listed in the module-level FIELDS tuple becomes an instance
  attribute; ``update_time`` (source-file mtime) and ``score`` (completeness
  weight, see calc_score) are bookkeeping values filled in by callers.
  """

  def __init__(self):
    for field in FIELDS:
      setattr(self, field, None)
    self.update_time = None
    self.score = None

  def validate(self):
    """Return True when this record is usable.

    A usable record carries a name plus at least one of hospital /
    hospital_contact, and the name contains none of the placeholder
    punctuation characters checked below.
    """
    if not self.name:
      return False
    if not (self.hospital or self.hospital_contact):
      return False
    for junk in ("？", "！", "（", "）", "?", "×"):
      if junk in self.name:
        return False
    return True

  def calc_score(self):
    """Recompute self.score: each populated field adds its FIELDS_SCORE weight."""
    self.score = sum(weight for field, weight in zip(FIELDS, FIELDS_SCORE)
                     if getattr(self, field))

  def norm(self):
    """Normalize fields in place, recompute the score, and return self."""
    # Strip ASCII spaces, full-width spaces and digits out of the name.
    self.name = re.sub(r'[0-9]+', '',
                       self.name.replace(" ", "").replace("　", ""))
    # An age of exactly 0 carries no information.
    if self.age and self.age.isdigit() and int(self.age) == 0:
      self.age = None
    # Placeholder hospital names are treated as missing.
    if self.hospital in ["医院不详", "未知医院"]:
      self.hospital = None
    # Collapse every falsy value (empty strings in particular) to None.
    for field in FIELDS:
      if not getattr(self, field):
        setattr(self, field, None)
    # With no hospital info at all, fall back to the free-form "other" column.
    if not self.hospital and not self.hospital_contact:
      self.hospital, self.other = self.other, None
    self.calc_score()
    return self

  def __str__(self):
    pairs = ["%s:%s" % (field, getattr(self, field)) for field in FIELDS]
    return ', '.join(pairs) + " (%d, %d)" % (self.score, self.update_time)

def NormString(text):
  """Normalize a unicode cell value and return it as a SRC_ENC byte string.

  Leading/trailing whitespace is stripped, HTML non-breaking-space entities
  are removed, and tabs/newlines are flattened to single spaces.
  """
  # Renamed parameter: the original shadowed the builtin `str`.
  text = text.strip()
  text = text.replace("&nbsp;", "").replace("\t", " ").replace("\n", " ")
  return text.encode(SRC_ENC)

def ParseCell(cell, row_id, col_id, field_name):
  """Convert one xlrd cell into a normalized utf8 byte string.

  Returns "" for empty or unusable cells; warnings are logged for
  unexpected cell types and for implausible name values.
  """
  if cell is None:
    return ""

  result = None
  # cell.ctype values are xlrd cell-type constants:
  # 0=XL_CELL_EMPTY, 1=XL_CELL_TEXT, 2=XL_CELL_NUMBER, 3=XL_CELL_DATE.
  if cell.ctype == 0:
    result = u""
  elif cell.ctype == 1:
    result = cell.value
  elif cell.ctype == 2:
    result = str(int(cell.value))
  elif cell.ctype == 3:
    # Date cells are only meaningful for the age column: interpret the value
    # as a birthday and derive an age.  1899-12-30 is the Excel/xlrd epoch
    # for the 1900 date system; 2008 is the hard-coded reference year
    # (presumably the year the data was collected — TODO confirm).
    if field_name == "age":
      birthday = datetime.date(1899, 12, 30) + datetime.timedelta(cell.value)
      age = 2008 - birthday.year
      if age > 0 and age < 99:
        result = str(age)

  # result stays None for unhandled ctypes or out-of-range derived ages.
  if result is None:
    logging.warning("Row %d, Col %d, Fieldname %s, Unknown cell type: %d, %s" %
                    (row_id, col_id, field_name, cell.ctype, cell.value))
    return ""

  result = NormString(result)
  if field_name == "name":
    # NormString returns utf8 bytes, so len() counts bytes here: one CJK
    # character is 3 bytes.  Names must come from text cells and span
    # 4..12 bytes (i.e. roughly 2-4 CJK characters).
    if cell.ctype != 1 or len(result) <= 3 or len(result) > 12:
      logging.warning("Row %d, Col %d, Fieldname %s, Bad name: %d, %s" %
                      (row_id, col_id, field_name, cell.ctype, result))
      result = ""

  return result

def ParseCSVCell(cell, row_id, col_id, field_name):
  """Convert one raw CSV cell (utf8 byte string) into a normalized value.

  Returns "" for missing cells and for name values of implausible length
  (length is measured in utf8 bytes).
  """
  if cell is None:
    return ""

  value = NormString(unicode(cell, "utf8"))
  if field_name == "name" and (len(value) <= 3 or len(value) > 12):
    logging.warning("Row %d, Col %d, Fieldname %s, Bad name: %s" %
                    (row_id, col_id, field_name, value))
    return ""
  return value

def ParseFirstRow(index_row):
  """Map known column headers (the first row of a sheet/CSV) to column indices.

  Args:
    index_row: sequence of utf8 byte-string header cells.
  Returns:
    dict {canonical_field_name: column_index}, or None when the mandatory
    "name" column is absent.  Unknown headers are logged and skipped.
  """
  field_map = {}
  # enumerate() replaces the original zip(xrange(len(...)), ...) idiom.
  for col, field in enumerate(index_row):
    if field not in FIELDS_MAP:
      logging.warning("Input file has field which is not predefined: %s" % unicode(field, SRC_ENC).encode(LOG_ENC))
    else:
      field_map[FIELDS_MAP[field]] = col

  # FIELDS[0] is "name" — the only mandatory column.  Membership tests go
  # straight against the dict instead of materializing .keys() each time.
  if FIELDS[0] not in field_map:
    logging.warning("The first row must contain required keyword: name.")
    return None

  for field in FIELDS:
    if field not in field_map:
      logging.warning("Predefined field %s not in the input file." % field)

  return field_map

def ParseRow(field_map, row_id, row, callback):
  """Build a Person from one data row.

  *field_map* maps field names to column indices (see ParseFirstRow) and
  *callback* converts a single raw cell into a normalized string value.
  Columns outside the row's bounds are silently skipped.
  """
  person = Person()
  ncols = len(row)
  for field_name, col_id in field_map.items():
    if 0 <= col_id < ncols:
      setattr(person, field_name,
              callback(row[col_id], row_id, col_id, field_name))
  return person

def ParseSheet(sheet, update_time):
  """Parse one xlrd worksheet into a list of validated Person objects.

  Row 0 must be a header understood by ParseFirstRow; otherwise the whole
  sheet is skipped.  *update_time* is stamped onto every person.
  """
  header = [cell.value.encode(SRC_ENC) for cell in sheet.row(0)]
  field_map = ParseFirstRow(header)
  if not field_map:
    logging.warning("Ignore sheet: %s" % sheet.name.encode(LOG_ENC))
    return []

  persons = []
  for row_id in range(1, sheet.nrows):
    person = ParseRow(field_map, row_id, sheet.row(row_id), ParseCell)
    person.update_time = update_time
    person.norm()
    if not person.validate():
      logging.debug("Ignore invalidate person at Row: %d" % row_id)
      continue
    persons.append(person)
  return persons

def ParseWorkbook(workbook_fn):
  """Parse every worksheet of the .xls workbook *workbook_fn* into Persons."""
  logging.info("Processing workbook: %s" % workbook_fn)
  mtime = FileModifiedTime(workbook_fn)
  persons = []
  for sheet in xlrd.open_workbook(workbook_fn).sheets():
    logging.info("Process worksheet: %s" % sheet.name.encode(LOG_ENC))
    persons.extend(ParseSheet(sheet, mtime))
  return persons

def ParseCSV(csv_fn):
  """Parse a CSV file into a list of validated Person objects.

  The first row must be a header understood by ParseFirstRow; every later
  row becomes a Person.  Returns [] when the header is unusable.

  Fixes over the original implementation:
  - the file handle is closed (the old file() object leaked), and
  - rows are split with csv.reader (already imported), which handles quoted
    fields and strips line terminators; the old re.split(',', line) left a
    trailing newline on the last header cell so it never matched FIELDS_MAP.
  """
  logging.info("Processing CSV: %s" % csv_fn)
  update_time = FileModifiedTime(csv_fn)
  persons = []
  with open(csv_fn, "rb") as f:
    field_map = None
    for line, row in enumerate(csv.reader(f)):
      if line == 0:
        field_map = ParseFirstRow(row)
        if not field_map:
          logging.warning("Ignore csv file: %s" % csv_fn)
          return []
      else:
        person = ParseRow(field_map, line, row, ParseCSVCell)
        person.update_time = update_time
        person.norm()
        if person.validate():
          persons.append(person)
        else:
          logging.debug("Ignore invalidate person at line: %d" % line)
  return persons

def NonTrivialNeq(x, y):
  """Inequality that treats a missing left side as "no conflict".

  A None *x* never counts as different from *y*; otherwise plain !=.
  """
  return x is not None and x != y

def NonTrivialNeqSeq(x, y):
  """Element-wise NonTrivialNeq over two sequences (zip-truncated).

  True as soon as some aligned pair differs with a non-None left element.
  """
  return any(NonTrivialNeq(a, b) for a, b in zip(x, y))

def ScoreSorter(x, y):
  """cmp-style comparator ordering persons best-first.

  A higher (score, update_time) pair sorts earlier, so returns -1 when *x*
  outranks *y*, 1 when *y* outranks *x*, and 0 on a tie.  Written without
  the `cmp` builtin (Python-2-only) while preserving identical results.
  """
  a = (x.score, x.update_time)
  b = (y.score, y.update_time)
  if a > b:
    return -1
  if a < b:
    return 1
  return 0

def IsNotEmbedGroup(x, y, type):
  """Return True when persons x and y must NOT be collapsed into one group.

  For "onebox" output, persons can only be searched by name, address or
  unit, so those three fields form the group key.  For any other type
  (shenghuo), only entries agreeing on all of the most significant fields
  are considered duplicates.
  """
  if type == "onebox":
    key_x = (x.name, x.address, x.unit)
    key_y = (y.name, y.address, y.unit)
  else:
    key_x = (x.name, x.sex, x.age, x.hospital, x.hospital_contact, x.status)
    key_y = (y.name, y.sex, y.age, y.hospital, y.hospital_contact, y.status)
  return NonTrivialNeqSeq(key_x, key_y)

def GetCanonicalPersons(persons, type):
  """Reduce a same-name run of persons to one canonical entry per group.

  *persons* is expected to be pre-sorted by quality (ScoreSorter order), so
  the first member encountered for each group is its best representative.
  """
  if len(persons) == 1:
    return persons

  logging.debug("-" * 80)

  canonical = []
  for person in persons:
    # A person is a duplicate when it fits into some already-kept group.
    is_dup = any(not IsNotEmbedGroup(person, kept, type) for kept in canonical)
    if is_dup:
      # Drop later group members.
      # TODO: merge some fields into the first entry? in case the first entry is not complete.
      logging.debug("---: %s" % unicode(str(person), SRC_ENC).encode(LOG_ENC))
    else:
      canonical.append(person)
      logging.debug("+++: %s" % unicode(str(person), SRC_ENC).encode(LOG_ENC))
  return canonical

def Dedup(persons, type):
  """Remove duplicate persons; returns the deduplicated list.

  Persons are sorted by name ascending and quality descending, then each
  run of identical names is reduced to its canonical representatives by
  GetCanonicalPersons.
  """
  logging.info("Totally %d persons found in input files." % len(persons))

  # key= sort replaces the original cmp=-based sort (same ordering: name
  # ascending, then (score, update_time) descending) and runs in O(n log n)
  # key computations instead of O(n log n) Python-level comparator calls.
  persons.sort(key=lambda p: (p.name, -p.score, -p.update_time))

  # Sentry Person (name None) terminates the final same-name run.
  persons.append(Person())
  uniq_persons = []
  last_idx = 0
  for idx in range(1, len(persons)):  # `idx` avoids shadowing builtin `id`.
    if persons[idx].name != persons[last_idx].name:
      uniq_persons.extend(GetCanonicalPersons(persons[last_idx:idx], type))
      last_idx = idx
  logging.info("%d persons after duplication removal." % len(uniq_persons))
  return uniq_persons

def PrintUsageExit(num):
  """Print command-line usage and terminate with exit status *num*.

  Fixes the original usage text: the flag accepted by ParseArguments is
  --output (not --output_file), and "shenghuo" is also a valid --type.
  """
  print("""Usage: <script> --input_pattern=<*>
              --output=<file>
              [ --type=onebox,shenghuo,app_engine ]
              [ --starting_shard]
              [ --shard_size]""")
  sys.exit(num)

def ParsePersonsFromFile(file):
  """Dispatch on extension: .xls -> workbook parser, .csv -> CSV parser.

  Unknown extensions are logged as errors and contribute no persons.
  (Parameter name kept for interface compatibility despite shadowing the
  builtin.)
  """
  if file.endswith('.xls'):
    return ParseWorkbook(file)
  if file.endswith('.csv'):
    return ParseCSV(file)
  logging.error("Unknown format: %s" % file)
  return []

def ParseArguments(argv):
  """Parse command-line flags.

  Returns (input_pattern, output, type, starting_shard, shard_size).
  Exits via PrintUsageExit(0) on --help and PrintUsageExit(1) when required
  flags are missing/empty or --type is not a known writer type.
  """
  opts, _args = getopt.getopt(argv[1:], 'h',
    ['help', 'input_pattern=', 'output=', 'type=',
     'starting_shard=', 'shard_size='  # used by app_engine_writer
     ])

  # Local names chosen to avoid shadowing the builtins `input` and `type`.
  in_pattern = None
  out_file = None
  out_type = 'onebox'
  starting_shard = 0
  shard_size = 1000

  for option, value in opts:
    if option in ('-h', '--help'):
      PrintUsageExit(0)
    elif option == '--input_pattern':
      in_pattern = value
    elif option == '--output':
      out_file = value
    elif option == '--type':
      out_type = value
    elif option == '--starting_shard':
      starting_shard = int(value)
    elif option == '--shard_size':
      shard_size = int(value)

  # Both flags are required; empty values (e.g. `--output=`) are rejected too.
  if not (in_pattern and out_file):
    PrintUsageExit(1)

  if out_type not in ('onebox', 'shenghuo', 'app_engine'):
    PrintUsageExit(1)

  return (in_pattern, out_file, out_type, starting_shard, shard_size)

def main():
  """Entry point: parse flags, read all matching inputs, dedup, and hand the
  result to the writer selected by --type."""
  input_pattern, output, type, starting_shard, shard_size = ParseArguments(sys.argv)
  logging.basicConfig(level=logging.INFO)

  persons = []
  for fname in glob.glob(input_pattern):
    persons.extend(ParsePersonsFromFile(fname))

  persons = Dedup(persons, type)

  if type == 'onebox':
    onebox_feed_writer.OneboxWriter(output, persons)
  elif type == 'shenghuo':
    onebox_feed_writer.ShenghuoWriter(output, persons)
  elif type == 'app_engine':
    app_engine_writer.Writer(output, persons, starting_shard, shard_size)

  logging.info("Successfully processed %d entries." % len(persons))

if __name__ == "__main__":
  main()
