package org.anno.chunkdb;

import static com.google.common.base.Preconditions.checkArgument;

import com.google.appengine.api.datastore.EntityNotFoundException;
import com.google.common.annotations.VisibleForTesting;

import org.anno.chunkdb.Chunkdb.ColumnSpec.Aggregate;

import java.io.IOException;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**
 * Parses SQL strings into {@link Chunkdb.Select}.
 */
public class QueryParser {

  // We use possessive quantifiers (*+ and ++) because otherwise the Java
  // regex matcher has stack overflows on large inputs.
  private static final Pattern TOKEN = Pattern.compile(
      "[a-zA-Z_][0-9a-zA-Z_+-]*+" // an identifier
      + "|[.]?[0-9+-][0-9a-zA-Z_.+-]*+" // a number
      + "|\"([^\"\n\\\\]|\\\\.)*+\"" // a double-quoted string
      + "|'([^\'\n\\\\]|\\\\.)*+'" // a single-quoted string
      + "|\\*|[<>]=?|=", // SQL symbols
      Pattern.MULTILINE);
  private static final Pattern KEYWORD = Pattern.compile(
      "SELECT|FROM|WHERE|GROUP|ORDER|OFFSET|LIMIT", Pattern.CASE_INSENSITIVE);
  public static final Pattern COL_PATTERN = Pattern.compile("col[0-9]+");
  public static final Pattern ASC_DESC = Pattern.compile("(asc)|(de?sc)", Pattern.CASE_INSENSITIVE);
  private final TableDao dao;

  public QueryParser() {
    this(new TableDao());
  }

  @VisibleForTesting
  public QueryParser(TableDao dao) {
    this.dao = dao;
  }

  public Chunkdb.Select parse(String sql)
      throws EntityNotFoundException, SQLException, IOException {
    return validate(parseRaw(sql));
  }

  public Chunkdb.Select validate(Chunkdb.Select select)
      throws EntityNotFoundException, IOException, SQLException {
    Metadata.Table table = dao.get(select.getFrom());
    Set<String> tableColumnNames =
        table.getColumnList().stream().map(Metadata.Column::getName).collect(Collectors.toSet());

    Set<String> selectedNames = new HashSet<>();
    int aggregateCount = 0;
    for (Chunkdb.ColumnSpec spec : select.getColumnList()) {
      checkArgument(spec.hasName() || spec.getAggregate() == Aggregate.COUNT, "%s", spec);
      if (spec.hasAggregate()) {
        aggregateCount += 1;
      }
      if (spec.hasName()) {
        String name = spec.getName();
        selectedNames.add(name);
        if ("*".equals(name)) {
          checkArgument(
              !spec.hasAggregate() || spec.getAggregate() == Aggregate.COUNT, "* in %s", spec);
          checkArgument(
              select.getColumnCount() == 1 || spec.hasAggregate(), "* with others %s", select);
        } else {
          isValidColumnName(name, tableColumnNames, table);
        }
      }
    }

    checkArgument(
        select.getGroupByCount() == 0 || aggregateCount > 0, "no aggregation for group %s", select);

    for (Chunkdb.Filter filter : select.getWhereList()) {
      isValidColumnName(filter.getColumn(), tableColumnNames, table);
    }

    for (String groupByName : select.getGroupByList()) {
      checkArgument(selectedNames.contains(groupByName), "group by %s not selected", groupByName);
      isValidColumnName(groupByName, tableColumnNames, table);
    }

    if (select.hasOrderBy()) {
      isValidColumnName(select.getOrderBy(), tableColumnNames, table);
    }
    return select;
  }

  private static void isValidColumnName(
      String name, Set<String> tableColumnNames, Metadata.Table table) {
    if (COL_PATTERN.matcher(name).matches()) {
      checkArgument(
          Integer.parseInt(name.substring(3)) < table.getColumnCount(), "%s in %s", name, table);
    } else {
      checkArgument(tableColumnNames.contains(name), "%s not a column name in %s", name, table);
    }
  }

  public static Chunkdb.Select parseRaw(String sql) {
    Matcher matcher = TOKEN.matcher(sql);
    expectToken(matcher);
    return parseSelect(matcher, Chunkdb.Select.newBuilder());
  }

  private static Chunkdb.Select parseSelect(Matcher m, Chunkdb.Select.Builder builder) {
    expectKeywordAndToken("SELECT", m);
    parseColumnSpecList(m, builder);
    expectKeywordAndToken("FROM", m);
    builder.setFrom(m.group());
    return m.find() ? parseModifiers(m, builder) : builder.build();
  }

  private static void parseColumnSpecList(Matcher matcher, Chunkdb.Select.Builder builder) {
    do {
      Chunkdb.ColumnSpec.Builder columnBuilder = builder.addColumnBuilder();
      try {
        Aggregate aggregate = Aggregate.valueOf(matcher.group().toUpperCase());
        columnBuilder.setAggregate(aggregate);
        if (aggregate == Aggregate.COUNT) {
          continue;
        }
        expectToken(matcher);
      } catch (IllegalArgumentException ignored) {
      }
      columnBuilder.setName(normalize(matcher.group()));
    } while (matcher.find() && !"from".equalsIgnoreCase(matcher.group()));
  }

  private static Chunkdb.Select parseModifiers(Matcher m, Chunkdb.Select.Builder builder) {
    while (true) {
      switch (m.group().toUpperCase()) {
        case "WHERE":
          expectToken(m);
          do {
            parseFilter(m, builder.addWhereBuilder());
            if (!m.find()) {
              return builder.build();
            }
            if ("AND".equalsIgnoreCase(m.group())) {
              expectToken(m);
            }
          } while (!KEYWORD.matcher(m.group()).matches());
          break;
        case "GROUP":
          expectToken(m);
          expectKeywordAndToken("BY", m);
          do {
            builder.addGroupBy(normalize(m.group()));
            if (!m.find()) {
              return builder.build();
            }
          } while (!KEYWORD.matcher(m.group()).matches());
          break;
        case "ORDER":
          expectToken(m);
          expectKeywordAndToken("BY", m);
          do {
            builder.setOrderBy(normalize(m.group()));
            if (!m.find()) {
              return builder.build();
            }
            Matcher adm = ASC_DESC.matcher(m.group());
            if (adm.matches()) {
              builder.setAscending(adm.start(1) < adm.end(1));
              if (!m.find()) {
                return builder.build();
              }
            }
          } while (!KEYWORD.matcher(m.group()).matches());
          break;
        case "OFFSET":
          expectToken(m);
          builder.setOffset(Integer.parseInt(m.group()));
          if (!m.find()) {
            return builder.build();
          }
          break;
        case "LIMIT":
          expectToken(m);
          builder.setLimit(Integer.parseInt(m.group()));
          if (!m.find()) {
            return builder.build();
          }
          break;
        default:
          throw new IllegalArgumentException("Expected keyword but got " + m.group());
      }
    }
  }

  private static String normalize(String s) {
    return s.startsWith("'") && s.endsWith("'")
           || s.startsWith("\"") && s.startsWith("\"")
           ? s.substring(1, s.length() - 1) : s;
  }

  private static void parseFilter(Matcher m, Chunkdb.Filter.Builder filter) {
    filter.setColumn(normalize(m.group()));
    expectToken(m);
    filter.setOp(parseOp(m.group()));
    expectToken(m);
    filter.setStringRhs(normalize(m.group()));
  }

  private static Chunkdb.Filter.Op parseOp(String token) {
    switch (token) {
      case "<":
        return Chunkdb.Filter.Op.LT;
      case "<=":
        return Chunkdb.Filter.Op.LE;
      case "=":
        return Chunkdb.Filter.Op.EQ;
      case ">=":
        return Chunkdb.Filter.Op.GE;
      case ">":
        return Chunkdb.Filter.Op.GT;
    }
    throw new IllegalArgumentException(
        token + " not mapped to " + Arrays.asList(Chunkdb.Filter.Op.values()));
  }

  private static void expectKeywordAndToken(String keyWord, Matcher m) {
    if (!keyWord.equalsIgnoreCase(m.group())) {
      throw new IllegalArgumentException("Expected token " + keyWord + " in " + m);
    }
    expectToken(m);
  }

  private static void expectToken(Matcher m) {
    if (!m.find()) {
      throw new IllegalArgumentException("Unexpected end of input " + m);
    }
  }
}
