import 'dart:convert';
import 'dart:typed_data';
import 'dart:math' show min;
import 'package:archive/archive.dart';
import 'package:dbio_utils/base/lru_cache.dart';
import 'package:dbio_utils/bgzf_filehandle/unzip.dart';
import 'package:dbio_utils/tabix/util.dart';
import 'package:dbio_utils/tabix/virtual_offset.dart';
import 'package:flutter/cupertino.dart';
import 'dart:math' show pow, max;
import '../generic_filehandle/generic_file_handle.dart';

import 'index_file.dart';
import 'chunk.dart';
import 'tbi.dart' as TBI;
import 'csi.dart' as CSI;

class TabixIndexedFile {
  /// Handle for the bgzip-compressed data file.
  GenericFileHandle fileHandle;

  /// Handle for a `.tbi` (tabix) index file, if one is supplied.
  GenericFileHandle tbiFileHandle;

  /// Handle for a `.csi` index file, if one is supplied.
  /// When both are supplied, the CSI index wins (see constructor body).
  GenericFileHandle csiFileHandle;

  /// The parsed index (TBI or CSI) used to find chunks covering a region.
  IndexFile index;

  /// Maximum allowed fetched size (bytes) for a single chunk; larger throws.
  int chunkSizeLimit;

  /// Capacity of the decompressed-chunk LRU cache, in bytes.
  int chunkCacheSize;

  /// Callback used to normalize reference-sequence names from the index.
  RenameRefSeq renameRefSeq;

  /// Number of emitted lines between cooperative yields in [getLines].
  int yieldLimit;

  /// LRU cache of decompressed [ChunkSlice]s, keyed by chunk identity.
  LruCache chunkCache;

  TabixIndexedFile({
    @required this.fileHandle,
    this.tbiFileHandle,
    this.csiFileHandle,
    this.chunkSizeLimit = 2000000,
    this.yieldLimit = 300,
    this.renameRefSeq = defRenameRefSeq,
    this.chunkCacheSize,
  }) {
    // Default cache size is 5 MiB. Use a shift rather than pow() so the
    // result is always an int: pow() returns num (a double on the web),
    // which would fail the implicit downcast into this int field.
    this.chunkCacheSize ??= 5 * (1 << 20);
    chunkCache = LruCache(this.chunkCacheSize);
    if (tbiFileHandle != null) {
      index = TBI.TabixIndex(fileHandle: tbiFileHandle, renameRefSeq: this.renameRefSeq);
    }

    // Intentionally checked second: a CSI index, if present, overrides TBI.
    if (csiFileHandle != null) {
      index = CSI.CSI(fileHandle: csiFileHandle);
    }
  }

  /// @param {string} refName name of the reference sequence
  /// @param {number} start start of the region (in 0-based half-open coordinates)
  /// @param {number} end end of the region (in 0-based half-open coordinates)
  /// @param {Map} opts options, e.g. `opts['signal']` for an [AbortSignal]
  /// @param {function} callback called for each overlapping line with
  /// `(line, fileOffsetId)` where fileOffsetId uniquely identifies the line's
  /// position in the file (see comment at the call site below)
  /// @returns {Promise} resolved when the whole read is finished, rejected on error
  getLines({
    @required String refName,
    int start = 0,
    int end,
    Map opts,
    @required Function callback,
  }) async {
    // FIX: opts was previously ignored entirely — `signal` stayed null and a
    // fresh empty map was passed downstream, so abort signals never fired.
    // Honor the caller-supplied options here.
    Map options = opts ?? {};
    AbortSignal signal = options['signal'];

    var metadata = await this.index.getMetadata(options);
    checkAbortSignal(signal);

    // An omitted end means "to the end of the reference sequence".
    if (end == null) {
      end = metadata['maxRefLength'];
    }
    if (!(start <= end)) throw Exception('invalid start and end coordinates. start must be less than or equal to end');
    if (start == end) return; // empty half-open region: nothing to do

    List<Chunk> chunks = await this.index.blocksForRange(refName: refName, start: start, end: end);
    checkAbortSignal(signal);

    // check the chunks for any that are over the size limit.  if
    // any are, don't fetch any of them
    for (var i = 0; i < chunks.length; i += 1) {
      var size = chunks[i].fetchedSize();
      if (size > this.chunkSizeLimit) {
        throw Exception('Too much data. Chunk size $size bytes exceeds chunkSizeLimit of ${this.chunkSizeLimit}.');
      }
    }

    // now go through each chunk and parse and filter the lines out of it
    var linesSinceLastYield = 0;
    for (var chunkNum = 0; chunkNum < chunks.length; chunkNum += 1) {
      // Sorted-order sanity check state; intentionally reset per chunk.
      int previousStartCoordinate;
      var c = chunks[chunkNum];
      ChunkSlice _chunkCache = await readChunk(c, options);
      var buffer = _chunkCache.buffer;
      // cpositions/dpositions are parallel arrays: compressed (file) and
      // decompressed offsets of each bgzip block boundary in this chunk.
      var cpositions = _chunkCache.cpositions;
      var dpositions = _chunkCache.dpositions;
      List<String> lines = utf8.decode(buffer).split('\n');
      // Drop the empty fragment after a trailing newline.
      if (lines.last == "" || lines.last.trim().isEmpty) lines.removeLast();
      checkAbortSignal(signal);
      var blockStart = c.minv.dataPosition;
      var pos;

      for (var i = 0; i < lines.length; i += 1) {
        var line = lines[i];

        // Advance pos to the bgzip block containing blockStart. pos is not
        // reset between lines, but blockStart only grows, so the scan is
        // effectively a single forward pass per chunk.
        for (pos = 0; pos < dpositions.length - 1 && blockStart >= dpositions[pos]; pos += 1) {
          //
        }

        // filter the line for whether it is within the requested range
        Map _checkLine = this.checkLine(metadata, refName, start, end, line);

        var startCoordinate = _checkLine['startCoordinate'];
        var overlaps = _checkLine['overlaps'];

        // do a small check just to make sure that the lines are really sorted by start coordinate
        if (previousStartCoordinate != null && startCoordinate != null && previousStartCoordinate > startCoordinate)
          throw Exception('Lines not sorted by start coordinate ($previousStartCoordinate > $startCoordinate), this file is not usable with Tabix.');
        previousStartCoordinate = startCoordinate;

        if (overlaps) {
          callback(
            line.trim(),
            // cpositions[pos] refers to actual file offset of a bgzip block boundaries
            //
            // we multiply by (1 <<8) in order to make sure each block has a "unique"
            // address space so that data in that block could never overlap
            //
            // then the blockStart-dpositions is an uncompressed file offset from
            // that bgzip block boundary, and since the cpositions are multiplied by
            // (1 << 8) these uncompressed offsets get a unique space
            cpositions[pos] * (1 << 8) + (blockStart - dpositions[pos]),
          );
        } else if (startCoordinate != null && startCoordinate >= end) {
          // the lines were overlapping the region, but now have stopped, so
          // we must be at the end of the relevant data and we can stop
          // processing data now
          return;
        }
        // +1 accounts for the '\n' consumed by split().
        blockStart += line.length + 1;

        // yield if we have emitted beyond the yield limit
        linesSinceLastYield += 1;
        if (linesSinceLastYield >= this.yieldLimit) {
          await Future.delayed(Duration(milliseconds: 500));
          checkAbortSignal(signal);
          linesSinceLastYield = 0;
        }
      }
    }
  }

  /// Returns the index's metadata map (columnNumbers, metaChar, format, etc).
  Future<Map> getMetadata(Map options) async {
    return this.index.getMetadata(options);
  }

  /// get a buffer containing the "header" region of
  /// the file, which are the bytes up to the first
  /// non-meta line
  ///
  /// @returns {Promise} for a buffer
  Future<List<int>> getHeaderBuffer(Map opts) async {
    Map meta = await this.getMetadata(opts);
    VirtualOffset firstDataLine = meta['firstDataLine'];
    String metaChar = meta['metaChar'];
    int maxBlockSize = meta['maxBlockSize'];

    checkAbortSignal(opts['signal']);
    // Fetch through the block containing the first data line, or one block
    // if the index did not record a first data line.
    var maxFetch = firstDataLine != null && firstDataLine.blockPosition > 0 ? firstDataLine.blockPosition + maxBlockSize : maxBlockSize;
    // TODO: what if we don't have a firstDataLine, and the header
    // actually takes up more than one block? this case is not covered here

    var bytes = await this._readRegion(0, maxFetch, opts);
    checkAbortSignal(opts['signal']);
    try {
      bytes = GZipDecoder().decodeBytes(bytes);
    } catch (e) {
      print(e);
      // FIX: was '${e.code}' — e is dynamic here and most exceptions have no
      // `code` getter, so building the message itself threw NoSuchMethodError
      // and masked the real decompression failure.
      throw Exception('error decompressing block at 0 (length $maxFetch) $e');
    }

    // trim off lines after the last non-meta line
    if (metaChar != null) {
      // trim backward from the end
      var lastNewline = -1;
      var newlineByte = '\n'.codeUnitAt(0);
      var metaByte = metaChar.codeUnitAt(0);
      for (var i = 0; i < bytes.length; i += 1) {
        // stop at the first line that does not begin with the meta character
        if (i == lastNewline + 1 && bytes[i] != metaByte) break;
        if (bytes[i] == newlineByte) lastNewline = i;
      }
      bytes = bytes.sublist(0, lastNewline + 1);
    }
    return bytes;
  }

  /// get a string containing the "header" region of the
  /// file, is the portion up to the first non-meta line
  ///
  /// @returns {Promise} for a string
  Future<String> getHeader([Map opts = const {}]) async {
    var bytes = await this.getHeaderBuffer(opts);
    checkAbortSignal(opts['signal']);
    return utf8.decode(bytes);
  }

  /// get an array of reference sequence names, in the order in which
  /// they occur in the file.
  ///
  /// reference sequence renaming is not applied to these names.
  ///
  /// @returns {Promise} for an array of string sequence names
  getReferenceSequenceNames(Map opts) async {
    var metadata = await this.getMetadata(opts);
    return metadata['refIdToName'];
  }

  /// @param {object} metadata metadata object from the parsed index,
  /// containing columnNumbers, metaChar, and format
  /// @param {string} regionRefName
  /// @param {number} regionStart region start coordinate (0-based-half-open)
  /// @param {number} regionEnd region end coordinate (0-based-half-open)
  /// @param {string} line a single data line from the file
  /// @returns {object} like `{startCoordinate, overlaps}`. overlaps is boolean,
  /// true if line is a data line that overlaps the given region
  Map checkLine(Map metaData, String regionRefName, int regionStart, int regionEnd, String line) {
    Map columnNumbers = metaData['columnNumbers'];
    String metaChar = metaData['metaChar'];
    String coordinateType = metaData['coordinateType'];
    String format = metaData['format'];
    // skip meta lines. FIX: also skip empty lines — `line[0]` on an empty
    // string threw RangeError (only the trailing blank line is stripped by
    // the caller; interior blank lines reached here).
    if (line.isEmpty || line[0] == metaChar) {
      return {'overlaps': false};
    }

    // check ref/start/end using column metadata from index
    int ref = columnNumbers['ref'];
    int start = columnNumbers['start'];
    int end = columnNumbers['end'];

    if (ref == null) ref = 0;
    if (start == null) start = 0;
    if (end == null) end = 0;
    // For VCF the end coordinate comes from the INFO column (column 8).
    if (format == 'VCF') end = 8;
    var maxColumn = max(ref, max(start, end));

    // this code is kind of complex, but it is fairly fast.
    // basically, we want to avoid doing a split, because if the lines are really long
    // that could lead to us allocating a bunch of extra memory, which is slow

    var currentColumnNumber = 1; // cols are numbered starting at 1 in the index metadata
    var currentColumnStart = 0;
    var refSeq = '';
    int startCoordinate;
    // Iterate one past the end so the final column (no trailing tab) is seen.
    for (var i = 0; i < line.length + 1; i += 1) {
      if (i == line.length || line[i] == '\t') {
        if (currentColumnNumber == ref) {
          // wrong reference sequence: cannot overlap
          if (this.renameRefSeq(line.substring(currentColumnStart, i)) != regionRefName) {
            return {'overlaps': false};
          }
        } else if (currentColumnNumber == start) {
          startCoordinate = int.tryParse(line.substring(currentColumnStart, i));
          // we convert to 0-based-half-open
          if (coordinateType == '1-based-closed') {
            startCoordinate -= 1;
          }
          if (startCoordinate >= regionEnd) {
            return {'startCoordinate': startCoordinate, 'overlaps': false};
          }
          if (end == 0 || end == start) {
            // if we have no end, we assume the feature is 1 bp long
            if (startCoordinate + 1 <= regionStart) {
              return {'startCoordinate': startCoordinate, 'overlaps': false};
            }
          }
        } else if (format == 'VCF' && currentColumnNumber == 4) {
          // remember the REF allele; its length gives the default VCF end
          refSeq = line.substring(currentColumnStart, i);
        } else if (currentColumnNumber == end) {
          var endCoordinate;
          // this will never match if there is no end column
          if (format == 'VCF') {
            endCoordinate = this._getVcfEnd(
              startCoordinate,
              refSeq,
              line.substring(currentColumnStart, i),
            );
          } else {
            endCoordinate = int.tryParse(line.substring(currentColumnStart, i));
          }
          if (endCoordinate < regionStart) {
            return {'overlaps': false};
          }
        }
        currentColumnStart = i + 1;
        currentColumnNumber += 1;
        // no column past maxColumn affects the overlap decision
        if (currentColumnNumber > maxColumn) {
          break;
        }
      }
    }
    return {'startCoordinate': startCoordinate, 'overlaps': true};
  }

  /// Computes the end coordinate of a VCF feature from its start, REF allele,
  /// and INFO column. Defaults to start + REF length; an `END=` key in INFO
  /// overrides it; TRA (translocation) features get a 1 bp extent.
  _getVcfEnd(int startCoordinate, String refSeq, String info) {
    var endCoordinate = startCoordinate + refSeq.length;
    // ignore TRA features as they specify CHR2 and END
    // as being on a different chromosome
    // if CHR2 is on the same chromosome, still ignore it
    // because there should be another pairwise feature
    // at the end of this one
    var isTRA = info.indexOf('SVTYPE=TRA') != -1;
    // FIX: guard info.isNotEmpty — `info[0]` threw RangeError on an empty
    // INFO column; an empty/'.' INFO just keeps the REF-length default.
    if (info.isNotEmpty && info[0] != '.' && !isTRA) {
      var prevChar = ';';
      for (var j = 0; j < info.length; j += 1) {
        // match 'END=' only at the start of a key (start of string or after ';')
        if (prevChar == ';' && info.substring(j, min(j + 4, info.length)) == 'END=') {
          var valueEnd = info.indexOf(';', j);
          if (valueEnd == -1) valueEnd = info.length;
          endCoordinate = int.tryParse(info.substring(j + 4, valueEnd));
          break;
        }
        prevChar = info[j];
      }
    } else if (isTRA) {
      return startCoordinate + 1;
    }
    return endCoordinate;
  }

  /// return the approximate number of data lines in the given reference sequence
  /// @param {string} refName reference sequence name
  /// @returns {Promise} for number of data lines present on that reference sequence
  Future<int> lineCount(String refName, [Map opts = const {}]) async {
    return this.index.lineCount(refName, opts);
  }

  /// Reads `compressedSize` raw (still compressed) bytes starting at file
  /// offset `position`.
  Future<List<int>> _readRegion(int position, int compressedSize, [Map opts = const {}]) async {
    List<int> bytes = await this.fileHandle.read(0, compressedSize, position);
    return bytes;
  }

  /// read and uncompress the data in a chunk (composed of one or more
  /// contiguous bgzip blocks) of the file
  /// @param {Chunk} chunk
  /// @returns {Promise} for a ChunkSlice of the file
  Future<ChunkSlice> readChunk(Chunk chunk, [Map opts = const {}]) async {
    // fetch the uncompressed data, uncompress carefully a block at a time,
    // and stop when done
    ChunkSlice _cachedSlice = chunkCache.get(chunk.toString());
    if (_cachedSlice != null) return _cachedSlice;

    var compressedData = await this._readRegion(chunk.minv.blockPosition, chunk.fetchedSize(), opts);
    try {
      ChunkSlice chunkSlice = unzipChunkSlice(compressedData, chunk);
      if (chunkSlice != null) chunkCache.save(chunk.toString(), chunkSlice);
      return chunkSlice;
    } catch (e) {
      throw Exception('error decompressing chunk $chunk $e');
    }
  }
}
