#include "sort.h"

#include <algorithm>
#include <utility>
#include <vector>
namespace ads {
// Sorting:
//  Efficiency:
//    Good behaviour: O(nlogn) Bad behaviour: O(n^2) Ideal: O(n)
//    but it is not possible in the average case.
//
//  Stability:
//    stable sorting algorithms maintain the relative order of
//    records with equal keys.
//
//  Adaptability:
//    taking into account if the list is already sorted.

// Bubble sort
//
// Loop through N elements N times, each time comparing
// the two next to each other, swapping if they are not
// in order.
//
// Complexity:
//   Worst: O(n^2)
//   Avg: O(n^2)
// Stable: Yes.
void Sort::bubble_sort(int data[], const int start, const int end) {
  // Sorts data[start..end] (inclusive) in ascending order.
  //
  // Each pass bubbles the largest remaining element up to position
  // `last`, so the inner loop never needs to rescan the settled tail.
  for (int last = end; last > start; --last) {
    bool swapped = false;
    for (int j = start; j < last; ++j) {
      if (data[j] > data[j + 1]) {  // not in order -> swap
        std::swap(data[j], data[j + 1]);
        swapped = true;
      }
    }
    // Adaptive early exit: a pass with no swap means the range is
    // already sorted, giving O(n) on presorted input.
    if (!swapped) {
      break;
    }
  }
}

// Complexity is O(n)
void Sort::merge(int data[], const int start, const int mid, const int end) {
  // Merges the two sorted runs data[start..mid] and data[mid+1..end]
  // into a scratch buffer, then copies the result back in place.
  // Complexity is O(end - start + 1).
  //
  // std::vector replaces the raw new[]/delete[] buffer: RAII frees the
  // scratch storage even if copying were to throw (no leak path).
  std::vector<int> merged;
  merged.reserve(end - start + 1);
  int left = start;     // cursor into the first run
  int right = mid + 1;  // cursor into the second run
  while (left <= mid && right <= end) {
    // `<=` takes equal keys from the left run first -> stable merge.
    if (data[left] <= data[right]) {
      merged.push_back(data[left++]);
    } else {
      merged.push_back(data[right++]);
    }
  }
  // At most one run has leftovers; dump the remainder of each.
  for (int k = left; k <= mid; ++k) {
    merged.push_back(data[k]);
  }
  for (int k = right; k <= end; ++k) {
    merged.push_back(data[k]);
  }
  std::copy(merged.begin(), merged.end(), data + start);
}

// Merge Sort
//
// Keep on dividing the data into two lists of almost the same size recursively
// until the size of a new list becomes 0
// Merge these two lists, by taking both of the lists and then creating
// an output list of the size K = size A + size B, where size A is the size
// of the first list and size B is the size of the second list, inserting
// elements into this list by selecting the smaller of both lists, until
// one of the lists is finished, then the rest is dumped in the end
//
// Complexity:
//   Worst: O(nlogn)
//   Average: O(nlogn)
// Stable:
//   Yes.
//
// Scales well to large lists and is one of the most widely used
// sorting algorithms in programming-language standard libraries.
//
// Uses divide and conquer approach: Divide unsorted list into
// two sublists of about half the size, sort each, merge both.
//
// Implementation details: The splitting happens in the beginning,
// recursively till we have a list of size: 1 and 1 or 1 and 2,
// comparing them and creating a sorted list from both;
// therefore a list of size X will be split into roughly X/2 lists
// of size 1 and 1 or 1 and 2, each turned into a sorted list, then
// two out of this sorted list will be used to create a sorted
// list out of it, and so on until we get the original list sorted.
// The merging part is O(n) and the splitting result in merging
// log n times, thus the complexity is O(nlogn).
void Sort::merge_sort(int data[], const int start, const int end) {
  // Sorts data[start..end] (inclusive); ranges of size 0 or 1 are
  // already sorted, so only recurse while start < end.
  if (start < end) {
    // Overflow-safe midpoint: (start + end) / 2 can overflow int for
    // large indices; start + (end - start) / 2 cannot.
    int mid = start + (end - start) / 2;
    // The splitting part down is the result of log n.
    merge_sort(data, start, mid);    // sort left half
    merge_sort(data, mid + 1, end);  // sort right half
    merge(data, start, mid, end);    // O(n) merge of the sorted halves
  }
}

// Quick Sort
//
// Partition the data by picking a pivot and rearranging the elements such
// that all those before the pivot are less than it and all those after are
// more than the pivot (equal values can go either way) -> O(n)
// Recursively sort the sublist of lesser elements and sublist of greater
// elements.
//
// Complexity:
//   Average case: O(nlogn) Worst case: O(n^2)
//   The choice of the pivot determine where the algorithm performs between
//   both, where the best choice for the pivot would be the median of
//   the elements.
//
// Implementation details:
//   recursively pick the element in the middle, putting smaller element
//   to its left and larger elements to its right. Then do the same for the
//   left part and the right part until we have our list sorted.
// Eg. 1 12 5 26 7 14 3 7 2
//               7 -> pivot
//        i               j -> swap
//     1  2 5 26 7 14 3 7 12
//
//            i       j     -> swap
//     1  2 5 3 7 14 26 7 12
//
//             ij           -> swap [swapping same pos -> no effect], ++i, --j
//          now i > j -> stop partitioning
// Do the same for 1 2 5 3  AND 7 14 26 7 12
void Sort::quick_sort(int data[], const int start, const int end) {
  // Sorts data[start..end] (inclusive) using a Hoare-style partition
  // around the middle element's value.
  //
  // Overflow-safe midpoint: (start + end) / 2 can overflow int for
  // large indices; start + (end - start) / 2 cannot.
  int pivot = data[start + (end - start) / 2];
  int i = start, j = end;
  // Partitioning.
  //   Complexity is O(n).
  while (i <= j) {
    // i stops at the first element >= pivot from the left,
    while (data[i] < pivot) {
      ++i;
    }
    // j at the first element <= pivot from the right.
    while (data[j] > pivot) {
      --j;
    }
    // At this point i <= j always holds if the scans met inside the
    // range.  If i < j, swapping lets one or both scans advance on the
    // next round; if i == j, the swap is a no-op but i/j cross, which
    // ends the partition loop.
    if (i <= j) {
      std::swap(data[i], data[j]);
      i++;
      j--;
    }
  }
  // data[start..j] <= pivot and data[i..end] >= pivot.
  // The splitting part down is the result of the log n.
  if (start < j) {
    quick_sort(data, start, j);
  }
  if (end > i) {
    quick_sort(data, i, end);
  }
}

// Insertion sort
//
// Works by taking one element at a time and inserting it into its
// correct position within the already-sorted prefix. Insertion is
// expensive, since it involves shifting every larger element before it.
//
// Complexity:
//   Worst: O(n^2)  Average: O(n^2)
// Stable: Yes.
void Sort::insertion_sort(int data[], const int start, const int end) {
  // Sorts data[start..end] (inclusive) in ascending order.
  //
  // Invariant: data[start..i-1] is sorted at the top of each pass.
  // Take the next element and slide it left until it sits just after
  // the last element that is not greater than it.
  for (int i = start + 1; i <= end; ++i) {
    const int current = data[i];
    int slot = i;
    // Shift larger prefix elements one position to the right to open
    // a gap for `current`.  Strict `>` keeps equal keys in their
    // original order -> stable.
    while (slot > start && data[slot - 1] > current) {
      data[slot] = data[slot - 1];
      --slot;
    }
    data[slot] = current;  // drop the element into its gap
  }
}

// Selection sort
//
// Find the smallest element, and places it in the first position, repeat
// for the rest of the list.
//
// It does no more than n swaps, which makes it useful when swapping is
// very expensive.
//
// Complexity:
//   Average: O(n^2)
//   Worst: O(n^2)
// Stable: No.
void Sort::selection_sort(int data[], const int start, const int end) {
  // Sorts data[start..end] (inclusive) in ascending order.
  //
  // Grow a sorted prefix one slot at a time: each pass scans the
  // unsorted suffix for its smallest element and swaps it into place,
  // doing at most one swap per pass.
  for (int pos = start; pos < end; ++pos) {
    int smallest = pos;
    for (int probe = pos + 1; probe <= end; ++probe) {
      if (data[probe] < data[smallest]) {
        smallest = probe;
      }
    }
    // Swap the minimum of the suffix into position `pos`.
    const int held = data[pos];
    data[pos] = data[smallest];
    data[smallest] = held;
  }
}

// Heap Sort
//
// Construct a max-heap from the data set, then repeatedly remove the
// largest element from the heap and place it at the end of the data
// set, continuing with the next largest each time, until no elements
// are left in the max-heap.
//
// Complexity:
//   Average: O(nlogn)
//   Worst: O(nlogn)
// Stable: No.

// Counting Sort
//
// Counts each number in the dataset and then construct a final output by
// placing N items of each number where N is its count.
//
// Performs well if the dataset values are in a dataset S, where S size
// is not large.
//
// Complexity: O( |S| + n ) where |S| is the size of the range and n is
// the input size.
void Sort::counting_sort(int data[], const int start, const int end) {
  // Sorts data[start..end] (inclusive) in ascending order by counting
  // occurrences of each value in [min, max] and rewriting the range.
  if (start > end) {
    return;  // robustness: empty range, nothing to do
  }
  // Find the value range so the count array covers exactly [min, max].
  int min = data[start], max = data[start];
  for (int i = start + 1; i <= end; ++i) {
    if (data[i] < min) {
      min = data[i];
    }
    if (data[i] > max) {
      max = data[i];
    }
  }

  // Zero-initialized counts; std::vector replaces the raw new[]/
  // delete[] buffer (RAII, no leak on early exit or throw).
  std::vector<int> count(max - min + 1, 0);
  for (int i = start; i <= end; ++i) {
    count[data[i] - min]++;
  }

  // BUG FIX: the output cursor must begin at `start`, not 0 —
  // otherwise sorting a sub-range with start != 0 writes the result
  // to the wrong positions (and clobbers data before `start`).
  int z = start;
  for (int i = min; i <= max; ++i) {
    for (int j = 0; j < count[i - min]; ++j) {
      data[z++] = i;
    }
  }
}

// Bucket Sort
//
// Divide the data set into a set of buckets. Sort each bucket, then
// construct an output from the sorted buckets in order.
// Only works well if the data has a small range of values.


// Radix Sort
//
// Sort numbers consisting of k digits, where the numbers are processed from
// the least significant digit (in which case a stable sorting algorithm must
// be used) up until the most significant digit or from the most significant
// digit until the least significant digit.
//
// It is common that a counting sort is used internally by radix sort.
// n numbers of k digits are sorted in O(nk) time.

// Distribution sort refers to any sort algorithm where the input is
// distributed into multiple intermediate structures which are then
// gathered and placed on the output.

// Shell Sort
//
// makes the data h-sort where h starts by being half the size of
// the data set and gets halved each time.
// What does h-sorted mean?
// Eg. 10 9 8 7 6 5 4 3 2 1
//     2  9  4 7 6 5 8 3 10 1
// is 2-sorted
// Complexity: (Depends on gap sequence -> choice of d)
//   Worst: O(nlog^2n)
//   Average: O(nlog^2n) or O(n^3/2)
//   Best: O(n)
// Stable: No.
void Sort::shell_sort(int data[], const int start, const int end) {
  // Sorts data[start..end] (inclusive) in ascending order.
  //
  // BUG FIXES versus the previous version:
  //   1. It indexed from 0 instead of `start`, so it ignored the
  //      requested sub-range.
  //   2. It did only ONE compare/swap pass per gap (comb-sort style)
  //      and stopped after a single pass at gap 1, which does not
  //      guarantee a sorted result (e.g. {2, 3, 1} stayed unsorted).
  // Proper Shell sort performs a full gapped INSERTION sort for each
  // gap; any gap sequence that ends at 1 then yields a sorted range,
  // because gap == 1 degenerates to plain insertion sort over data
  // that is already h-sorted for the larger gaps (hence nearly O(n)).
  const int n = end - start + 1;
  // Shell's original sequence: n/2, n/4, ..., 1.
  for (int gap = n / 2; gap >= 1; gap /= 2) {
    // Gapped insertion sort: each element is slid left in steps of
    // `gap` until it sits after the last smaller element of its chain.
    for (int i = start + gap; i <= end; ++i) {
      const int val = data[i];
      int j = i;
      while (j - gap >= start && data[j - gap] > val) {
        data[j] = data[j - gap];
        j -= gap;
      }
      data[j] = val;
    }
  }
}

// Comb sort: Same as bubble sort but basic idea is that
// gap can be more than 1 (as opposed to 1 in bubble sort).
// The gap starts with being the length of the list being
// sorted and then gets divided by shrink factor.
// The gap keeps on getting divided by shrink factor until
// it becomes 1.
// Complexity: O(n^2), Not stable.

// Tim sort: scans the data for already-ordered runs, extending short
// runs with insertion sort where necessary, and then uses merge sort
// style merging of the runs to create the final sorted list. It has
// the same complexity O(nlogn) in the average and worst case, however
// with presorted data it goes down to O(n); it is used in Python's
// standard sort.

} // end of ads
