(*
Unit Name: CFR_Kuhn
Description: CFR / CFRM Kuhn Poker Algorithm
Version: 1.0.0
Author: Ingo Kaps, 63067 Offenbach, Germany ++491713051140
Email Ingo.Kaps@GMX.de

Date: Jun 7, 2014

Copyright (C) 2014 by Ingo Kaps


This is a CFR / CFRM Kuhn Poker algorithm, translated from Java to Pascal (Delphi).

Source from: http://cs.gettysburg.edu/~tneller/modelai/2013/cfr/index.html

Counterfactual Regret Minimization is an Algorithm that can be used to find the Nash Equilibrium for games of incomplete information.

Kuhn Poker is a simple 3-card poker game by Harold E. Kuhn.


Run:

memo1.Lines.Add(KuhnTraining);


*)


unit CFR_Kuhn;

interface

function KuhnTraining:string;

implementation

uses SysUtils,System.Generics.Collections;

const NUM_ACTIONS = 2;
const max_cards   = 2;   // 0..2 -> 3

type actionsarray = array [1..NUM_ACTIONS] of double;
type cardarray    = array [0..max_cards] of nativeint;


//------------------------------------------------------------------------------


// One CFR information-set node: identified by the acting player's card plus
// the betting history, it accumulates regrets and strategy sums over training.
type TNode = class

  infoSet      : String;        // card + action history, e.g. '2pb'
  regretSum    : actionsarray;  // cumulative counterfactual regret per action
  strategy     : actionsarray;  // current strategy from regret matching (scratch)
  strategySum  : actionsarray;  // realization-weighted sum of strategies

  private

  // Current mixed strategy via regret matching; also folds the
  // realization-weighted strategy into strategySum as a side effect.
  function getStrategy(realizationWeight:double):actionsarray;

  public

  // Zero all accumulators and clear the info-set label.
  procedure init;

  // Average strategy over all iterations; converges toward a Nash equilibrium.
  function getAverageStrategy:actionsarray;
  // NOTE(review): hides TObject.ToString (no 'override'/'reintroduce'); all
  // calls in this unit go through a TNode-typed reference, so behavior is fine.
  function toString:String;

end;


procedure TNode.init;
var a : nativeint;
begin
  // Reset the node: clear the info-set label and zero every accumulator.
  infoSet := '';
  for a := 1 to NUM_ACTIONS do
    begin
      regretSum[a]   := 0;
      strategy[a]    := 0;
      strategySum[a] := 0;
    end;
end;


// Regret matching: each action is played in proportion to its positive
// cumulative regret; with no positive regret, play uniformly at random.
// Also accumulates the realization-weighted strategy into strategySum.
function TNode.getStrategy(realizationWeight:double):actionsarray;
var total : double;
    idx   : nativeint;
begin
  // Clamp negative regrets to zero and total the rest.
  total := 0;
  for idx := 1 to NUM_ACTIONS do
    begin
      if regretSum[idx] > 0 then
        strategy[idx] := regretSum[idx]
      else
        strategy[idx] := 0;
      total := total + strategy[idx];
    end;

  // Normalize (or fall back to uniform) and accumulate the weighted strategy.
  for idx := 1 to NUM_ACTIONS do
    begin
      if total > 0 then
        strategy[idx] := strategy[idx] / total
      else
        strategy[idx] := 1.0 / NUM_ACTIONS;
      strategySum[idx] := strategySum[idx] + (realizationWeight * strategy[idx]);
    end;

  Result := strategy;
end;


// Average strategy across all training iterations: the normalized
// realization-weighted strategy sums. This is the quantity that converges
// toward a Nash-equilibrium strategy, not the current strategy itself.
function TNode.getAverageStrategy:actionsarray;
var avg   : actionsarray;
    total : double;
    a     : nativeint;
begin
  total := 0;
  for a := 1 to NUM_ACTIONS do
    begin
      avg[a] := 0;
      total := total + strategySum[a];
    end;

  for a := 1 to NUM_ACTIONS do
    if total > 0 then
      avg[a] := strategySum[a] / total
    else
      avg[a] := 1.0 / NUM_ACTIONS;  // no data accumulated yet: uniform

  Result := avg;
end;


// Render the info set followed by the average pass/bet probabilities,
// each formatted with 8 decimal places.
function TNode.toString:String;
var avg : actionsarray;
begin
  avg := getAverageStrategy;
  Result := Format('%s   %.8f   %.8f', [infoSet, avg[1], avg[2]]);
end;


//------------------------------------------------------------------------------


// Trains Kuhn-poker strategies with counterfactual regret minimization.
type TKuhnTrainer = class (TObject)

  private

  // Recursive CFR traversal; returns the expected utility for the player to act.
  function cfr(cards : cardarray; history:String; p0,p1 : double):double;

  public

  // Action codes (kept for reference; cfr encodes actions as 'p'/'b'
  // characters and loops over indices 1..NUM_ACTIONS instead).
  const PASS = 0;
  const BET = 1;

  var random1 : double;  // NOTE(review): appears unused within this unit

  // infoSet -> node map. NOTE(review): must be created by the caller before
  // train() runs; the map and its TNode values are owned by the caller.
  nodeMap : TDictionary<String, TNode>;

  // Run the given number of CFR iterations and return a formatted report.
  function train(iterations:nativeint):String;

end;



// Counterfactual regret minimization over the Kuhn-poker game tree.
//
// cards   - shuffled deck; cards[0]/cards[1] are the players' hole cards
// history - action string so far: 'p' = pass/check/fold, 'b' = bet/call
// p0, p1  - reach probabilities of player 0 and player 1
//
// Returns the expected utility for the player currently to act.
// Terminal payoffs are +-1 (check-check or fold) and +-2 (bet-call),
// so 0 can safely be used as the "not terminal" sentinel.
function TKuhnTrainer.cfr(cards : cardarray; history:String; p0,p1 : double):double;
var plays,player,opponent,a : nativeint;
    terminalPass,doubleBet,isPlayerCardHigher : boolean;
    res,nodeUtil,regret : double;
    infoSet,nextHistory : String;
    node : TNode;
    util,strategy : actionsarray;
begin
  for a := 1 to NUM_ACTIONS do
    begin
      util[a] := 0;
      strategy[a] := 0;
    end;

  res := 0;  // sentinel: stays 0 only for non-terminal states
  plays := length(history);

  player := plays mod 2;
  opponent := 1 - player;

  // --- Terminal-state payoff ----------------------------------------------
  if plays > 1 then
    begin
      terminalPass := history[plays] = 'p';
      // Last two actions; Copy is 1-based (was: history.substring(plays-2, plays),
      // which relied on the 0-based string helper clamping an over-long length).
      doubleBet := Copy(history, plays - 1, 2) = 'bb';
      isPlayerCardHigher := cards[player] > cards[opponent];

      if terminalPass then
        begin
          if history = 'pp' then
            begin
              // Both checked: showdown for 1 chip.
              if isPlayerCardHigher then
                res := 1
              else
                res := -1;
            end
          else
            res := 1;  // opponent folded to a bet: win 1 chip
        end
      else if doubleBet then
        begin
          // Bet and call: showdown for 2 chips.
          if isPlayerCardHigher then
            res := 2
          else
            res := -2;
        end;
    end;

  // --- Non-terminal: recurse over both actions -----------------------------
  if res = 0 then  // FIX: was 'if erg = 0 then' — 'erg' was never declared
    begin
      infoSet := inttostr(cards[player]) + history;

      // Fetch the information-set node, creating it lazily on first visit.
      node := nil;
      nodeMap.TryGetValue(infoSet, node);
      if node = nil then
        begin
          node := TNode.Create;
          node.init;
          node.infoSet := infoSet;
          nodeMap.Add(infoSet, node);
        end;

      if player = 0 then
        strategy := node.getStrategy(p0)
      else
        strategy := node.getStrategy(p1);

      // Expected utility over both actions (a=1 -> 'p', a=2 -> 'b').
      nodeUtil := 0;
      for a := 1 to NUM_ACTIONS do
        begin
          if a = 1 then
            nextHistory := history + 'p'
          else
            nextHistory := history + 'b';

          // Negated: the recursive call returns the NEXT player's utility.
          if player = 0 then
            util[a] := - cfr(cards, nextHistory, p0 * strategy[a], p1)
          else
            util[a] := - cfr(cards, nextHistory, p0, p1 * strategy[a]);

          nodeUtil := nodeUtil + (strategy[a] * util[a]);
        end;

      // Accumulate counterfactual regret, weighted by the opponent's reach
      // probability. (Removed dead debug no-op 'regret := regret'.)
      for a := 1 to NUM_ACTIONS do
        begin
          regret := util[a] - nodeUtil;
          if player = 0 then
            node.regretSum[a] := node.regretSum[a] + (p1 * regret)
          else
            node.regretSum[a] := node.regretSum[a] + (p0 * regret);
        end;

      res := nodeUtil;
    end;

  cfr := res;
end;


// Run 'iterations' CFR training iterations, each over a freshly shuffled
// 3-card deck, and return the average game value plus the average strategy
// of every information set, one per line, sorted by info-set key.
function TKuhnTrainer.train(iterations:nativeint):String;
var util : double;
    i,c1,c2,tmp : nativeint;
    res : String;
    cards : cardarray;
    list: TList<string>;
begin
  // Kuhn poker deck: 1 (Jack), 2 (Queen), 3 (King).
  cards[0] := 1;
  cards[1] := 2;
  cards[2] := 3;

  // Guard against division by zero in the average below.
  if iterations <= 0 then
    Exit('Average game value = 0' + chr(13) + chr(10));

  util := 0;
  for i := 0 to (iterations - 1) do
    begin
      // Fisher-Yates shuffle. NOTE(review): Randomize is never called in this
      // unit, so System.Random runs from the default seed — results are
      // deterministic across runs; confirm whether that is intended.
      for c1 := max_cards downto 0 do
        begin
          c2 := random(c1 + 1);
          tmp := cards[c1];
          cards[c1] := cards[c2];
          cards[c2] := tmp;
        end;

      util := util + cfr(cards, '', 1, 1);
    end;

  // Theoretical value for player 1 is -1/18 ~ -0.0555...
  res := 'Average game value = ' + floattostr(util / iterations) + chr(13) + chr(10);

  // Report every information set in sorted order.
  list := TList<string>.Create(nodeMap.Keys);
  try
    list.Sort;
    for i := 0 to (list.Count - 1) do
      res := res + chr(13) + chr(10) + nodeMap.Items[list.Items[i]].toString;
  finally
    list.Free;
  end;

  train := res;
end;



// Main function KuhnTraining (declared in the interface section)
// Call e.g.
// memo1.Lines.Add(KuhnTraining);


// Entry point declared in the interface section. FIX: the implementation was
// named CFR_Kuhn_Training and assigned its result to the undeclared identifier
// 'KuhnTraining', so the unit could not compile ("unsatisfied forward
// declaration"). Renamed to match the interface and result assignment fixed.
// Also frees the trainer, the node map, and every TNode (previously leaked).
function KuhnTraining:string;
var iterations : nativeint;
    KuhnTrainer : TKuhnTrainer;
    node : TNode;
begin
  //iterations := 100;
  iterations := 5000000;

  KuhnTrainer := TKuhnTrainer.Create;
  try
    KuhnTrainer.nodeMap := TDictionary<String, TNode>.Create;
    try
      Result := KuhnTrainer.train(iterations);
    finally
      // The map does not own its values: free each node explicitly.
      for node in KuhnTrainer.nodeMap.Values do
        node.Free;
      KuhnTrainer.nodeMap.Free;
    end;
  finally
    KuhnTrainer.Free;
  end;
end;


end.
