﻿import numpy as np
from numpy import linalg


class Mk:
    """Policy iteration for the classic 4x3 stochastic gridworld MDP.

    States are numbered 0..11 row-major from the top-left corner:

        y=3:   0   1   2   3 (+1 terminal)
        y=2:   4   5W  6   7 (-1 terminal)      (5 is an impassable wall)
        y=1:   8   9  10  11

    Actions: 0=right, 1=down, 2=left, 3=up. The intended move succeeds
    with probability 0.8; each perpendicular slip occurs with probability
    0.1. A move into a wall or off the grid leaves the agent in place.
    """

    def __init__(self):
        self.N = 12                  # number of grid cells (4 wide x 3 tall)
        self.A = np.arange(4)        # action indices
        self.X = np.arange(self.N)   # state indices
        self.Gamma = 1               # discount factor
        self.StartState = 8
        self.EndStates = [3, 7]      # absorbing terminal states (+1 / -1)
        self.Walls = [5]             # impassable cells

        self.make_p()
        self.make_r()

    def make_p(self):
        """Build the transition tensor ``P[s, a, s']``."""
        self.P = np.zeros((self.N, len(self.A), self.N))

        # Index offsets with probabilities for each action:
        # (intended move 0.8, perpendicular slips 0.1 each).
        directions = {
            0: [(1, 0.8), (4, 0.1), (-4, 0.1)],    # right
            1: [(4, 0.8), (1, 0.1), (-1, 0.1)],    # down
            2: [(-1, 0.8), (4, 0.1), (-4, 0.1)],   # left
            3: [(-4, 0.8), (1, 0.1), (-1, 0.1)],   # up
        }

        for s in range(self.N):
            # Terminal and wall states get no outgoing transitions
            # (all-zero rows make them absorbing in eval()).
            if s in self.EndStates or s in self.Walls:
                continue
            for a in self.A:
                for offset, prob in directions[a]:
                    new_s = s + offset
                    # Bounce back in place when the move would cross the
                    # right edge ((s+1) % 4 == 0), the left edge
                    # (s % 4 == 0), leave the grid vertically, or enter
                    # a wall cell.
                    if (offset == 1 and (s + 1) % 4 == 0) or \
                            (offset == -1 and s % 4 == 0) or \
                            new_s < 0 or new_s >= self.N or new_s in self.Walls:
                        new_s = s
                    self.P[s, a, new_s] += prob

    def make_r(self):
        """Per-state rewards: -0.04 step cost, +1 / -1 at the terminals."""
        self.R = -0.04 * np.ones(self.N)
        self.R[3] = +1
        self.R[7] = -1

    def policy(self):
        """Run policy iteration.

        Returns:
            (u, pai): state values and the greedy integer policy.
        """
        pai = np.zeros(self.N, dtype=int)
        is_change = True
        while is_change:
            u = self.eval(pai)
            is_change = False
            for x in self.X:
                # Action values Q(x, .) under the current value estimate;
                # computed once per state instead of three times.
                q = np.dot(self.P[x, :, :], u)
                # 1e-5 tolerance prevents oscillating on numerical ties.
                if np.max(q) > q[pai[x]] + 1E-5:
                    pai[x] = np.argmax(q)
                    is_change = True
        return u, pai

    def update(self, r):
        """Set the step reward of every non-terminal, non-wall state to r.

        Walls are skipped for consistency with make_p(), which likewise
        treats them as special (they carry no transitions or rewards).
        """
        for i in range(self.N):
            if i not in self.EndStates and i not in self.Walls:
                self.R[i] = r

    def eval(self, pi):
        """Evaluate policy ``pi`` exactly.

        Solves the linear system u = R + Gamma * P_pi @ u, rewritten as
        (Gamma * P_pi - I) u = -R. Terminal/wall rows of P are zero, so
        their equations reduce to u[s] = R[s].
        """
        A = np.zeros((self.N, self.N))
        for i in range(self.N):
            A[i, :] = self.Gamma * self.P[i, pi[i], :]
        A = A - np.identity(self.N)
        b = -self.R
        return linalg.solve(A, b)

    def get_action(self, x, y):
        """Return the optimal action for 1-based grid coordinates (x, y).

        Raises:
            ValueError: if (x, y) maps outside the 12-state grid.
        """
        # Row y=3 is states 0-3, y=2 is 4-7, y=1 is 8-11.
        state_index = int((3 - y) * 4 + (x - 1))

        # Explicit validation: a bare `assert` would be stripped
        # under `python -O`.
        if not 0 <= state_index < self.N:
            raise ValueError(f"coordinates ({x}, {y}) are outside the grid")

        _, policy = self.policy()
        return int(policy[state_index])

# Driver: read "r x y" from stdin, set the step reward of every
# non-terminal state to r, then print the optimal action for cell (x, y).
env = Mk()
tokens = input().split()
r, x, y = (float(token) for token in tokens)
env.update(r)
print(env.get_action(x, y))

