tfrere committed
Commit 24bdd7c · 1 Parent(s): 6ed333e
Dockerfile ADDED
@@ -0,0 +1,44 @@
+FROM node:18 as client-build
+WORKDIR /app
+COPY client/package*.json ./
+RUN npm install
+COPY client/ ./
+RUN npm run build
+
+FROM python:3.9-slim
+WORKDIR /app
+
+# Create non-root user
+RUN useradd -m -u 1000 user
+
+# Install system dependencies and poetry
+RUN apt-get update && apt-get install -y \
+    netcat-openbsd \
+    && rm -rf /var/lib/apt/lists/* \
+    && pip install poetry
+
+# Copy and install Python dependencies
+COPY server/pyproject.toml server/poetry.lock* ./
+RUN poetry config virtualenvs.create false \
+    && poetry install --no-interaction --no-ansi --only main --no-root
+
+# Copy server code
+COPY server/ ./server/
+
+# Copy client build
+COPY --from=client-build /app/dist ./static
+
+# Environment variables
+ENV API_HOST=0.0.0.0 \
+    API_PORT=7860
+
+# Create cache directory and set permissions
+RUN mkdir -p /app/cache && chown -R user:user /app/cache
+
+# Switch to non-root user
+USER user
+
+EXPOSE 7860
+
+# Start the server
+CMD ["python", "-m", "uvicorn", "server.server:app", "--host", "0.0.0.0", "--port", "7860"]
ai-comic-factory/yarn.lock ADDED
The diff for this file is too large to render.
 
client/src/App.jsx CHANGED
@@ -9,7 +9,6 @@ import {
   ListItem,
   ListItemText,
   LinearProgress,
-  ButtonGroup,
 } from "@mui/material";
 import RestartAltIcon from "@mui/icons-material/RestartAlt";
 import axios from "axios";
@@ -19,25 +18,30 @@ function App() {
   const [currentChoices, setCurrentChoices] = useState([]);
   const [isLoading, setIsLoading] = useState(false);
 
-  // Start the story when the component mounts
-  useEffect(() => {
-    handleStoryAction("start");
-  }, []);
-
   const handleStoryAction = async (action, choiceId = null) => {
     setIsLoading(true);
     try {
-      const response = await axios.post("http://localhost:8000/chat", {
+      const response = await axios.post("http://localhost:8000/api/chat", {
         message: action,
         choice_id: choiceId,
       });
 
       if (action === "restart") {
-        setStorySegments([{ text: response.data.story_text, isChoice: false }]);
+        setStorySegments([
+          {
+            text: response.data.story_text,
+            isChoice: false,
+            isDeath: response.data.is_death,
+          },
+        ]);
       } else {
         setStorySegments((prev) => [
           ...prev,
-          { text: response.data.story_text, isChoice: false },
+          {
+            text: response.data.story_text,
+            isChoice: false,
+            isDeath: response.data.is_death,
+          },
         ]);
       }
@@ -53,6 +57,11 @@ function App() {
     }
   };
 
+  // Start the story when the component mounts
+  useEffect(() => {
+    handleStoryAction("restart");
+  }, []);
+
   const handleChoice = async (choiceId) => {
     // Add the chosen option to the story
     setStorySegments((prev) => [
@@ -109,12 +118,25 @@ function App() {
                 sx={{
                   p: 2,
                   maxWidth: "80%",
-                  bgcolor: segment.isChoice ? "primary.light" : "grey.100",
-                  color: segment.isChoice ? "white" : "text.primary",
+                  bgcolor: segment.isDeath
+                    ? "error.light"
+                    : segment.isChoice
+                    ? "primary.light"
+                    : "grey.100",
+                  color:
+                    segment.isDeath || segment.isChoice
+                      ? "white"
+                      : "text.primary",
                 }}
               >
                 <ListItemText
-                  primary={segment.isChoice ? "Your Choice" : "Story"}
+                  primary={
+                    segment.isDeath
+                      ? "DEATH"
+                      : segment.isChoice
+                      ? "Your Choice"
+                      : "Story"
+                  }
                   secondary={segment.text}
                   primaryTypographyProps={{
                     variant: "subtitle2",
server/game_logic.py ADDED
@@ -0,0 +1,126 @@
+from pydantic import BaseModel, Field
+from typing import List
+from langchain_mistralai.chat_models import ChatMistralAI
+from langchain.output_parsers import PydanticOutputParser, OutputFixingParser
+from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
+
+# Game constants
+MAX_RADIATION = 10
+
+class GameState:
+    def __init__(self):
+        self.story_beat = 0
+        self.radiation_level = 0
+
+    def reset(self):
+        self.story_beat = 0
+        self.radiation_level = 0
+
+# Story output structure
+class StorySegment(BaseModel):
+    story_text: str = Field(description="The next segment of the story")
+    choices: List[str] = Field(description="Exactly two possible choices for the player", min_items=2, max_items=2)
+    is_death: bool = Field(description="Whether this segment ends in Sarah's death", default=False)
+    radiation_increase: int = Field(description="How much radiation this segment adds (0-3)", ge=0, le=3, default=1)
+
+class StoryGenerator:
+    def __init__(self, api_key: str):
+        self.parser = PydanticOutputParser(pydantic_object=StorySegment)
+        self.fixing_parser = OutputFixingParser.from_llm(
+            parser=self.parser,
+            llm=ChatMistralAI(
+                mistral_api_key=api_key,
+                model="mistral-small",
+                temperature=0.1
+            )
+        )
+
+        self.chat_model = ChatMistralAI(
+            mistral_api_key=api_key,
+            model="mistral-small",
+            temperature=0.7
+        )
+
+        self.prompt = self._create_prompt()
+
+    def _create_prompt(self) -> ChatPromptTemplate:
+        system_template = """You are narrating an EXTREMELY lethal dystopian story. Your goal is to kill Sarah in creative and brutal ways unless players make PERFECT choices. This is a horror survival game where death is the most common outcome.
+
+IMPORTANT: The first story beat (story_beat = 0) MUST be an introduction that sets up the horror atmosphere but CANNOT kill Sarah. After that, death should be frequent.
+
+RADIATION SYSTEM:
+- Each segment must specify a radiation_increase value (0-3)
+- 0: Safe area or good protection
+- 1: Standard background radiation
+- 2: Dangerous exposure
+- 3: Critical radiation levels
+- Current radiation level: {radiation_level}/10
+- If radiation reaches 10, Sarah dies horribly
+
+Core story elements:
+- Sarah is deeply traumatized by the AI uprising that killed most of humanity
+- She abandoned her sister during the Great Collapse, leaving her to die
+- She's on a suicide mission, but a quick death is not redemption
+- The radiation is EXTREMELY lethal - even minor exposure causes severe damage
+- Most choices should lead to death (except in introduction)
+- The environment actively tries to kill Sarah (raiders, AI, radiation, traps)
+
+Each response MUST contain:
+1. A detailed story segment that puts Sarah in mortal danger (except in introduction), describing:
+   - The horrific environment
+   - The immediate threats to her life
+   - Her deteriorating physical state (based on radiation_level)
+   - Her mental state and previous choices
+
+2. Exactly two VERY CONCISE choices (max 10 words each):
+   Examples of good choices:
+   - "Rush through radiation zone (+3 radiation)" vs "Take long way (+1 radiation)"
+   - "Trust the survivor" vs "Shoot on sight"
+   - "Use the old AI system" vs "Find a manual solution"
+
+   Each choice must:
+   - Be direct and brief
+   - Clearly show radiation risk when relevant
+   - Feel meaningful
+   - After introduction: both should feel dangerous
+
+{format_instructions}"""
+
+        human_template = """Current story beat: {story_beat}
+Current radiation level: {radiation_level}/10
+Previous choice: {previous_choice}
+
+Generate the next story segment and choices. If this is story_beat 0, create an atmospheric introduction that sets up the horror but doesn't kill Sarah. Otherwise, create a brutal and potentially lethal segment."""
+
+        return ChatPromptTemplate(
+            messages=[
+                SystemMessagePromptTemplate.from_template(system_template),
+                HumanMessagePromptTemplate.from_template(human_template)
+            ],
+            partial_variables={"format_instructions": self.parser.get_format_instructions()}
+        )
+
+    def generate_story_segment(self, game_state: GameState, previous_choice: str = "none") -> StorySegment:
+        # Get the formatted messages
+        messages = self.prompt.format_messages(
+            story_beat=game_state.story_beat,
+            radiation_level=game_state.radiation_level,
+            previous_choice=previous_choice
+        )
+
+        # Get response from the model
+        response = self.chat_model.invoke(messages)
+
+        # Parse the response with retry mechanism
+        try:
+            parsed_response = self.parser.parse(response.content)
+        except Exception as parsing_error:
+            print(f"First parsing attempt failed, trying to fix output: {str(parsing_error)}")
+            parsed_response = self.fixing_parser.parse(response.content)
+
+        return parsed_response
+
+    def process_radiation_death(self, segment: StorySegment) -> StorySegment:
+        segment.is_death = True
+        segment.story_text += "\n\nFINAL RADIATION DEATH: Sarah's body finally gives in to the overwhelming radiation. Her cells break down as she collapses, mind filled with regret about her sister. The medical supplies she carried will never reach their destination. Her mission ends here, another victim of the wasteland's invisible killer."
+        return segment
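
GameState, StoryGenerator, and MAX_RADIATION can also be driven outside FastAPI, which is useful for iterating on the prompt in isolation. A minimal sketch, assuming MISTRAL_API_KEY is set in the environment (generate_story_segment makes a live Mistral call); the loop body mirrors what the /api/chat endpoint in server/server.py runs per request.

    import os

    from game_logic import GameState, StoryGenerator, MAX_RADIATION

    state = GameState()
    generator = StoryGenerator(api_key=os.environ["MISTRAL_API_KEY"])  # live key required

    # One turn of the game loop, as run by the /api/chat endpoint.
    segment = generator.generate_story_segment(state, previous_choice="none")
    state.radiation_level += segment.radiation_increase
    if state.radiation_level >= MAX_RADIATION:
        segment = generator.process_radiation_death(segment)
    if not segment.is_death:
        state.story_beat += 1

    print(segment.story_text)
    print(segment.choices)  # validated by StorySegment to be exactly two strings
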
server/server.py CHANGED
@@ -1,93 +1,38 @@
 from fastapi import FastAPI, HTTPException
 from fastapi.middleware.cors import CORSMiddleware
-from pydantic import BaseModel, Field
+from fastapi.staticfiles import StaticFiles
+from pydantic import BaseModel
 from typing import List, Optional
 import os
 from dotenv import load_dotenv
-from langchain_mistralai.chat_models import ChatMistralAI
-from langchain.output_parsers import PydanticOutputParser
-from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
-from langchain.schema import HumanMessage, SystemMessage
+from game_logic import GameState, StoryGenerator, MAX_RADIATION
 
 # Load environment variables
 load_dotenv()
 
+# API configuration
+API_HOST = os.getenv("API_HOST", "0.0.0.0")
+API_PORT = int(os.getenv("API_PORT", "8000"))
+
 app = FastAPI(title="Echoes of Influence")
 
 # Configure CORS
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=["http://localhost:5173"],
+    allow_origins=[
+        "http://localhost:5173",         # Vite dev server
+        f"http://localhost:{API_PORT}",  # API port
+        "https://huggingface.co",        # HF main domain
+        "https://*.hf.space",            # HF Spaces domains
+    ],
     allow_credentials=True,
     allow_methods=["*"],
     allow_headers=["*"],
 )
 
-# Game state
-story_beat = 0
-
-# Define the structure we want the LLM to output
-class StorySegment(BaseModel):
-    story_text: str = Field(description="The next segment of the story")
-    choices: List[str] = Field(description="Exactly two possible choices for the player", min_items=2, max_items=2)
-    is_death: bool = Field(description="Whether this segment ends in Sarah's death", default=False)
-
-# Initialize the parser
-parser = PydanticOutputParser(pydantic_object=StorySegment)
-
-# Initialize Mistral Chat Model
-chat_model = ChatMistralAI(
-    mistral_api_key=os.getenv("MISTRAL_API_KEY"),
-    model="mistral-small",
-    temperature=0.7
-)
-
-# Create the system prompt
-system_template = """You are narrating a brutal and unforgiving dystopian story about Sarah, a former engineer on a suicide mission to deliver medical supplies through a deadly radiation-filled wasteland.
-
-Core story elements:
-- Sarah is deeply traumatized by the AI uprising that killed most of humanity
-- She abandoned her sister during the Great Collapse, leaving her to die
-- She's on a suicide mission, but a quick death is not redemption
-- The radiation is lethal and gets worse with each step
-- Wrong choices lead to immediate and graphic deaths
-- The environment is extremely hostile (raiders, malfunctioning AI systems, radiation storms)
-
-Death conditions (implement these strictly):
-- Any direct exposure to high radiation zones is lethal within minutes
-- Trusting the wrong people leads to death
-- Using corrupted AI systems can kill instantly
-- Hesitating too long in dangerous situations is fatal
-- Taking too many risks in succession leads to death
-
-Each response must contain:
-1. A tense story segment that puts Sarah in mortal danger
-2. Exactly two possible choices that represent different approaches:
-   - Each choice must have clear potential consequences
-   - At least one choice should always carry a significant risk of death
-   - Choices should reflect:
-     * Brutal pragmatism vs Emotional responses
-     * Quick but dangerous vs Slow but safer routes
-     * Trust vs Paranoia
-     * Using AI systems vs Manual alternatives
-
-If a choice would realistically lead to death, you MUST end the story with a detailed death scene and set is_death to true.
-
-{format_instructions}"""
-
-human_template = """Current story beat: {story_beat}
-Previous choice: {previous_choice}
-
-Generate the next story segment and choices. Remember: this is a brutal and unforgiving world where wrong choices lead to death."""
-
-# Create the chat prompt
-prompt = ChatPromptTemplate(
-    messages=[
-        SystemMessagePromptTemplate.from_template(system_template),
-        HumanMessagePromptTemplate.from_template(human_template)
-    ],
-    partial_variables={"format_instructions": parser.get_format_instructions()}
-)
+# Initialize game components
+game_state = GameState()
+story_generator = StoryGenerator(api_key=os.getenv("MISTRAL_API_KEY"))
 
 class Choice(BaseModel):
     id: int
@@ -97,61 +42,67 @@ class StoryResponse(BaseModel):
     story_text: str
     choices: List[Choice]
     is_death: bool = False
+    radiation_level: int
 
 class ChatMessage(BaseModel):
     message: str
     choice_id: Optional[int] = None
 
-@app.get("/")
-async def read_root():
-    return {"message": "Welcome to Echoes of Influence"}
-
-@app.post("/chat", response_model=StoryResponse)
+@app.get("/api/health")
+async def health_check():
+    """Health check endpoint"""
+    return {
+        "status": "healthy",
+        "game_state": {
+            "story_beat": game_state.story_beat,
+            "radiation_level": game_state.radiation_level
+        }
+    }
+
+@app.post("/api/chat", response_model=StoryResponse)
 async def chat_endpoint(chat_message: ChatMessage):
-    global story_beat
-
     try:
-        # Prepare the context
+        # Handle restart
        if chat_message.message.lower() == "restart":
-            story_beat = 0
+            game_state.reset()
             previous_choice = "none"
-        elif chat_message.choice_id is not None:
-            previous_choice = f"Choice {chat_message.choice_id}"
         else:
-            previous_choice = "none"
+            previous_choice = f"Choice {chat_message.choice_id}" if chat_message.choice_id else "none"
 
-        # Get the formatted messages
-        messages = prompt.format_messages(
-            story_beat=story_beat,
-            previous_choice=previous_choice
-        )
-
-        # Get response from the model
-        response = chat_model.invoke(messages)
+        # Generate story segment
+        story_segment = story_generator.generate_story_segment(game_state, previous_choice)
 
-        # Parse the response
-        parsed_response = parser.parse(response.content)
+        # Update radiation level
+        game_state.radiation_level += story_segment.radiation_increase
 
+        # Check for radiation death
+        if game_state.radiation_level >= MAX_RADIATION:
+            story_segment = story_generator.process_radiation_death(story_segment)
+
         # Only increment story beat if not dead
-        if not parsed_response.is_death:
-            story_beat += 1
+        if not story_segment.is_death:
+            game_state.story_beat += 1
 
         # Convert to response format
-        choices = [] if parsed_response.is_death else [
+        choices = [] if story_segment.is_death else [
             Choice(id=i, text=choice.strip())
-            for i, choice in enumerate(parsed_response.choices, 1)
+            for i, choice in enumerate(story_segment.choices, 1)
         ]
 
         return StoryResponse(
-            story_text=parsed_response.story_text,
+            story_text=story_segment.story_text,
             choices=choices,
-            is_death=parsed_response.is_death
+            is_death=story_segment.is_death,
+            radiation_level=game_state.radiation_level
         )
 
     except Exception as e:
         print(f"Error: {str(e)}")
         raise HTTPException(status_code=500, detail=str(e))
 
+# Mount static files (this should be after all API routes)
+app.mount("/", StaticFiles(directory="../client/dist", html=True), name="static")
+
 if __name__ == "__main__":
     import uvicorn
-    uvicorn.run("server:app", host="0.0.0.0", port=8000, reload=True)
+    uvicorn.run("server:app", host=API_HOST, port=API_PORT, reload=True)