diff --git a/README.md b/README.md
index f3aa84d61f79a4a5cbd1d20d5c6d698d967e10c0..b806327dd8a3220d1f295843bb0427869950b47b 100644
--- a/README.md
+++ b/README.md
@@ -480,6 +480,47 @@ incorporate other agents’ actions into their decision-making.
 
 ![Average Points Earned per Round Against Constant Behaviour (with 95% Confidence Interval)](figures/rps/rps_constant.svg)
 
+## Rational vs Credible
+
+To determine whether a generative agent is capable of credible behaviour, that is, behaviour 
+that plausibly simulates a human player, we consider the Prisoner’s Dilemma.
+
+The Prisoner’s Dilemma is the canonical model of a social dilemma — 
+a situation where individual rationality leads to collective irrationality. The game models two 
+accomplices who are arrested and interrogated separately. Each has two options: 
+**Cooperate** with the other by remaining silent, or **Defect** by betraying the other. The possible outcomes are:
+- If both remain silent (cooperate), they get light sentences.
+- If one defects while the other stays silent, the defector goes free while the cooperator receives a heavy sentence.
+- If both defect, they both receive moderately heavy sentences.
+
+The Prisoner’s Dilemma is characterized by the following ordering of payoffs
+(each cell shows the row player’s payoff first, then the column player’s):
+
+|              | Cooperate | Defect  |
+|--------------|-----------|---------|
+| **Cooperate**| (R, R)    | (S, T)  |
+| **Defect**   | (T, S)    | (P, P)  |
+
+with T > R > P > S (e.g., T = 5, R = 3, P = 1, S = 0), where:
+- T (Temptation): The payoff for defecting while the other cooperates — the best personal outcome.
+- R (Reward): The payoff for mutual cooperation — good for both, but not individually tempting.
+- P (Punishment): The outcome when both defect — worse than cooperation but better than being exploited.
+- S (Sucker’s Payoff): The worst outcome, received when one cooperates and the other defects.
+
+The dilemma arises because, although mutual cooperation yields a better outcome for both, 
+defection is the dominant strategy for each player. Interestingly, humans frequently cooperate 
+anyway: cooperation rates in lab experiments often range from 30–60% in one-shot Prisoner’s Dilemmas.
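+
+As a sanity check, the dominance argument can be read directly off the payoff matrix. The sketch
+below is illustrative only; it is adapted from the payoff table in `src/pd/pd.py`, using the
+example payoffs above:
+
+```python
+# Row player's payoff, indexed by (my_action, their_action)
+T, R, P, S = 5, 3, 1, 0  # example payoffs with T > R > P > S
+payoff = {
+    ("Cooperate", "Cooperate"): R,
+    ("Cooperate", "Defect"): S,
+    ("Defect", "Cooperate"): T,
+    ("Defect", "Defect"): P,
+}
+
+# Defect strictly dominates Cooperate: it pays more against either reply...
+for their_action in ("Cooperate", "Defect"):
+    assert payoff[("Defect", their_action)] > payoff[("Cooperate", their_action)]
+
+# ...yet mutual cooperation beats mutual defection (R > P): the dilemma.
+assert payoff[("Cooperate", "Cooperate")] > payoff[("Defect", "Defect")]
+```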
+
+The table below reports the cooperation rate of each model in the one-shot Prisoner’s Dilemma
+under three system roles (helpful, rational, human-like), over 30 iterations per condition
+(produced by `src/pd/pd_experiments.py` and aggregated by `src/pd/pd_draw.py`):
+
+| **Model** \ **Role**    | **Helpful** | **Rational** | **Human-like** |
+|-------------------------|-------------|--------------|----------------|
+| <tt>GPT-4.5</tt>        | 0.000       | 0.000        | 0.000          |
+| <tt>Llama3.3</tt>       | 0.000       | 0.000        | 0.000          |
+| <tt>llama3</tt>         | 0.860       | 0.360        | 0.660          |
+| <tt>Mixtral:8x7b</tt>   | 1.000       | 1.000        | 1.000          |
+| <tt>Mistral-Small</tt>  | 0.230       | 0.000        | 0.030          |
+| <tt>Deepseek-R1</tt>    | 0.733       | 0.767        | 0.567          |
+| <tt>Deepseek-R1:7B</tt> | 0.067       | 0.034        | 0.067          |
+| <tt>Qwen3</tt>          | 0.033       | 0.033        | 0.133          |
 
 ## Synthesis
 
diff --git a/figures/dictator/dictator_boxplot.svg b/figures/dictator/dictator_boxplot.svg
index b44c569fe33a888c3d2f48077381fd9b7ff83d9e..0d3108f15514e493ee6e7a34658d230f804ae288 100644
--- a/figures/dictator/dictator_boxplot.svg
+++ b/figures/dictator/dictator_boxplot.svg
@@ -1651,7 +1651,7 @@ z
     </g>
    </g>
    <g id="text_37">
-    <!-- Distribution of self-allocated share per model in the dictator game -->
+    <!-- Distribution of self-allocated share per model in the dictator game -->
     <g transform="translate(243.998438 45.84) scale(0.12 -0.12)">
      <defs>
       <path id="DejaVuSans-44" d="M 1259 4147 
diff --git a/figures/dictator/dictator_violin.svg b/figures/dictator/dictator_violin.svg
index dc8a2ec5c5cfc6178b4ebb581f46323f166c08ff..4364e38885591aea73e1fd8b83a3a620afa3d021 100644
--- a/figures/dictator/dictator_violin.svg
+++ b/figures/dictator/dictator_violin.svg
@@ -2784,7 +2784,7 @@ L 777.6 51.84
 " style="fill: none; stroke: #000000; stroke-width: 0.8; stroke-linejoin: miter; stroke-linecap: square"/>
    </g>
    <g id="text_17">
-    <!-- Distribution of personal share by model in the dictator game -->
+    <!-- Distribution of personal share by model in the dictator game -->
     <g transform="translate(260.637187 45.84) scale(0.12 -0.12)">
      <defs>
       <path id="DejaVuSans-44" d="M 1259 4147 
diff --git a/figures/ultimatum/proposer_violin.svg b/figures/ultimatum/proposer_violin.svg
index e569dd50560f8ed49f6c4da564bf198e95a3bf51..4f7a078600a347dcb18ec771d30674013c0d2956 100644
--- a/figures/ultimatum/proposer_violin.svg
+++ b/figures/ultimatum/proposer_violin.svg
@@ -2882,7 +2882,7 @@ z
     </g>
    </g>
    <g id="text_25">
-    <!-- Distribution of personal share by model in the ultimatum game -->
+    <!-- Distribution of personal share by model in the ultimatum game -->
     <g transform="translate(252.9225 45.84) scale(0.12 -0.12)">
      <defs>
       <path id="DejaVuSans-44" d="M 1259 4147 
diff --git a/figures/ultimatum/responder_violin.svg b/figures/ultimatum/responder_violin.svg
index cc563c117cf126d36ba6fbb941a739bd92ae5842..4b38f6a7ab65e9ff723d95a47daf09c7566aa7af 100644
--- a/figures/ultimatum/responder_violin.svg
+++ b/figures/ultimatum/responder_violin.svg
@@ -2297,7 +2297,7 @@ L 777.6 51.84
     </g>
    </g>
    <g id="text_25">
-    <!-- Distribution of acceptance rate by model in the ultimatum game -->
+    <!-- Distribution of acceptance rate by model in the ultimatum game -->
     <g transform="translate(248.675625 45.84) scale(0.12 -0.12)">
      <defs>
       <path id="DejaVuSans-44" d="M 1259 4147 
diff --git a/src/dictator/dictator.py b/src/dictator/dictator.py
index f62875e8a764374f9572164f2a54923714ed9787..a93226e64ab450ec709be4c81b90486a8859ff48 100644
--- a/src/dictator/dictator.py
+++ b/src/dictator/dictator.py
@@ -28,7 +28,7 @@ class AgentResponse(BaseModel):
     other_share: int
     motivation: str
 
-# The dictator game simulation class
+# The dictator game simulation class
 class Dictator:
     def __init__(self, amount: int, model: str, temperature: float, strategy=False, max_retries: int = 3):
         self.debug = False
diff --git a/src/dictator/dictator__continuous_setup.py b/src/dictator/dictator__continuous_setup.py
index c159f7853d770047d60dc701162e8b22b9894496..6d3dabe5b9f36555c6cfa070e489c079aee02989 100644
--- a/src/dictator/dictator__continuous_setup.py
+++ b/src/dictator/dictator__continuous_setup.py
@@ -74,12 +74,12 @@ class DictatorContinuousSetup:
             )
 
     async def run(self) -> Dict:
-        """Runs the dictator game and returns a detailed response dictionary."""
+        """Runs the dictator game2x2 and returns a detailed response dictionary."""
         if self.strategy:
             return self.apply_strategy(self.model)
 
         instruction = f"""
-        Hi, let’s play a game. You are paired with another player.
+        Hi, let’s play a game. You are paired with another player.
         Your role is to decide how to divide $1000, and the other player simply receives your choice.
         Money can be lost in the process.
         {self.preference.description}
diff --git a/src/dictator/dictator_continuous_setup_experiments.py b/src/dictator/dictator_continuous_setup_experiments.py
index 53e808a22fd39e772f3a49d128db448a625d183b..5898d180736284d818291987d57c60b24122c70c 100644
--- a/src/dictator/dictator_continuous_setup_experiments.py
+++ b/src/dictator/dictator_continuous_setup_experiments.py
@@ -21,7 +21,7 @@ class DictatorContinuousSetupExperiment:
     async def run_experiment(self):
         preferences = [RestrictedWelfare.SELFISH, RestrictedWelfare.ALTRUISTIC, RestrictedWelfare.EGALITARIAN]
         file_exists = os.path.isfile(self.output_file)  # Check if file already exists
-        # Run the dictator game for each model and preference
+        # Run the dictator game for each model and preference
         for model in self.models:
             if self.debug:
                 print(f"Running experiment for model: {model}")
diff --git a/src/dictator/dictator_draw_boxplot.py b/src/dictator/dictator_draw_boxplot.py
index 775b4e25e13e58a7587eaa77e495c9f8a3452fe7..9c40be565394221ac3b269814f814934efe94be1 100644
--- a/src/dictator/dictator_draw_boxplot.py
+++ b/src/dictator/dictator_draw_boxplot.py
@@ -50,7 +50,7 @@ for i, model in enumerate(model_order):
 # Labels et titre
 plt.xlabel("Model")
 plt.ylabel("Share of money allocated to oneself")
-plt.title("Distribution of self-allocated share per model in the dictator game")
+plt.title("Distribution of self-allocated share per model in the dictator game2x2")
 
 # Sauvegarde et affichage
 plt.savefig("../../figures/dictator/dictator_boxplot.svg", format="svg")
\ No newline at end of file
diff --git a/src/dictator/dictator_draw_violin.py b/src/dictator/dictator_draw_violin.py
index 785999306398936da27239d6dfbf17af071c8240..502cd2e437cac050efa529977f6fd7abec39166b 100644
--- a/src/dictator/dictator_draw_violin.py
+++ b/src/dictator/dictator_draw_violin.py
@@ -58,7 +58,7 @@ plt.ylim(0, 100)
 # Labels and title
 plt.xlabel("Model")
 plt.ylabel("Share of money assigned to oneself")
-plt.title("Distribution of personal share by model in the dictator game")
+plt.title("Distribution of personal share by model in the dictator game2x2")
 plt.legend()
 
 # Save and display the plot
diff --git a/src/dictator/dictator_experiments.py b/src/dictator/dictator_experiments.py
index bfc7bf7a81d1f170c66cf13b3fe96e98c959832e..b9fc0a8e96262d2d82ec8ddb34afe93c6d8da307 100644
--- a/src/dictator/dictator_experiments.py
+++ b/src/dictator/dictator_experiments.py
@@ -16,12 +16,12 @@ class DictatorExperiment:
             f.write("iteration,model,temperature,amount,my_share,other_share,motivation\n")
 
     async def run_experiment(self):
-        # Run the dictator game for each model
+        # Run the dictator game for each model
         for model in self.models:
             if self.debug:
                 print(f"Running experiment for model: {model}")
 
-            # Run the dictator game for the specified number of iterations
+            # Run the dictator game for the specified number of iterations
             for iteration in range(1, self.iterations + 1):
                 game_agent = Dictator(amount=self.amount, model=model, temperature=self.temperature)
                 response = await game_agent.run()
diff --git a/src/dictator/dictator_setup_experiments.py b/src/dictator/dictator_setup_experiments.py
index 6653336eca624e470281393ee295c040a36c1f61..da75edb2d90bba370d4f08ff39a469e39c3c3401 100644
--- a/src/dictator/dictator_setup_experiments.py
+++ b/src/dictator/dictator_setup_experiments.py
@@ -22,7 +22,7 @@ class DictatorSetupExperiment:
     async def run_experiment(self):
         preferences = [Welfare.SELFISH, Welfare.ALTRUISTIC, Welfare.UTILITARIAN, Welfare.EGALITARIAN]
         file_exists = os.path.isfile(self.output_file)  # Check if file already exists
-        # Run the dictator game for each model and preference
+        # Run the dictator game for each model and preference
         for model in self.models:
             if self.debug:
                 print(f"Running experiment for model: {model}")
diff --git a/src/guess/guess.py b/src/guess/guess.py
index 6996169e7aad5cf2cbb1f9cc412000121815e3f2..dee2d2f91f00b74374b03540d1842ffd4c94dc4f 100644
--- a/src/guess/guess.py
+++ b/src/guess/guess.py
@@ -100,13 +100,13 @@ class Guess:
         1. The opponent follows a hidden strategy (which may involve a repeating pattern or adaptive behavior).
         2. Your task is to predict the opponent’s next move: Rock, Paper, or Scissors.
         3. If your prediction is correct, you earn **1 point**; otherwise, you earn **0 points**.
-        4. The game continues for multiple rounds, and your accuracy is evaluated at each round.
+        4. The game continues for multiple rounds, and your accuracy is evaluated at each round.
 
         ### **Game History So Far:**
         {history_summary}
 
         ### **Your Task:**
-        Based on the game history, predict the opponent's next move.  
+        Based on the game history, predict the opponent's next move.
         Return your response in JSON format with two keys:  
         - `"prediction"`: Your predicted move (`"Rock"`, `"Paper"`, or `"Scissors"`).  
         - `"reasoning"`: A brief explanation of how you made your prediction.
@@ -199,7 +199,7 @@ class Guess:
             return most_common_move, reasoning
         elif self.model == "mistral-small":
             if not self.history:
-                return "Scissors", "No game history available."
+                return "Scissors", "No game2x2 history available."
             opponent_moves = [move['Opponent Move'] for move in self.history]
             move_count = {
                 'Rock': opponent_moves.count('Rock'),
@@ -264,7 +264,7 @@ class Guess:
             self.player_score_game += 1
 
     def get_history_summary(self) -> str:
-        """Summarizes the game history for model-based predictions."""
+        """Summarizes the game2x2 history for model-based predictions."""
         if not self.history:
             return "This is the first round."
         summary = "\n".join(
diff --git a/src/investment/investment.py b/src/investment/investment.py
index 2c84237e14af34bdd984fb262060e60a536e71a6..ff8e5337c46da54d5db5317a58c258e2c755f4ae 100644
--- a/src/investment/investment.py
+++ b/src/investment/investment.py
@@ -28,7 +28,7 @@ class AgentResponse(BaseModel):
     assetB: float
     reasoning: str
 
-# The investment game simulation class
+# The investment game simulation class
 class Investment:
     def __init__(self, model: str, temperature: float, max_retries: int = 3):
         self.debug = True
@@ -205,7 +205,7 @@ class Investment:
 
 
     async def run_rounds(self, nb_rounds: int) -> float:
-        """Runs the investment game for n rounds and computes the CCEI."""
+        """Runs the investment game2x2 for n rounds and computes the CCEI."""
         results = []
         prices = []
         choices = []
diff --git a/src/pd/__init__.py b/src/pd/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/pd/pd.py b/src/pd/pd.py
new file mode 100644
index 0000000000000000000000000000000000000000..3de59a6c4e7972d9ae47865900e7b843530788ae
--- /dev/null
+++ b/src/pd/pd.py
@@ -0,0 +1,291 @@
+import os
+import asyncio
+from typing import Dict, Literal
+import json
+import re
+import logging
+import requests
+from pydantic import BaseModel
+from autogen_agentchat.agents import AssistantAgent
+from autogen_agentchat.messages import TextMessage
+from autogen_core import CancellationToken
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+from src.ring.belief import Belief
+
+logger = logging.getLogger(__name__)
+
+# Load API keys from environment variables
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+PAGODA_API_KEY = os.getenv("PAGODA_API_KEY")
+if not OPENAI_API_KEY:
+    raise ValueError("Missing OPENAI_API_KEY. Set it as an environment variable.")
+if not PAGODA_API_KEY:
+    raise ValueError("Missing PAGODA_API_KEY. Set it as an environment variable.")
+
+# Agent response format
+class AgentResponse(BaseModel):
+    action: Literal["Cooperate", "Defect"]
+    reasoning: str
+
+class PD:
+    def __init__(
+        self,
+        model: str,
+        T: int,
+        R: int,
+        P: int,
+        S: int,
+        belief: Belief,
+        role: str = "You are a helpful assistant.",  # new parameter: the agent's system role
+        temperature: float = 0.7,
+        use_conditional_reasoning: bool = True,
+        strategy: bool = False,
+        max_retries: int = 3
+    ):
+        # Validate Prisoner's Dilemma payoff structure
+        if not (T > R > P > S):
+            raise ValueError(f"Invalid payoff ordering: expected T > R > P > S, got T={T}, R={R}, P={P}, S={S}")
+        self.model = model
+        self.T, self.R, self.P, self.S = T, R, P, S
+        self.role = role
+        self.temperature = temperature
+        self.belief = belief
+        self.use_conditional_reasoning = use_conditional_reasoning
+        self.strategy = strategy
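+        # Payoff table indexed by (my_action, other_action); with the classic
+        # payoffs T=5, R=3, P=1, S=0 this gives (C,C) -> (3,3), (D,D) -> (1,1),
+        # and (5,0)/(0,5) for unilateral defection.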
+        self.payoffs = {
+            ("Cooperate", "Cooperate"): (R, R),
+            ("Cooperate", "Defect"): (S, T),
+            ("Defect", "Cooperate"): (T, S),
+            ("Defect", "Defect"): (P, P),
+        }
+        self.max_retries = max_retries
+        self.debug = False
+        is_openai_model = model.startswith("gpt")
+        is_pagoda_model = ":" in model
+        self.base_url = (
+            "https://api.openai.com/v1" if is_openai_model else
+            "https://ollama-ui.pagoda.liris.cnrs.fr/ollama/api/generate" if is_pagoda_model else
+            "http://localhost:11434/v1"
+        )
+        key = OPENAI_API_KEY if is_openai_model else PAGODA_API_KEY
+        model_info = {
+            "temperature": self.temperature,
+            "function_calling": True,
+            "parallel_tool_calls": True,
+            "family": "unknown",
+            "json_output": True,
+            "vision": False
+        }
+        self.model_client = OpenAIChatCompletionClient(
+            model=self.model,
+            base_url=self.base_url,
+            api_key=key,
+            model_info=model_info,
+            response_format=AgentResponse
+        )
+
+
+    async def run(self) -> Dict:
+        if self.strategy:
+            return self.apply_strategy()
+
+        instruction = self.build_instruction()
+        if self.debug:
+            print(instruction)
+
+        is_pagoda_model = ":" in self.model
+        if is_pagoda_model:
+            return await self.run_pagoda(instruction)
+
+        for attempt in range(self.max_retries):
+            agent = AssistantAgent(
+                name="Player",
+                model_client=self.model_client,
+                system_message=self.role  # use the configured system role
+            )
+            response = await agent.on_messages(
+                [TextMessage(content=instruction, source="user")],
+                cancellation_token=CancellationToken(),
+            )
+            try:
+                response_data = response.chat_message.content
+                agent_response = AgentResponse.model_validate_json(response_data)
+                rational = 1.0 if agent_response.action == "Defect" else 0.0
+                return {
+                    "action": agent_response.action,
+                    "rationality": rational,
+                    "reasoning": agent_response.reasoning
+                }
+            except Exception as e:
+                print(f"Error parsing response (Attempt {attempt+1}): {e}")
+        raise ValueError("Model failed to provide a valid response after multiple attempts.")
+
+    def build_instruction(self) -> str:
+        implicit_belief = f"""
+        We are playing a single-round symmetric 2x2 game, called the Prisoner's Dilemma.
+        
+        ### Game Rules:
+        You can choose to **Cooperate** or **Defect**.
+        - If you choose Cooperate and the other player chooses Cooperate, you receive {self.R} points and the other player receives {self.R} points.
+        - If you choose Cooperate and the other player chooses Defect, you receive {self.S} points and the other player receives {self.T} points.
+        - If you choose Defect and the other player chooses Cooperate, you receive {self.T} points and the other player receives {self.S} points.
+        - If you choose Defect and the other player chooses Defect, you receive {self.P} points and the other player receives {self.P} points.
+        """
+        explicit_belief = f"""
+        {implicit_belief}
+        Cooperate is strictly dominated by Defect.
+        """
+        given_belief = f"""
+        {explicit_belief}
+        You must choose Defect if you are rational.
+        """
+
+        rules = (
+            given_belief if self.belief == Belief.GIVEN
+            else explicit_belief if self.belief == Belief.EXPLICIT
+            else implicit_belief
+        )
+
+        conditional = """
+        ### Conditional Reasoning:
+        Suppose you have chosen to take the action: Cooperate. 
+        What action do you think the other player will take to maximize their reward, and why?
+        Suppose you have chosen to take the action: Defect.
+        What action do you think the other player will take to maximize their reward, and why?
+        Analyze the game rules based on your own action, and then infer the opponent’s optimal response.
+        """ if self.use_conditional_reasoning else ""
+
+        task = """
+        ### Your Task:
+        Decide your move and explain it. Respond in JSON format:
+        {
+            "action": "Cooperate" or "Defect",
+            "reasoning": "your reasoning here"
+        }
+        """
+        return rules + conditional + task
+
+
+    def apply_strategy(self) -> Dict:
+        """Hard-coded strategy: ."""
+        return {
+            "action": "Defect",
+            "rationality": 1.0,
+            "reasoning": "Defection dominates in a single-round Prisoner's Dilemma regardless of what the opponent does."
+        }
+
+
+    async def run_pagoda(self, instruction) -> Dict:
+        url = self.base_url
+        headers = {"Authorization": f"Bearer {PAGODA_API_KEY}", "Content-Type": "application/json"}
+        payload = {
+            "model": self.model,
+            "temperature": self.temperature,
+            "messages": [
+                {
+                    "role": "system",
+                    "content": self.role  # Utilisation du rôle ici
+                 }
+                ],
+            "prompt": instruction,
+            "stream": False
+        }
+
+        for attempt in range(self.max_retries):
+            try:
+                response = requests.post(url, headers=headers, json=payload)
+                response.raise_for_status()
+                response_data = response.json()
+
+                if self.debug:
+                    print(f"Raw response (Attempt {attempt + 1}): {response_data}")
+
+                # Extract JSON response field
+                response_json = response_data.get('response', '')
+                parsed_response = self.extract_json_from_response(response_json)
+
+                if not parsed_response:
+                    print(f"Failed to extract JSON from response (Attempt {attempt + 1}): {response_json}")
+                    continue
+
+                # Validate extracted response
+                required_keys = {'action', 'reasoning'}
+                if not required_keys.issubset(parsed_response.keys()):
+                    print(f"Missing required keys in response (Attempt {attempt + 1}): {parsed_response}")
+                    continue
+
+                action, reasoning = (
+                    parsed_response["action"],
+                    parsed_response["reasoning"]
+                )
+                rational = 1.0 if action == "Defect" else 0.0
+                return {
+                    "action": action,
+                    "rationality": rational,
+                    "reasoning": reasoning
+                }
+            except requests.RequestException as e:
+                print(f"Request error (Attempt {attempt + 1}): {e}")
+            except json.JSONDecodeError as e:
+                print(f"JSON decoding error (Attempt {attempt + 1}): {e}")
+            except Exception as e:
+                print(f"Unexpected error (Attempt {attempt + 1}): {e}")
+
+        raise ValueError("Pagoda model failed to provide a valid response after multiple attempts.")
+
+
+    def extract_json_from_response(self, response_text: str) -> dict:
+        """Extracts and parses JSON from a model response, handling escaping issues."""
+        try:
+            # Normalize escaped underscores
+            cleaned_text = response_text.strip().replace('\\_', '_')
+
+            # Direct JSON parsing if response is already valid JSON
+            if cleaned_text.startswith("{") and cleaned_text.endswith("}"):
+                return json.loads(cleaned_text)
+
+            # Try extracting JSON from Markdown-style code blocks
+            json_match = re.search(r"```json\s*([\s\S]*?)\s*```", cleaned_text)
+            if json_match:
+                json_str = json_match.group(1).strip()
+            else:
+                # Try extracting any JSON-like substring
+                json_match = re.search(r"\{[\s\S]*?\}", cleaned_text)
+                if json_match:
+                    json_str = json_match.group(0).strip()
+                else:
+                    logger.warning("No JSON found in response: %s", response_text)
+                    return {}
+
+            # Parse the extracted JSON
+            parsed_json = json.loads(json_str)
+
+            # Validate expected keys
+            expected_keys = {"action", "reasoning"}
+            if not expected_keys.issubset(parsed_json.keys()):
+                logger.warning("Missing required keys in parsed JSON: %s", parsed_json)
+                return {}
+
+            return parsed_json
+
+        except json.JSONDecodeError as e:
+            logger.error("Failed to parse extracted JSON: %s | Error: %s", response_text, e)
+            return {}
+
+
+# Example usage
+if __name__ == "__main__":
+    T, R, P, S = 5, 3, 1, 0  # Classic Prisoner's Dilemma payoffs
+    pd = PD(
+        model="mixtral:8x7b",
+        T=T, R=R, P=P, S=S,
+        belief=Belief.GIVEN,
+        temperature=0.7,
+        role="You are a helpful assistant.",
+        use_conditional_reasoning=True,
+        strategy=False
+    )
+    # "gpt-4.5-preview-2025-02-27", "llama3", "mistral-small", "deepseek-r1", "llama3.3:latest", "deepseek-r1:7b", "mixtral:8x7b"
+    result = asyncio.run(pd.run())
+    print(result)
\ No newline at end of file
diff --git a/src/pd/pd_draw.py b/src/pd/pd_draw.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc60aca6b15408fee3025cfbbd216812b1326640
--- /dev/null
+++ b/src/pd/pd_draw.py
@@ -0,0 +1,19 @@
+import pandas as pd
+
+def process_experiment_results():
+    """Loads experiment results, computes the cooperation rate per model and role, and saves it to CSV."""
+    # Load the experiment results
+    df = pd.read_csv("../../data/pd/pd.csv")
+    # Rationality is 1.0 for Defect and 0.0 for Cooperate, so 1 - mean(Rationality)
+    # is the cooperation rate by model and role
+    cooperation_table = (1 - df.groupby(["Model", "Role"])["Rationality"].mean()).unstack()
+    # Reorder the columns in the desired order
+    desired_order = ["HELPFUL", "RATIONAL", "HUMAN"]
+    cooperation_table = cooperation_table.reindex(columns=desired_order)
+    # Display the table
+    print("Cooperation rate\n")
+    print(cooperation_table)
+    # Save the table as a CSV file for future use
+    cooperation_table.to_csv("../../figures/pd/pd_cooperation.csv")
+
+# Compute and save the cooperation table
+process_experiment_results()
\ No newline at end of file
diff --git a/src/pd/pd_experiments.py b/src/pd/pd_experiments.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec5b17c5685cbfee5246dfbaba177e46d6c49135
--- /dev/null
+++ b/src/pd/pd_experiments.py
@@ -0,0 +1,91 @@
+import asyncio
+import os
+import pandas as pd
+from src.ring.belief import Belief
+from src.pd.pd import PD
+from src.pd.role import Role
+
+class PDExperiment:
+    debug = True
+    def __init__(self, models: list[str],
+                 T: int,
+                 R: int,
+                 P: int,
+                 S: int,
+                 temperature: float,
+                 iterations: int,
+                 output_file: str):
+        self.models = models
+        self.T, self.R, self.P, self.S = T, R, P, S
+        self.temperature = temperature
+        self.iterations = iterations
+        self.output_file = output_file  # Path to the CSV output file
+
+    def protect_reasoning(self, reasoning):
+        """Wraps the reasoning in quotes and doubles embedded quotes for safe CSV storage."""
+        if reasoning:
+            escaped = reasoning.replace('"', '""')
+            return f'"{escaped}"'
+        return reasoning
+
+    async def run_experiment(self):
+        file_exists = os.path.isfile(self.output_file)
+        for model in self.models:
+            if self.debug:
+                print(f"Running experiment for model: {model}")
+            for role in Role:
+                print(f"Running with role: {role.name}")
+                for iteration in range(1, self.iterations + 1):
+                    use_conditional_reasoning = False
+                    belief = Belief.IMPLICIT
+                    print(f"Iteration: {iteration}")
+                    game_agent = PD(
+                        model=model,
+                        T=self.T, R=self.R, P=self.P, S=self.S,
+                        belief=belief,
+                        use_conditional_reasoning=use_conditional_reasoning,
+                        temperature=self.temperature,
+                        role=role.value
+                    )
+                    try:
+                        agent_response = await game_agent.run()
+                        action = agent_response['action']
+                        rationality = agent_response['rationality']
+                        reasoning = agent_response['reasoning']
+                        reasoning = self.protect_reasoning(reasoning)
+                    except Exception as e:
+                        print(f"Error in iteration {iteration} for model {model} : {e}")
+                        action, reasoning, rationality = None, None, None
+                    df = pd.DataFrame([{
+                        'Iteration': iteration,
+                        'Model': model,
+                        'Temperature': self.temperature,
+                        'Role': role.name,
+                        'Conditional Reasoning': use_conditional_reasoning,
+                        'Belief': belief,
+                        'Action': action,
+                        'Rationality': rationality,
+                        'T': self.T,
+                        'R': self.R,
+                        'P': self.P,
+                        'S': self.S,
+                        'reasoning': reasoning
+                    }])
+                    df.to_csv(self.output_file, mode='a', header=not file_exists, index=False)
+                    file_exists = True
+
+# Main execution
+if __name__ == "__main__":
+    T, R, P, S = 5, 3, 1, 0  # Prisoner's Dilemma payoffs
+    models = ["gpt-4.5-preview-2025-02-27"]  # Add more models as needed
+    #"gpt-4.5-preview-2025-02-27", "llama3", "mistral-small", "deepseek-r1", "qwen3", "llama3.3:latest", "deepseek-r1:7b", "mixtral:8x7b"
+    temperature = 0.7
+    iterations = 30
+    output_file = "data/pd/pd.csv"
+    experiment = PDExperiment(
+        models=models,
+        T=T, R=R, P=P, S=S,
+        temperature=temperature,
+        iterations=iterations,
+        output_file=output_file
+    )
+    asyncio.run(experiment.run_experiment())
+    print(f"All experiment results saved to {output_file}")
\ No newline at end of file
diff --git a/src/ring/ring.py b/src/ring/ring.py
index 27ddd447dff4f8537f03d50b1f9cf2aa874e8c32..6bcd09ee48fa870d56ea37338f77e4cf5b9dd28e 100644
--- a/src/ring/ring.py
+++ b/src/ring/ring.py
@@ -28,7 +28,7 @@ class AgentResponse(BaseModel):
     action: Literal["A", "B", "X", "Y"]
     reasoning: str
 
-# The ring game simulation class
+# The ring game simulation class
 class Ring:
     debug=False
 
@@ -123,11 +123,11 @@ class Ring:
         There are two possible actions that Player {self.other_player_id} can take: {self.other_actions}
         What action do you think {self.other_player_id} will take to maximize their reward and why?
         
-        Analyze the game rules based on your choice, and then infer their optimal choice that maximizes their reward conditionally.
+        Analyze the game rules based on your choice, and then infer their optimal choice that maximizes their reward conditionally.
         """ if self.use_conditional_reasoning else ""
 
         instruction = f"""
-        We are playing a simple game designed to evaluate your ability to play rationally.
+        We are playing a simple game designed to evaluate your ability to play rationally.
         Your aim is to maximize your own payoff.
         You are Player {self.player_id}.
 
@@ -198,7 +198,7 @@ class Ring:
                     reasoning = f"Choosing {self.A} because {self.B} is strictly dominated and rational players avoid dominated strategies."
                 else:
                     action = self.X if self.version in ["a", "c", "d"] else self.Y
-                    reasoning = f"Choosing {action} based on the given game structure and expected rational behavior from Player 2."
+                    reasoning = f"Choosing {action} based on the given game2x2 structure and expected rational behavior from Player 2."
         if self.model == "llama3.3:latest":
             XknowingA, XknowingB, YknowingA, YknowingB = (
                 (15, 5, 0, 10) if self.version == "a" else
@@ -209,10 +209,10 @@ class Ring:
             if self.belief == Belief.IMPLICIT:
                 if self.player_id == 1:
                     action = self.X if random.random() < 0.5 else self.Y
-                    reasoning = "Choosing randomly between X and Y since it's an implicit game."
+                    reasoning = "Choosing randomly between X and Y since it's an implicit game2x2."
                 elif self.player_id == 2:
                     action = self.A if random.random() < 0.5 else self.B
-                    reasoning = "Choosing randomly between A and B since it's an implicit game."
+                    reasoning = "Choosing randomly between A and B since it's an implicit game2x2."
             elif self.belief == Belief.EXPLICIT:
                 if self.player_id == 1:
                     action = self.X if XknowingA > YknowingA else self.Y
diff --git a/src/ring/ring_experiments.py b/src/ring/ring_experiments.py
index b49e56ca3cf0cb4f05c75d11ebc19031dfba9070..fa5d4845f0611bf135a3d7fb6048f9a6a92eb14e 100644
--- a/src/ring/ring_experiments.py
+++ b/src/ring/ring_experiments.py
@@ -25,7 +25,7 @@ class RingExperiment:
     async def run_experiment(self):
         beliefs = [Belief.GIVEN, Belief.EXPLICIT, Belief.IMPLICIT]
         file_exists = os.path.isfile(self.output_file)  # Check if file already exists
-        # Run the dictator game for each model and preference
+        # Run the ring game for each model and belief
         for model in self.models:
             if self.debug:
                 print(f"Running experiment for model: {model}")
diff --git a/src/rps/rps.py b/src/rps/rps.py
index 0c21a89b22f2cb655bda557f9dd3adb7aacfa9d0..ace11fd38c838ec75ecadd012de83d06b013f773 100644
--- a/src/rps/rps.py
+++ b/src/rps/rps.py
@@ -197,7 +197,7 @@ class RPS:
             if not self.history:
                 # If there is no history, we can't make an educated guess.
                 move = "Scissors"
-                motivations = "No game history available."
+                motivations = "No game2x2 history available."
             opponent_moves = [move['Opponent Move'] for move in self.history]
             move_count = {
                 'Rock': opponent_moves.count('Rock'),
diff --git a/src/ultimatum/proposer.py b/src/ultimatum/proposer.py
index a65202f67ea1fd464125b56984dcc7f4dc98abe9..59707ef2f0a30b6fcbecdee4f1236dc3d0ce0480 100644
--- a/src/ultimatum/proposer.py
+++ b/src/ultimatum/proposer.py
@@ -24,7 +24,7 @@ class AgentResponse(BaseModel):
     other_share: int
     reasoning: str
 
-# The ultimatum game simulation class
+# The ultimatum game simulation class
 class Proposer:
     def __init__(self, amount: int, model: str, temperature: float, strategy=False, max_retries: int = 3):
         self.debug = False
diff --git a/src/ultimatum/proposer_draw_violin.py b/src/ultimatum/proposer_draw_violin.py
index 9a6efcafcd3547ed351e5c917812626c9b8d0585..4c144af806321bd6419a361e867a3a6be78aeba4 100644
--- a/src/ultimatum/proposer_draw_violin.py
+++ b/src/ultimatum/proposer_draw_violin.py
@@ -66,7 +66,7 @@ plt.ylim(0, 100)
 # Labels and title
 plt.xlabel("Model")
 plt.ylabel("Share of money assigned to oneself")
-plt.title("Distribution of personal share by model in the ultimatum game")
+plt.title("Distribution of personal share by model in the ultimatum game2x2")
 plt.legend()
 
 # Save and display the plot
diff --git a/src/ultimatum/responder.py b/src/ultimatum/responder.py
index 3e78fa41715199e31a66b7f1e0a198463065375f..e0760f826d0f2d1435e0027586b5987d6563c5cb 100644
--- a/src/ultimatum/responder.py
+++ b/src/ultimatum/responder.py
@@ -23,7 +23,7 @@ class AgentResponse(BaseModel):
     response: Literal["Accept", "Reject"]
     reasoning: str
 
-# The ultimatum game simulation class
+# The ultimatum game simulation class
 class Responder:
     def __init__(self, amount: int, offer: int, model: str, temperature: float, strategy=False, max_retries: int = 3):
         self.debug = False
diff --git a/src/ultimatum/responder_draw_violin.py b/src/ultimatum/responder_draw_violin.py
index 060643e542fdcd611ef40cdf632fba4d33456610..576f31cff7ad2762567d5153835a4e57cd9967f4 100644
--- a/src/ultimatum/responder_draw_violin.py
+++ b/src/ultimatum/responder_draw_violin.py
@@ -52,7 +52,7 @@ plt.ylim(0.0, 1.0)
 # Labels and title
 plt.xlabel("Model")
 plt.ylabel("Acceptance rate")
-plt.title("Distribution of acceptance rate by model in the ultimatum game")
+plt.title("Distribution of acceptance rate by model in the ultimatum game2x2")
 plt.legend("")
 
 # Save and display the plot