diff --git a/docker_config.yaml b/docker_config.yaml
index 71319e01a9a4bd07709294f0ac532a2339f0fc7a..763a19b0f026bbc11dde67ded869ff8a6ee46aab 100644
--- a/docker_config.yaml
+++ b/docker_config.yaml
@@ -1,8 +1,8 @@
 container_name: "androsot_container"
 image_tag: "waveregistry.mipt.ru/starkit/sahr-docker:ros2-androsot"
-workspace_path: "."
+workspace_path: "/home/ivan/androsot_server"
 usb_devices:
-  - "/dev/ttyUSB0"
+  - "/ttyUSB0"
 camera_devices:
-  - "/dev/video2"
-  - "/dev/video4"
+  - "/video2"
+  - "/video4"
diff --git a/run_container.py b/run_container.py
index 1f3df859c540a951fce0b3a0d5eed931067bd626..4f180fa25cc8bc1f42fc8a62dc74fa92b9453fa5 100755
--- a/run_container.py
+++ b/run_container.py
@@ -3,6 +3,18 @@ import os
 import argparse
 import yaml
 import docker
+import subprocess
+
+
+def run_xhost_command():
+    # Grant the container's root user access to the host X server (needed for GUI windows)
+    try:
+        result = subprocess.run(
+            ["xhost", "+local:root"],
+            check=True,
+            shell=False,
+            text=True,
+            capture_output=True,  # without this, result.stdout is None
+        )
+        print("Command executed successfully:")
+        print(result.stdout)
+    except (subprocess.CalledProcessError, FileNotFoundError) as e:
+        print(f"An error occurred while executing the command: {e}")
 
 
 def main():
@@ -26,7 +38,7 @@ def main():
     image_tag = "waveregistry.mipt.ru/starkit/sahr-docker:ros2-androsot"
     usb_devices = config.get("usb_devices", [])
     camera_devices = config.get("camera_devices", [])
-
+    run_xhost_command()
     client = docker.from_env()
 
     try:
@@ -61,7 +73,7 @@ def main():
     )
 
     print(f"Started new {container_name} container")
-    os.system(f"docker attach {container_name}")
+    os.system(f"sudo docker attach {container_name}")
 
 
 if __name__ == "__main__":
diff --git a/src/approach_simulator/approach_simulator/__init__.py b/src/approach_simulator/approach_simulator/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/approach_simulator/approach_simulator/simulator_node.py b/src/approach_simulator/approach_simulator/simulator_node.py
new file mode 100644
index 0000000000000000000000000000000000000000..a573831927a6b4e3b751673d5374b7b815cd852d
--- /dev/null
+++ b/src/approach_simulator/approach_simulator/simulator_node.py
@@ -0,0 +1,257 @@
+import sys
+
+import copy
+import json
+
+import numpy as np
+
+import rclpy
+from rclpy.node import Node
+
+from geometry_msgs.msg import Pose2D
+
+from approach_simulator.world import (
+    World2d,
+    GUI,
+    Robot,
+    Ball,
+    SimulatedObject2d,
+    DirectedShape,
+    Team,
+)
+
+from simple_approach.SimpleApproach import (
+    ApproachConfig,
+    RobotConfig,
+    SimpleApproach,
+)
+from simple_approach.SimplePlanner import SimplePlanner, Obstacle
+
+
+class SimulationNode(Node):
+    def __init__(
+        self, node_name: str, gui: GUI, robot: Robot, target: SimulatedObject2d
+    ) -> None:
+        super().__init__(node_name)
+        self.gui = gui
+        self.robot = robot
+        self.target = target
+
+        self.dt = 0.5
+
+        self.__approach_config_path = ""
+        self.__robot_id = ""
+        self.__declare_params()
+
+        print("initializing simple approach")
+        self.approach = SimpleApproach()
+        print("init call")
+        self.__initialize_approach()
+
+        self.approach.robot.position = Pose2D(
+            x=self.gui.world.objects[1].x,
+            y=self.gui.world.objects[1].y,
+            theta=self.gui.world.objects[1].phi,
+        )
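+        # Redraw and replan at 20 Hz (0.05 s period)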
+        self.timer = self.create_timer(0.05, self.timer_callback)
+
+        self.planner = SimplePlanner()
+
+    def timer_callback(self) -> None:
+        drawables = self.gui.world.drawable(
+            self.gui.origin, self.gui.scale_factor.x, self.gui.scale_factor.y
+        )
+
+        drawables[2].color = [0, 0, 0]
+
+        initial_state = copy.deepcopy(self.gui.world.objects[1].state)
+
+        self.approach.robot.position = Pose2D(
+            x=self.robot.x, y=self.robot.y, theta=self.robot.phi
+        )
+
+        expected = Pose2D(
+            x=self.gui.world.objects[2].x,
+            y=self.gui.world.objects[2].y,
+            theta=self.gui.world.objects[2].phi,
+        )
+
+        self.approach.target_position = expected
+
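+        # theta is the negated bearing from the ball to the target; it is used below
+        # as the ball's preferred approach direction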
+        target_in_ball_frame = Pose2D(
+            x=expected.x - self.gui.world.objects[0].x,
+            y=expected.y - self.gui.world.objects[0].y,
+            theta=-np.arctan2(
+                expected.y - self.gui.world.objects[0].y,
+                expected.x - self.gui.world.objects[0].x,
+            ),
+        )
+
+        self.gui.world.objects[0].approach_direction = target_in_ball_frame.theta
+
+        self.approach.planner.obstacles.clear()
+        self.approach.planner.add_obstacle(
+            Obstacle(
+                x=self.gui.world.objects[0].x,
+                y=self.gui.world.objects[0].y,
+                radius=0.5,
+                has_weak_area=True,
+                weak_range=1.0,
+                weak_angle=target_in_ball_frame.theta,
+            )
+        )
+        self.approach.planner.update_waypoints(self.approach.robot.position, expected)
+
+        for waypoint in self.approach.planner.waypoints:
+            print(f"Waypoint: {waypoint}", file=sys.stderr)
+            robot_position = Robot(
+                np.array(
+                    [[waypoint.x], [waypoint.y], [waypoint.theta], [0.0], [0.0], [0.0]]
+                ),
+                Team.BLUE,
+            )
+            drawables.append(
+                robot_position.drawable(
+                    self.gui.origin, self.gui.scale_factor.x, self.gui.scale_factor.y
+                )
+            )
+        print(
+            f"Approach direction: {self.gui.world.objects[0].approach_direction}",
+            file=sys.stderr,
+        )
+
+        # print(self.approach.robot.position, file=sys.stderr)
+        # print(self.approach.target_position, file=sys.stderr)
+        # print(self.approach.distance_eps, file=sys.stderr)
+        # print(self.robot.state is self.gui.world.objects[1].state, file=sys.stderr)
+
+        robot_shape = self.robot.drawable(
+            self.gui.origin, self.gui.scale_factor.x, self.gui.scale_factor.y
+        )
+
+        # print(f"Robot position: {self.approach.robot.position}", file=sys.stderr)
+        # print(f"Target position: {expected}", file=sys.stderr)
+        # print(
+        #     f"Goal direction: {self.approach.goal_direction(expected)}", file=sys.stderr
+        # )
+
+        drawables.append(
+            DirectedShape(
+                robot_shape.x,
+                robot_shape.y,
+                np.deg2rad(self.gui.origin[2])
+                + self.approach.goal_direction(expected)
+                + self.robot.phi,
+                direction_multiplier=5.0,
+            )
+        )
+        num_steps = 0
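+        # Roll the controller forward (up to 500 steps) to preview the whole approach
+        # path; the robot state is restored at the end of the callback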
+
+        while num_steps < 500 and not (
+            self.approach.is_near_expected_direction(expected)
+            and self.approach.is_near_expected_position(expected)
+        ):
+            twist = self.approach.next_twist()
+
+            # print(f"Current twist: {twist}", file=sys.stderr)
+            # print(
+            #     f"Angular distance: {angular_distance(self.approach.planner.next_waypoint(), self.approach.robot.position)}",
+            #     file=sys.stderr,
+            # )
+            # print(
+            #     f"Angular distance (goal): {angular_distance(Pose2D(x=self.approach.robot.position.x, y=self.approach.robot.position.x, theta=self.approach.goal_direction(self.approach.planner.next_waypoint()) + self.approach.robot.position.theta), self.approach.robot.position)}",
+            #     file=sys.stderr,
+            # )
+
+            self.robot.state[3][0] = twist.linear.x
+            self.robot.state[4][0] = twist.linear.y
+            self.robot.state[5][0] = twist.angular.z
+
+            # print(self.robot.state, file=sys.stderr)
+
+            self.robot.simulate(self.dt)
+            # self.gui.world.simulate(self.dt)
+            drawables.append(
+                self.robot.drawable(
+                    self.gui.origin, self.gui.scale_factor.x, self.gui.scale_factor.y
+                )
+            )
+
+            self.approach.robot.position = Pose2D(
+                x=self.robot.x, y=self.robot.y, theta=self.robot.phi
+            )
+
+            self.approach.planner.update_waypoints(
+                self.approach.robot.position, expected
+            )
+
+            num_steps += 1
+
+        print(f"Robot position: {self.approach.robot.position}", file=sys.stderr)
+
+        self.gui.visualize(drawables)
+
+        print(self.gui.world.objects[1].x, file=sys.stderr)
+        print(initial_state[0][0], file=sys.stderr)
+
+        self.robot.state = initial_state
+
+    @property
+    def approach_config_path(self) -> str:
+        return (
+            self.get_parameter("approach_config_path")
+            .get_parameter_value()
+            .string_value
+        )
+
+    @property
+    def robot_id(self) -> str:
+        return self.get_parameter("robot_id").get_parameter_value().string_value
+
+    def __declare_params(self) -> None:
+        self.declare_parameter("approach_config_path", self.__approach_config_path)
+        self.declare_parameter("robot_id", self.__robot_id)
+
+    def __initialize_approach(self) -> None:
+        conf = self.__read_approach_config(self.approach_config_path)
+        robot_id = self.robot_id
+
+        self.approach = SimpleApproach(
+            ApproachConfig(**conf[robot_id]["approach"]),
+            RobotConfig(**conf[robot_id]["robot"]),
+        )
+
+    @staticmethod
+    def __read_approach_config(approach_config_path: str) -> dict:
+        print(approach_config_path, file=sys.stderr)
+        with open(approach_config_path) as approach_config_json:
+            conf = json.load(approach_config_json)
+
+        return conf
+
+
+def main(args=None) -> int:
+    rclpy.init(args=args)
+
+    ball = Ball(np.array([[0.0] for _ in range(6)]))
+    robot = Robot(np.array([[0.0] for _ in range(6)]))
+    target = SimulatedObject2d(np.array([[0.0] for _ in range(6)]))
+
+    objects = [
+        Ball(copy.deepcopy(ball.state)),
+        Robot(copy.deepcopy(robot.state)),
+        SimulatedObject2d(copy.deepcopy(target.state)),
+    ]
+
+    w = World2d(3, 4, objects)
+
+    g = GUI(w, 900, 1200)
+    node = SimulationNode("approach_sim", g, robot, target)
+
+    rclpy.spin(node)
+
+    node.destroy_node()
+    rclpy.shutdown()
+
+    return 0
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/approach_simulator/approach_simulator/world.py b/src/approach_simulator/approach_simulator/world.py
new file mode 100644
index 0000000000000000000000000000000000000000..721ec01f82cae28a33ee688b4d936d78832fe3df
--- /dev/null
+++ b/src/approach_simulator/approach_simulator/world.py
@@ -0,0 +1,391 @@
+from dataclasses import dataclass
+from enum import Enum
+
+import numpy as np
+
+import cv2
+
+
+@dataclass
+class Point2d:
+    x: float
+    y: float
+
+
+@dataclass
+class OrientedPoint2d:
+    x: float
+    y: float
+    phi: float
+
+
+@dataclass
+class State2d:
+    position: OrientedPoint2d
+    velocity: OrientedPoint2d
+
+
+class DrawableObject:
+    DEFAULT_COLOR = [0, 0, 0]
+    DEFAULT_RADIUS = 10
+
+    def __init__(
+        self,
+        x: int,
+        y: int,
+        radius: int = DEFAULT_RADIUS,
+        color: list[int] = DEFAULT_COLOR,
+    ) -> None:
+        self.x = x
+        self.y = y
+        self.radius = radius
+        self.color = color
+
+    def draw(self, canvas: np.ndarray) -> None:
+        canvas = cv2.circle(canvas, (self.x, self.y), self.radius, self.color, -1)
+
+
+class DirectedShape(DrawableObject):
+    DEFAULT_DIRECTION_MULTIPLIER = 2.0
+    DEFAULT_THICKNESS = 2
+
+    def __init__(
+        self,
+        x: int,
+        y: int,
+        phi: float,
+        radius: int = DrawableObject.DEFAULT_RADIUS,
+        color: list[int] = DrawableObject.DEFAULT_COLOR,
+        direction_multiplier: float = DEFAULT_DIRECTION_MULTIPLIER,
+        thickness: int = DEFAULT_THICKNESS,
+    ) -> None:
+        DrawableObject.__init__(self, x, y, radius, color)
+        self.phi = phi
+        self.direction_multiplier = direction_multiplier
+        self.thickness = thickness
+
+    def draw(self, canvas: np.ndarray) -> None:
+        super().draw(canvas)
+
+        canvas = cv2.line(
+            canvas,
+            (self.x, self.y),
+            (
+                int(
+                    self.x - self.radius * self.direction_multiplier * np.sin(self.phi)
+                ),
+                int(
+                    self.y - self.radius * self.direction_multiplier * np.cos(self.phi)
+                ),
+            ),
+            self.color,
+            self.thickness,
+        )
+
+
+class ObstacleShape(DrawableObject):
+    DEFAULT_OBSTACLE_RADIUS = 60
+    DEFAULT_THICKNESS = 2
+
+    def __init__(
+        self,
+        x: int,
+        y: int,
+        radius: int = DrawableObject.DEFAULT_RADIUS,
+        color: list[int] = DrawableObject.DEFAULT_COLOR,
+        obstacle_radius: int = DEFAULT_OBSTACLE_RADIUS,
+        thickness: int = DEFAULT_THICKNESS,
+    ) -> None:
+        DrawableObject.__init__(self, x, y, radius, color)
+        self.obstacle_radius = obstacle_radius
+        self.thickness = thickness
+
+    def draw(self, canvas: np.ndarray) -> None:
+        super().draw(canvas)
+
+        cv2.circle(
+            canvas, (self.x, self.y), self.obstacle_radius, self.color, self.thickness
+        )
+
+
+class DirectedCutObstacleShape(DrawableObject):
+    def __init__(
+        self,
+        x: int,
+        y: int,
+        cut_mid: float,
+        cut_angle: float,
+        rotation: int,
+        radius: int = DrawableObject.DEFAULT_RADIUS,
+        color: list[int] = DrawableObject.DEFAULT_COLOR,
+        obstacle_radius: int = ObstacleShape.DEFAULT_OBSTACLE_RADIUS,
+        thickness: int = ObstacleShape.DEFAULT_THICKNESS,
+    ) -> None:
+        super().__init__(x, y, radius, color)
+        self.obstacle_radius = obstacle_radius
+        self.thickness = thickness
+        self.cut_mid = cut_mid
+        self.cut_angle = cut_angle
+        self.rotation = rotation
+
+    def draw(self, canvas: np.ndarray) -> None:
+        super().draw(canvas)
+        start_angle = int(
+            np.rad2deg((self.cut_mid - self.cut_angle / 2) % (2.0 * np.pi))
+        )
+        end_angle = start_angle + int(np.rad2deg(self.cut_angle))
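+        # Drawing from end_angle around to start_angle + 360 leaves the cut sector open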
+
+        # http://docs.opencv.org/modules/core/doc/drawing_functions.html#ellipse
+        cv2.ellipse(
+            canvas,
+            (self.x, self.y),
+            (self.obstacle_radius, self.obstacle_radius),
+            self.rotation,
+            end_angle,
+            start_angle + 360,
+            self.color,
+            self.thickness,
+        )
+
+
+class RobotShape(DirectedShape, ObstacleShape):
+    def __init__(
+        self,
+        x: int,
+        y: int,
+        phi: float,
+        radius: int = DrawableObject.DEFAULT_RADIUS,
+        color: list[int] = DrawableObject.DEFAULT_COLOR,
+        direction_multiplier: float = DirectedShape.DEFAULT_DIRECTION_MULTIPLIER,
+        obstacle_radius: int = ObstacleShape.DEFAULT_OBSTACLE_RADIUS,
+        thickness: int = DirectedShape.DEFAULT_THICKNESS,
+    ) -> None:
+        DirectedShape.__init__(
+            self,
+            x,
+            y,
+            phi,
+            radius,
+            color,
+            obstacle_radius / radius * direction_multiplier,
+            thickness,
+        )
+        ObstacleShape.__init__(self, x, y, radius, color, obstacle_radius, thickness)
+
+    def draw(self, canvas: np.ndarray) -> None:
+        super().draw(canvas)
+
+
+class BallShape(DirectedCutObstacleShape):
+    DEFAULT_COLOR = [0, 128, 255]
+
+    def __init__(
+        self,
+        x: int,
+        y: int,
+        cut_mid: float,
+        cut_angle: float,
+        radius: int = DrawableObject.DEFAULT_RADIUS,
+        color: list[int] = DEFAULT_COLOR,
+        obstacle_radius: int = ObstacleShape.DEFAULT_OBSTACLE_RADIUS,
+        thickness: int = ObstacleShape.DEFAULT_THICKNESS,
+    ) -> None:
+        super().__init__(
+            x, y, cut_mid, cut_angle, radius, color, obstacle_radius, thickness
+        )
+
+
+class SimulatedObject2d:
+    def __init__(self, state: np.ndarray) -> None:
+        self.state = state
+
+    @property
+    def x(self):
+        return self.state[0][0]
+
+    @x.setter
+    def x(self, new_x: float) -> None:
+        self.state[0][0] = new_x
+
+    @property
+    def y(self):
+        return self.state[1][0]
+
+    @y.setter
+    def y(self, new_y: float) -> None:
+        self.state[1][0] = new_y
+
+    @property
+    def phi(self):
+        return self.state[2][0]
+
+    @phi.setter
+    def phi(self, new_phi: float) -> None:
+        self.state[2][0] = new_phi
+
+    def simulate(self, dt: float):
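+        # State is stacked [pose; velocity]; integrate the pose half by explicit Euler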
+        self.state[: int(self.state.shape[0] / 2), :] += (
+            dt * self.state[int(self.state.shape[0] / 2) :, :]
+        )
+
+    def drawable(self, origin: np.ndarray, x_scale: float, y_scale: float):
+        return DrawableObject(
+            int(origin[0] - x_scale * self.x), int(origin[1] - y_scale * self.y)
+        )
+
+
+class Team(Enum):
+    BLUE = 0
+    RED = 1
+
+
+class Robot(SimulatedObject2d):
+    BLUE_COLOR = [255, 0, 0]
+    RED_COLOR = [0, 0, 255]
+
+    def __init__(self, state: np.ndarray, team: Team = Team.RED) -> None:
+        super().__init__(state)
+        self.team = team
+
+    def simulate(self, dt: float):
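+        # The twist is expressed in the robot body frame; rotate it into the world
+        # frame before integrating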
+        self.x += dt * (
+            self.state[3][0] * np.cos(self.phi) + self.state[4][0] * np.sin(self.phi)
+        )
+        self.y += dt * (
+            self.state[3][0] * np.sin(self.phi) - self.state[4][0] * np.cos(self.phi)
+        )
+        self.phi += dt * self.state[5][0]
+
+    def drawable(self, origin: np.ndarray, x_scale: float, y_scale: float):
+        return RobotShape(
+            int(origin[0] - x_scale * self.x),
+            int(origin[1] - y_scale * self.y),
+            np.deg2rad(origin[2]) + self.phi,
+            color=self.BLUE_COLOR if self.team == Team.BLUE else self.RED_COLOR,
+        )
+
+
+class Field2d(DrawableObject):
+    GREEN_COLOR_BGR = [20, 130, 57]
+
+    def __init__(self) -> None:
+        super().__init__(0, 0)
+
+    def draw(self, canvas: np.ndarray) -> None:
+        canvas[:, :, 0], canvas[:, :, 1], canvas[:, :, 2] = self.GREEN_COLOR_BGR
+
+
+class Ball(SimulatedObject2d):
+    ORANGE_COLOR = [0, 128, 255]
+
+    def __init__(
+        self,
+        state: np.ndarray,
+        approach_direction: float | None = None,
+        approach_angle: float = 1.0,
+    ) -> None:
+        super().__init__(state)
+        self.approach_direction = approach_direction
+        self.approach_angle = approach_angle
+
+    def drawable(self, origin: np.ndarray, x_scale: float, y_scale: float):
+        if self.approach_direction is not None:
+            return DirectedCutObstacleShape(
+                int(origin[0] - x_scale * self.x),
+                int(origin[1] - y_scale * self.y),
+                np.deg2rad(origin[2]) + self.approach_direction,
+                self.approach_angle,
+                origin[2],
+                color=self.ORANGE_COLOR,
+            )
+        return DrawableObject(
+            int(origin[0] - x_scale * self.x),
+            int(origin[1] - y_scale * self.y),
+            color=self.ORANGE_COLOR,
+        )
+
+
+class World2d:
+    def __init__(
+        self, x: float, y: float, objects: list[SimulatedObject2d] | None = None
+    ) -> None:
+        self.size = Point2d(x, y)
+        # A mutable default argument ([]) would be shared between instances
+        self.objects = objects if objects is not None else []
+
+    def add_object(self, new: SimulatedObject2d) -> None:
+        self.objects.append(new)
+
+    def simulate(self, dt: float):
+        for obj in self.objects:
+            obj.simulate(dt)
+
+    def drawable(
+        self, origin: np.ndarray, x_scale: float, y_scale: float
+    ) -> list[DrawableObject]:
+        result = []
+        result.append(Field2d())
+
+        for obj in self.objects:
+            result.append(obj.drawable(origin, x_scale, y_scale))
+
+        return result
+
+    def draw(
+        self, canvas: np.ndarray, origin: np.ndarray, x_scale: float, y_scale: float
+    ):
+        for drawable in self.drawable(origin, x_scale, y_scale):
+            drawable.draw(canvas)
+
+
+class GUI:
+    def __init__(self, world: World2d, WIND_X: int, WIND_Y: int) -> None:
+        self.world = world
+
+        self.window_size = Point2d(WIND_X, WIND_Y)
+
+        self.scale_factor = Point2d(
+            self.window_size.y / self.world.size.y,
+            -self.window_size.x / self.world.size.x,
+        )
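+        # Scale factors convert field metres to pixels; the axis swap and sign flip
+        # rotate the field into the window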
+
+        self.origin = np.array(
+            [self.window_size.y / 2, self.window_size.x / 2, 90], np.int32
+        )
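+        # origin[0:2] is the pixel position of the field origin; origin[2] is the
+        # view rotation in degrees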
+
+        self.canvas = np.ones((self.window_size.x, self.window_size.y, 3), np.uint8)
+
+        self.move_object = 0
+        self.x_step = 0.05
+        self.y_step = 0.05
+        self.phi_step = 0.05
+
+    def visualize(self, drawables: list[DrawableObject]) -> None:
+        for drawable in drawables:
+            drawable.draw(self.canvas)
+
+        cv2.imshow("field", self.canvas)
+
+        key = cv2.waitKey(1) & 0xFF
+
+        if chr(key).isdigit():
+            self.move_object = int(chr(key))
+
+        if self.move_object < len(self.world.objects):
+            if key == ord("w"):
+                self.world.objects[self.move_object].x += self.x_step
+            if key == ord("s"):
+                self.world.objects[self.move_object].x -= self.x_step
+
+            # moving ball left and right
+            if key == ord("a"):
+                self.world.objects[self.move_object].y += self.y_step
+            if key == ord("d"):
+                self.world.objects[self.move_object].y -= self.y_step
+
+            if key == ord("q"):
+                self.world.objects[self.move_object].phi += self.phi_step
+            if key == ord("e"):
+                self.world.objects[self.move_object].phi -= self.phi_step
+
+        if key == 27:  # ESC
+            raise Exception("Exit...")
diff --git a/src/approach_simulator/launch/approach_sim.launch.py b/src/approach_simulator/launch/approach_sim.launch.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f18baa2a01f50d09ba432cf96a49d1336a925a4
--- /dev/null
+++ b/src/approach_simulator/launch/approach_sim.launch.py
@@ -0,0 +1,64 @@
+import json
+import os
+
+from ament_index_python.packages import get_package_share_directory
+
+from launch import LaunchDescription
+
+from launch_ros.actions import Node
+
+
+class ApproachLaunchCreator:
+    def __init__(self, connections_config_path: str, approach_config_path: str) -> None:
+        self.__connections_config = self.__read_connections_config(
+            connections_config_path
+        )
+        self.__approach_config_path = approach_config_path
+
+    def create_launch(self) -> LaunchDescription:
+        return LaunchDescription(
+            [
+                *self.__create_approach_nodes(),
+            ]
+        )
+
+    @staticmethod
+    def __read_connections_config(connections_config_path: str) -> dict:
+        with open(connections_config_path) as connections_config_json:
+            conf = json.load(
+                connections_config_json,
+                # Keep numeric values as strings so ids can be used in namespaces
+                parse_float=str,
+                parse_int=str,
+            )
+
+        return conf
+
+    def __create_approach_nodes(self) -> list[Node]:
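+        # One simulator node per robot listed in the connections config, namespaced
+        # by robot id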
+        return [
+            Node(
+                package="approach_simulator",
+                executable="simulation",
+                namespace=f"robot_{robot_id}",
+                parameters=[
+                    {"approach_config_path": self.__approach_config_path},
+                    {"robot_id": str(robot_id)},
+                ],
+            )
+            for robot_id in self.__connections_config.keys()
+        ]
+
+
+def generate_launch_description():
+    connections_config_path = os.path.join(
+        get_package_share_directory("server_main"),
+        "config",
+        "connections_config.json",
+    )
+
+    approach_config_path = os.path.join(
+        get_package_share_directory("server_main"), "config", "approach_config.json"
+    )
+
+    return ApproachLaunchCreator(
+        connections_config_path, approach_config_path
+    ).create_launch()
diff --git a/src/approach_simulator/package.xml b/src/approach_simulator/package.xml
new file mode 100644
index 0000000000000000000000000000000000000000..49f776be3fde524dba1ac087fcefd8b35fb797d3
--- /dev/null
+++ b/src/approach_simulator/package.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>approach_simulator</name>
+  <version>0.0.0</version>
+  <description>TODO: Package description</description>
+  <maintainer email="root@todo.todo">root</maintainer>
+  <license>TODO: License declaration</license>
+
+  <test_depend>ament_copyright</test_depend>
+  <test_depend>ament_flake8</test_depend>
+  <test_depend>ament_pep257</test_depend>
+  <test_depend>python3-pytest</test_depend>
+
+  <exec_depend>rclpy</exec_depend>
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
diff --git a/src/approach_simulator/resource/approach_simulator b/src/approach_simulator/resource/approach_simulator
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/approach_simulator/setup.cfg b/src/approach_simulator/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..e98c94c7114d14522d6736bdd5fb53919af546a9
--- /dev/null
+++ b/src/approach_simulator/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/approach_simulator
+[install]
+install_scripts=$base/lib/approach_simulator
diff --git a/src/approach_simulator/setup.py b/src/approach_simulator/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..86b1593ecc3ef7dceb1d2f6cc69a92ea7ce95056
--- /dev/null
+++ b/src/approach_simulator/setup.py
@@ -0,0 +1,30 @@
+import os
+from glob import glob
+
+from setuptools import find_packages, setup
+
+package_name = "approach_simulator"
+
+setup(
+    name=package_name,
+    version="0.0.0",
+    packages=find_packages(exclude=["test"]),
+    data_files=[
+        ("share/ament_index/resource_index/packages", ["resource/" + package_name]),
+        ("share/" + package_name, ["package.xml"]),
+        (
+            os.path.join("share", package_name, "launch"),
+            glob(os.path.join("launch", "*launch.[pxy][yma]*")),
+        ),
+    ],
+    install_requires=["setuptools"],
+    zip_safe=True,
+    maintainer="root",
+    maintainer_email="root@todo.todo",
+    description="TODO: Package description",
+    license="TODO: License declaration",
+    tests_require=["pytest"],
+    entry_points={
+        "console_scripts": ["simulation = approach_simulator.simulator_node:main"],
+    },
+)
diff --git a/src/approach_simulator/test/test_copyright.py b/src/approach_simulator/test/test_copyright.py
new file mode 100644
index 0000000000000000000000000000000000000000..ceffe896d04b3edf4e4d4802f0edd641fcd9b72d
--- /dev/null
+++ b/src/approach_simulator/test/test_copyright.py
@@ -0,0 +1,27 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_copyright.main import main
+import pytest
+
+
+# Remove the `skip` decorator once the source file(s) have a copyright header
+@pytest.mark.skip(
+    reason="No copyright header has been placed in the generated source file."
+)
+@pytest.mark.copyright
+@pytest.mark.linter
+def test_copyright():
+    rc = main(argv=[".", "test"])
+    assert rc == 0, "Found errors"
diff --git a/src/approach_simulator/test/test_flake8.py b/src/approach_simulator/test/test_flake8.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee79f31ac2f34dcfd98c51d65f49d8bf98fde073
--- /dev/null
+++ b/src/approach_simulator/test/test_flake8.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_flake8.main import main_with_errors
+import pytest
+
+
+@pytest.mark.flake8
+@pytest.mark.linter
+def test_flake8():
+    rc, errors = main_with_errors(argv=[])
+    assert rc == 0, "Found %d code style errors / warnings:\n" % len(
+        errors
+    ) + "\n".join(errors)
diff --git a/src/approach_simulator/test/test_pep257.py b/src/approach_simulator/test/test_pep257.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2c3deb8eb3dea6f1b38db6f62e98859774599ac
--- /dev/null
+++ b/src/approach_simulator/test/test_pep257.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_pep257.main import main
+import pytest
+
+
+@pytest.mark.linter
+@pytest.mark.pep257
+def test_pep257():
+    rc = main(argv=[".", "test"])
+    assert rc == 0, "Found code style errors / warnings"
diff --git a/src/calibration_node/calibration_node/rct_test.py b/src/calibration_node/calibration_node/rct_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed9d01e64b19113518813b6517e438ac1ce4b92b
--- /dev/null
+++ b/src/calibration_node/calibration_node/rct_test.py
@@ -0,0 +1,32 @@
+from .runtime_calibraiton import RuntimeCalibration, FieldParam
+
+import logging
+
+import cv2
+import numpy as np
+import pytest
+
+
+@pytest.fixture
+def distortion_coef():
+    return np.array(
+        [
+            0.0,
+            0.0,
+            0.0,
+            0.0,
+            0.0,
+            0.0,
+        ]
+    )
+
+
+@pytest.fixture
+def intrinsic():
+    return np.array([[1.0, 0, 500], [0, 1.0, 500], [0, 0, 1.0]])
+
+
+@pytest.fixture
+def field_param():
+    return FieldParam(15.0, 15.0, 4.0, 3.0)
+
+
+def test_smoke(distortion_coef, intrinsic, field_param):
+    # RuntimeCalibration also expects a logger and an ArUco dictionary id;
+    # the values below are arbitrary test stand-ins
+    RuntimeCalibration(
+        logging.getLogger("test"),
+        intrinsic,
+        distortion_coef,
+        field_param,
+        cv2.aruco.DICT_5X5_100,
+    )
diff --git a/src/calibration_node/calibration_node/runtime_calibraiton.py b/src/calibration_node/calibration_node/runtime_calibraiton.py
new file mode 100644
index 0000000000000000000000000000000000000000..e361f5a89442c26e558f7013a4c110bc27c9a29f
--- /dev/null
+++ b/src/calibration_node/calibration_node/runtime_calibraiton.py
@@ -0,0 +1,380 @@
+from dataclasses import dataclass
+import time
+import numpy as np
+import cv2
+import typing as tp
+
+
+@dataclass
+class FieldParam:
+    corner_tag_size_m: float
+    corner_tag_border_m: float
+    field_length_m: float
+    field_width_m: float
+    border_size_m: float = 0.17
+
+
+class RuntimeCalibration:
+    """
+    Handle the camera field calibration
+    """
+
+    def __init__(
+        self, logger, intrinsic, distortion_coef, field_param: FieldParam, aruco_dict_id
+    ) -> None:
+        self.logger = logger  # logging.getLogger("field")
+        self.calibration_status = False
+        self.field_param: FieldParam = field_param
+        self.configure_field()
+        self.intrinsic = intrinsic
+        self.distortion_coef = distortion_coef
+        # configure_field() resets `distortion` to None; keep it in sync with the
+        # coefficients used by projectPoints/undistortPoints
+        self.distortion = distortion_coef
+        self.should_calibrate = True
+
+        self.aruco_dictionary = cv2.aruco.Dictionary_get(aruco_dict_id)
+        self.aruco_parameters = cv2.aruco.DetectorParameters_create()
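+        # NOTE: Dictionary_get / DetectorParameters_create are the legacy (pre-4.7)
+        # OpenCV ArUco API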
+        self.arucoItems = {
+            # Corners
+            0: ["c1", (128, 128, 0)],
+            1: ["c2", (128, 128, 0)],
+            2: ["c3", (128, 128, 0)],
+            3: ["c4", (128, 128, 0)],
+            4: ["c5", (128, 128, 0)],
+            5: ["c6", (128, 128, 0)],
+        }
+
+    def configure_field(self):
+        self.corner_field_positions = {}
+        for c, sx, sy in (
+            ["c1", 0, 1],
+            ["c2", 0, -1],
+            ["c3", -1, 1],
+            ["c4", -1, -1],
+            ["c5", 1, 1],
+            ["c6", 1, -1],
+        ):
+            cX = sx * (
+                self.field_param.field_length_m / 2
+                + (self.field_param.corner_tag_size_m / 2)
+                + self.field_param.corner_tag_border_m
+            )
+            cY = sy * (
+                self.field_param.field_width_m / 2
+                + (self.field_param.corner_tag_size_m / 2)
+                + self.field_param.corner_tag_border_m
+            )
+
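+            # Corner order below matches what cv2.aruco.detectMarkers returns:
+            # top-left, top-right, bottom-right, bottom-left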
+            self.corner_field_positions[c] = [
+                # Top left
+                (
+                    cX + self.field_param.corner_tag_size_m / 2,
+                    cY + self.field_param.corner_tag_size_m / 2,
+                ),
+                # Top right
+                (
+                    cX + self.field_param.corner_tag_size_m / 2,
+                    cY - self.field_param.corner_tag_size_m / 2,
+                ),
+                # Bottom right
+                (
+                    cX - self.field_param.corner_tag_size_m / 2,
+                    cY - self.field_param.corner_tag_size_m / 2,
+                ),
+                # Bottom left
+                (
+                    cX - self.field_param.corner_tag_size_m / 2,
+                    cY + self.field_param.corner_tag_size_m / 2,
+                ),
+            ]
+
+        # Position of corners on the image
+        self.corner_gfx_positions: dict[str, tp.Any] = {}
+
+        # Is the field calibrated?
+        self.is_calibrated = False
+
+        # Do we see the whole field?
+        self.see_whole_field = False
+
+        # Extrinsic (4x4) transformations
+        self.extrinsic_mat = None
+
+        # Camera intrinsic and distortion
+        self.intrinsic = None
+        self.distortion = None
+        self.errors = 0
+
+    def calibrated(self):
+        # `is_calibrated` is maintained by update_calibration()
+        return self.is_calibrated
+
+    def set_corner_position(self, corner: str, corners: list):
+        """
+        Sets the position of a corner
+
+        :param str corner: the corner name (c1, c2, c3 or c4)
+        :param list corners: the corner position
+        """
+        self.corner_gfx_positions[corner] = corners
+
+    def update_calibration(self, image):
+        """
+        If the field should be calibrated, compute a calibration from the detected corners.
+        This will use corner positions previously passed with set_corner_positions.
+
+        :param image: the (OpenCV) image used for calibration
+        """
+        if len(self.corner_gfx_positions) >= 3 and self.should_calibrate:
+            # Computing point-to-point correspondence
+            object_points = []
+            graphics_positions = []
+            for key in self.corner_gfx_positions:
+                for gfx, real in zip(
+                    self.corner_gfx_positions[key], self.corner_field_positions[key]
+                ):
+                    graphics_positions.append(gfx)
+                    object_points.append([*real, 0.0])
+
+            object_points = np.array(object_points, dtype=np.float32)
+            graphics_positions = np.array(graphics_positions, dtype=np.float32)
+
+            # Calibrating camera
+            flags = (
+                cv2.CALIB_USE_INTRINSIC_GUESS
+                + cv2.CALIB_FIX_FOCAL_LENGTH
+                + cv2.CALIB_FIX_PRINCIPAL_POINT
+            )
+
+            # No distortion
+            flags += cv2.CALIB_FIX_TANGENT_DIST
+            flags += (
+                cv2.CALIB_FIX_K1
+                + cv2.CALIB_FIX_K2
+                + cv2.CALIB_FIX_K3
+                + cv2.CALIB_FIX_K4
+                + cv2.CALIB_FIX_K5
+            )
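+            # With the intrinsics and distortion fixed, calibrateCamera effectively
+            # solves only for the extrinsic pose (akin to solvePnP)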
+            self.logger.debug(
+                f"intrinsic and distortion:\n{self.intrinsic}\n{self.distortion}"
+            )
+            ret, _, _, rvecs, tvecs = cv2.calibrateCamera(
+                [object_points],
+                [graphics_positions],
+                image.shape[:2][::-1],
+                np.array(self.intrinsic, dtype=np.float32),
+                self.distortion_coef,
+                flags=flags,
+            )
+
+            # Computing extrinsic matrices
+            transformation = np.eye(4)
+            transformation[:3, :3], _ = cv2.Rodrigues(rvecs[0])
+            transformation[:3, 3] = tvecs[0].flatten()
+
+            self.extrinsic_mat_inv = transformation
+            self.extrinsic_mat = np.linalg.inv(transformation)
+            # We are now calibrated
+            self.is_calibrated = True
+            self.should_calibrate = False
+            self.errors = 0
+
+            # Checking if we can see the whole field
+            self.logger.debug(f"image shape: {image.shape}")
+            image_height, image_width = image.shape
+            image_points = []
+            self.see_whole_field = True
+            for sx, sy in [(-1, 1), (1, 1), (1, -1), (-1, -1)]:
+                x = sx * (
+                    (self.field_param.field_length_m / 2)
+                    + self.field_param.border_size_m
+                )
+                y = sy * (
+                    (self.field_param.field_width_m / 2)
+                    + self.field_param.border_size_m
+                )
+
+                img = self.position_to_pixel([x, y, 0.0])
+                image_points.append((int(img[0]), int(img[1])))
+
+                if (
+                    img[0] < 0
+                    or img[0] > image_width
+                    or img[1] < 0
+                    or img[1] > image_height
+                ):
+                    self.see_whole_field = False
+
+        # We check that the calibration is consistent; this can be done with only a few
+        # corners. The goal is to avoid recalibrating every frame, for performance reasons.
+        if len(self.corner_gfx_positions) >= 3:
+            if self.is_calibrated:
+                has_error = False
+                reprojection_errors = []
+                for key in self.corner_gfx_positions:
+                    for gfx, real in zip(
+                        self.corner_gfx_positions[key], self.corner_field_positions[key]
+                    ):
+                        projected_position = self.pixel_to_position(gfx)
+                        reprojection_distance = np.linalg.norm(
+                            np.array(real) - np.array(projected_position)
+                        )
+                        if reprojection_distance > 0.025:
+                            reprojection_errors.append(reprojection_distance)
+                            has_error = True
+
+                if not has_error:
+                    self.errors = 0
+                else:
+                    self.errors += 1
+                    if self.errors > 8:
+                        self.logger.warning("Calibration seems wrong, re-calibrating")
+                        self.logger.warning(
+                            f"Reprojection threshold is 0.025 corner error is {reprojection_errors}"
+                        )
+                        self.should_calibrate = True
+        self.logger.debug(f"HUIHUIHUI!!!!{self.extrinsic_mat}")
+        self.corner_gfx_positions = {}
+
+    def detect_markers(self, image, image_debug=None):
+        (corners, ids, rejected) = cv2.aruco.detectMarkers(
+            image, self.aruco_dictionary, parameters=self.aruco_parameters
+        )
+
+        new_markers = {}
+
+        if len(corners) > 0:
+            for markerCorner, markerID in zip(corners, ids.flatten()):
+                if markerID not in self.arucoItems:
+                    continue
+
+                corners = markerCorner.reshape((4, 2))
+
+                # Draw the bounding box of the ArUCo detection
+                item = self.arucoItems[markerID][0]
+
+                if item[0] == "c":
+                    self.set_corner_position(item, corners)
+
+                if image_debug is not None:
+                    (topLeft, topRight, bottomRight, bottomLeft) = corners
+                    topRight = (int(topRight[0]), int(topRight[1]))
+                    bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
+                    bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
+                    topLeft = (int(topLeft[0]), int(topLeft[1]))
+                    itemColor = self.arucoItems[markerID][1]
+                    cv2.line(image_debug, topLeft, topRight, itemColor, 2)
+                    cv2.line(image_debug, topRight, bottomRight, itemColor, 2)
+                    cv2.line(image_debug, bottomRight, bottomLeft, itemColor, 2)
+                    cv2.line(image_debug, bottomLeft, topLeft, itemColor, 2)
+
+                    # Compute and draw the center (x, y)-coordinates of the
+                    # ArUco marker
+                    cX = int((topLeft[0] + bottomRight[0]) / 2.0)
+                    cY = int((topLeft[1] + bottomRight[1]) / 2.0)
+                    cv2.circle(image_debug, (cX, cY), 4, (0, 0, 255), -1)
+                    fX = int((topLeft[0] + topRight[0]) / 2.0)
+                    fY = int((topLeft[1] + topRight[1]) / 2.0)
+                    cv2.line(
+                        image_debug,
+                        (cX, cY),
+                        (cX + 2 * (fX - cX), cY + 2 * (fY - cY)),
+                        (0, 0, 255),
+                        2,
+                    )
+
+                    text = item
+                    if item.startswith("blue") or item.startswith("green"):
+                        text = item[-1]
+                    cv2.putText(
+                        image_debug,
+                        text,
+                        (cX - 4, cY + 4),
+                        cv2.FONT_HERSHEY_SIMPLEX,
+                        0.5,
+                        (255, 255, 255),
+                        6,
+                    )
+                    cv2.putText(
+                        image_debug,
+                        text,
+                        (cX - 4, cY + 4),
+                        cv2.FONT_HERSHEY_SIMPLEX,
+                        0.5,
+                        itemColor,
+                        2,
+                    )
+
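+                # Note: with only corner tags in arucoItems this branch is never taken;
+                # pose_of_tag and last_updates would need to be defined before adding
+                # robot or ball tags here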
+                if self.calibrated() and item[0] != "c":
+                    new_markers[item] = self.pose_of_tag(corners)
+                    self.last_updates[item] = time.time()
+
+        self.update_calibration(image)
+
+    def position_to_pixel(self, pos: list) -> list:
+        """
+        Given a position (3D, will be assumed on the ground if 2D), find its position on the screen
+
+        :param list pos: position in field frame (2D or 3D)
+        :return list: position on the screen
+        """
+        if len(pos) == 2:
+            # If no z is provided, assume it is a ground position
+            pos = [*pos, 0.0]
+
+        point_position_camera = self.field_to_camera(pos)
+        position, J = cv2.projectPoints(
+            point_position_camera,
+            np.zeros(3),
+            np.zeros(3),
+            self.intrinsic,
+            self.distortion,
+        )
+        position = position[0][0]
+
+        return [int(position[0]), int(position[1])]
+
+    def field_to_camera(self, point: list) -> np.ndarray:
+        """
+        Transforms a point from field frame to camera frame
+
+        :param list point: point in field frame (3d)
+        :return np.ndarray: point in camera frame (3d)
+        """
+        return (self.extrinsic_mat @ np.array([*point, 1.0]))[:3]
+
+    def camera_to_field(self, point: list) -> np.ndarray:
+        """
+        Transforms a point from camera frame to field frame
+
+        :param list point: point in camera frame (3d)
+        :return np.ndarray: point in field frame (3d)
+        """
+        return (self.extrinsic_mat_inv @ np.array([*point, 1.0]))[:3]
+
+    def pixel_to_position(self, pixel: list, z: float = 0) -> list:
+        """
+        Transforms a pixel on the image to a 3D point on the field, given a z
+
+        :param list pos: pixel
+        :param float z: the height to intersect with, defaults to 0
+        :return list: point coordinates (x, y)
+        """
+
+        # Computing the point position in camera frame
+        point_position_camera = cv2.undistortPoints(
+            np.array(pixel), self.intrinsic, self.distortion
+        )[0][0]
+
+        # Computing the point position in the field frame and solving for given z
+        point_position_field = self.camera_to_field([*point_position_camera, 1.0])
+        camera_center_field = self.camera_to_field(np.array([0.0, 0.0, 0.0]))
+        delta = point_position_field - camera_center_field
+        length = (z - camera_center_field[2]) / delta[2]
+
+        return list(camera_center_field + length * delta)[:2]
+
+    @property
+    def extrinsic(self) -> np.ndarray:
+        return self.extrinsic_mat
diff --git a/src/calibration_node/calibration_node/runtime_calibration_node.py b/src/calibration_node/calibration_node/runtime_calibration_node.py
new file mode 100644
index 0000000000000000000000000000000000000000..92d24729c4683c6803548b08d9963559bdc3f0fa
--- /dev/null
+++ b/src/calibration_node/calibration_node/runtime_calibration_node.py
@@ -0,0 +1,187 @@
+import rclpy
+import rclpy.node
+from rclpy.qos import qos_profile_sensor_data
+from cv_bridge import CvBridge
+import numpy as np
+import cv2
+from scipy.spatial.transform import Rotation as R
+
+from sensor_msgs.msg import CameraInfo
+from sensor_msgs.msg import Image
+from geometry_msgs.msg import PoseArray
+from ros2_aruco_interfaces.msg import ArucoMarkers
+from tf2_ros import TransformBroadcaster
+from geometry_msgs.msg import TransformStamped
+from calibration_node.runtime_calibraiton import RuntimeCalibration, FieldParam
+
+WORLD_FRAME = "world"
+
+
+class RuntimeCalibrationNode(rclpy.node.Node):
+    def __init__(self):
+        super().__init__("runtime_calibration_node")
+        self.declare_parameter("marker_size", 0.17)
+        self.declare_parameter("marker_border_size", 0.0)
+        self.declare_parameter("aruco_dictionary_id", "DICT_5X5_250")
+        self.declare_parameter("image_topic", "/camera/image_raw")
+        self.declare_parameter("camera_info_topic", "/camera/camera_info")
+        self.declare_parameter("field_length", 3.60)
+        self.declare_parameter("field_width", 2.60)
+        self.declare_parameter("camera_frame", None)
+
+        self.marker_size = (
+            self.get_parameter("marker_size").get_parameter_value().double_value
+        )
+        self.border_size = (
+            self.get_parameter("marker_border_size").get_parameter_value().double_value
+        )
+        dictionary_id_name = (
+            self.get_parameter("aruco_dictionary_id").get_parameter_value().string_value
+        )
+        self.camera_frame = (
+            self.get_parameter("camera_frame").get_parameter_value().string_value
+        )
+        self.field_length = (
+            self.get_parameter("field_length").get_parameter_value().double_value
+        )
+        self.field_width = (
+            self.get_parameter("field_width").get_parameter_value().double_value
+        )
+
+        image_topic = f"{self.camera_frame}/image_raw"
+        info_topic = f"{self.camera_frame}/camera_info"
+
+        # Make sure we have a valid dictionary id:
+        try:
+            self.dictionary_id = getattr(cv2.aruco, dictionary_id_name)
+            if not isinstance(self.dictionary_id, int):
+                raise AttributeError
+        except AttributeError:
+            self.get_logger().error(
+                "bad aruco_dictionary_id: {}".format(dictionary_id_name)
+            )
+            options = "\n".join(s for s in dir(cv2.aruco) if s.startswith("DICT"))
+            self.get_logger().error("valid options: {}".format(options))
+            raise
+
+        # Set up subscriptions
+        self.info_sub = self.create_subscription(
+            CameraInfo, info_topic, self.info_callback, qos_profile_sensor_data
+        )
+
+        self.create_subscription(
+            Image, image_topic, self.image_callback, qos_profile_sensor_data
+        )
+
+        # Set up publishers
+        self.poses_pub = self.create_publisher(PoseArray, "aruco_poses", 10)
+        self.markers_pub = self.create_publisher(ArucoMarkers, "aruco_markers", 10)
+
+        # Set up fields for camera parameters
+        self.info_msg = None
+        self.intrinsic_mat = None
+        self.distortion = None
+
+        self.bridge = CvBridge()
+        self.tf_broadcaster = TransformBroadcaster(self)
+
+        self.runtime_calibration: RuntimeCalibration | None = None
+
+    def send_tf(self):
+        extrinsic_mat = self.runtime_calibration.extrinsic_mat
+        if extrinsic_mat is not None:
+            t = RuntimeCalibrationNode.matrix_to_transform_stamped(
+                extrinsic_mat,
+                WORLD_FRAME,
+                self.camera_frame,
+                self.get_clock().now().to_msg(),
+            )
+            self.tf_broadcaster.sendTransform(t)
+
+    @staticmethod
+    def matrix_to_transform_stamped(matrix, parent_frame, child_frame, stamp):
+        """
+        Convert a 4x4 transformation matrix to a ROS2 TransformStamped message.
+
+        :param matrix: 4x4 numpy array representing the transformation matrix.
+        :param parent_frame: The parent frame ID.
+        :param child_frame: The child frame ID.
+        :param stamp: The timestamp for the TransformStamped message.
+        :return: A TransformStamped message.
+        """
+        translation = matrix[0:3, 3]
+
+        rot = R.from_matrix(matrix[0:3, 0:3])
+        xyzw = rot.as_quat()
+
+        transform_stamped = TransformStamped()
+        transform_stamped.header.stamp = stamp
+        transform_stamped.header.frame_id = parent_frame
+        transform_stamped.child_frame_id = child_frame
+
+        transform_stamped.transform.translation.x = translation[0]
+        transform_stamped.transform.translation.y = translation[1]
+        transform_stamped.transform.translation.z = translation[2]
+
+        transform_stamped.transform.rotation.x = xyzw[0]
+        transform_stamped.transform.rotation.y = xyzw[1]
+        transform_stamped.transform.rotation.z = xyzw[2]
+        transform_stamped.transform.rotation.w = xyzw[3]
+
+        return transform_stamped
+
+    def init_runtime_calibration(self, info_msg):
+        intrinsic_mat = np.reshape(np.array(info_msg.k), (3, 3))
+        distortion_coef = np.array(info_msg.d)
+
+        self.runtime_calibration = RuntimeCalibration(
+            self.get_logger(),
+            intrinsic_mat,
+            distortion_coef,
+            FieldParam(
+                self.marker_size, self.border_size, self.field_length, self.field_width
+            ),
+            aruco_dict_id=self.dictionary_id,  # FIXME
+        )
+
+    def info_callback(self, info_msg):
+        if info_msg is None:
+            self.get_logger().error("No Info")
+            return
+        # Keep the message around: image_callback uses its header as a frame fallback
+        self.info_msg = info_msg
+        self.get_logger().info(f"{info_msg}")
+        self.init_runtime_calibration(info_msg)
+        # Assume that camera parameters will remain the same...
+        self.destroy_subscription(self.info_sub)
+
+    def image_callback(self, img_msg):
+        if self.runtime_calibration is None:
+            self.get_logger().warn("No camera info has been received!")
+            return
+
+        cv_image = self.bridge.imgmsg_to_cv2(img_msg, desired_encoding="mono8")
+
+        markers = ArucoMarkers()
+        pose_array = PoseArray()
+
+        if not self.camera_frame:
+            markers.header.frame_id = self.info_msg.header.frame_id
+            pose_array.header.frame_id = self.info_msg.header.frame_id
+        else:
+            markers.header.frame_id = self.camera_frame
+            pose_array.header.frame_id = self.camera_frame
+
+        markers.header.stamp = img_msg.header.stamp
+        pose_array.header.stamp = img_msg.header.stamp
+        self.runtime_calibration.detect_markers(cv_image)
+        if self.runtime_calibration.extrinsic is not None:
+            self.send_tf()
+
+
+def main():
+    rclpy.init()
+    node = RuntimeCalibrationNode()
+    try:
+        rclpy.spin(node)
+    except KeyboardInterrupt:
+        pass
+
+    rclpy.shutdown()
diff --git a/src/calibration_node/launch/multi_camera_calibration.launch.py b/src/calibration_node/launch/multi_camera_calibration.launch.py
index f311d2dd5da123a17df9b67ff0e91bc50687557f..bd6b779b8f3ac7b5bde126c8ef10615ddd7503ab 100644
--- a/src/calibration_node/launch/multi_camera_calibration.launch.py
+++ b/src/calibration_node/launch/multi_camera_calibration.launch.py
@@ -11,7 +11,12 @@ def generate_launch_description():
     camera_config2 = os.path.join(
         get_package_share_directory("server_main"), "config", "camera_params2.yaml"
     )
-
+    # Alternative test config:
+    # camera_config2 = os.path.join(
+    #     get_package_share_directory("server_main"), "config", "camera_params_test.yaml"
+    # )
     return LaunchDescription(
         [
             Node(
diff --git a/src/calibration_node/launch/runtime_calibration.launch.py b/src/calibration_node/launch/runtime_calibration.launch.py
new file mode 100644
index 0000000000000000000000000000000000000000..eda5b45ba45f642c13b9d332cc761503a36aa272
--- /dev/null
+++ b/src/calibration_node/launch/runtime_calibration.launch.py
@@ -0,0 +1,64 @@
+from launch import LaunchDescription
+from launch_ros.actions import Node
+import os
+from ament_index_python.packages import get_package_share_directory
+
+
+def generate_launch_description():
+    camera_config = os.path.join(
+        get_package_share_directory("server_main"), "config", "camera_params.yaml"
+    )
+    camera_config2 = os.path.join(
+        get_package_share_directory("server_main"), "config", "camera_params2.yaml"
+    )
+
+    return LaunchDescription(
+        [
+            Node(
+                package="usb_cam_ros2",
+                executable="usb_cam_ros2_node_exe",
+                parameters=[camera_config],
+                output="screen",
+                namespace="camera1",
+                emulate_tty=True,
+            ),
+            Node(
+                package="calibration_node",
+                executable="runtime_calibration_node",
+                parameters=[
+                    {"camera_frame": "camera1"},
+                    {"aruco_dictionary_id": "DICT_5X5_100"},
+                    {"marker_size": 0.170},  # 0.07
+                ],
+                remappings=[
+                    ("/camera/image_raw", "/camera1/image_raw"),
+                    ("/camera/camera_info", "/camera1/camera_info"),
+                ],
+                output="screen",
+                emulate_tty=True,
+            ),
+            Node(
+                package="usb_cam_ros2",
+                executable="usb_cam_ros2_node_exe",
+                parameters=[camera_config2],
+                output="screen",
+                namespace="camera2",
+                emulate_tty=True,
+            ),
+            Node(
+                package="calibration_node",
+                executable="runtime_calibration_node",
+                parameters=[
+                    {"camera_frame": "camera2"},
+                    {"aruco_dictionary_id": "DICT_5X5_100"},
+                    {"marker_size": 0.170},  # 0.07
+                ],
+                remappings=[
+                    ("/camera/image_raw", "/camera2/image_raw"),
+                    ("/camera/camera_info", "/camera2/camera_info"),
+                ],
+                output="screen",
+                emulate_tty=True,
+            ),
+        ]
+    )
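
The two camera/calibration pairs above differ only in namespace and parameter
file. If more cameras are added, a helper along these lines would remove the
duplication (a sketch using the same parameters as the launch file, not part
of this change):

    from launch import LaunchDescription
    from launch_ros.actions import Node
    import os
    from ament_index_python.packages import get_package_share_directory


    def camera_with_calibration(namespace: str, camera_config: str) -> list:
        """Return a usb_cam node plus its runtime calibration node."""
        return [
            Node(
                package="usb_cam_ros2",
                executable="usb_cam_ros2_node_exe",
                parameters=[camera_config],
                output="screen",
                namespace=namespace,
                emulate_tty=True,
            ),
            Node(
                package="calibration_node",
                executable="runtime_calibration_node",
                parameters=[
                    {"camera_frame": namespace},
                    {"aruco_dictionary_id": "DICT_5X5_100"},
                    {"marker_size": 0.170},
                ],
                remappings=[
                    ("/camera/image_raw", f"/{namespace}/image_raw"),
                    ("/camera/camera_info", f"/{namespace}/camera_info"),
                ],
                output="screen",
                emulate_tty=True,
            ),
        ]


    def generate_launch_description():
        share = get_package_share_directory("server_main")
        return LaunchDescription(
            camera_with_calibration(
                "camera1", os.path.join(share, "config", "camera_params.yaml")
            )
            + camera_with_calibration(
                "camera2", os.path.join(share, "config", "camera_params2.yaml")
            )
        )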
diff --git a/src/calibration_node/launch/single_camera_calibration.launch.py b/src/calibration_node/launch/single_camera_calibration.launch.py
index e9ccecd29c8b5595afb6e260d34da571da937eec..25bcc580faad14cdadfdf2e1e752b1a363668817 100644
--- a/src/calibration_node/launch/single_camera_calibration.launch.py
+++ b/src/calibration_node/launch/single_camera_calibration.launch.py
@@ -16,7 +16,7 @@ def generate_launch_description():
                 parameters=[
                     {"camera_frame": "camera1"},
                     {"aruco_dictionary_id": "DICT_4X4_100"},
-                    {"marker_size": 0.070},  # 0.17
+                    {"marker_size": 0.160},  # 0.17
                 ],
                 remappings=[
                     # ('/camera/image_raw', '/image_raw'),
diff --git a/src/calibration_node/launch/tf.launch.py b/src/calibration_node/launch/tf.launch.py
index f917d94e3f3c56ddd52ba8cce592ec77ee4c6917..b0695c7d1754bcbef32753a39b6fe683d5e5d8b7 100644
--- a/src/calibration_node/launch/tf.launch.py
+++ b/src/calibration_node/launch/tf.launch.py
@@ -1,60 +1,109 @@
-from launch import LaunchDescription
-from launch_ros.actions import Node
-import os
-from ament_index_python.packages import get_package_share_directory
-import yaml
 
-odom_frame_string = """- child_frame_id: odom
-  name: odom
-  parent_frame_id: world
-  rotation:
-  - 0.0
-  - 0.0
-  - 0.0
-  - -0.0
-  translation:
-  - 0.0
-  - 0.0
-  - 0.0
 
-"""
+# """
 
 
-def load_yaml_file(file_path):
-    with open(file_path, "r") as file:
-        camera_tfs = file.read()
-        tfs = camera_tfs + odom_frame_string
-        return yaml.safe_load(tfs)
 
 
-def generate_launch_description():
-    ld = LaunchDescription()
 
-    # Get the path to the YAML file
-    config_path = os.path.join(
-        get_package_share_directory("calibration_node"),
-        "config",
-        "static_transforms.yaml",
+from launch import LaunchDescription
+from launch_ros.actions import Node
+
+
+def generate_launch_description():
+    # The camera parameter files are not loaded here; the usb_cam camera nodes
+    # are launched separately (see runtime_calibration.launch.py).
-    )
 
-    static_transforms = load_yaml_file(config_path)
-
-    for transform in static_transforms["static_transforms"]:
-        node = Node(
-            package="tf2_ros",
-            executable="static_transform_publisher",
-            name=transform["child_frame_id"],
-            arguments=[
-                str(transform["translation"][0]),
-                str(transform["translation"][1]),
-                str(transform["translation"][2]),
-                str(transform["rotation"][0]),
-                str(transform["rotation"][1]),
-                str(transform["rotation"][2]),
-                str(transform["rotation"][3]),
-                transform["parent_frame_id"],
-                transform["child_frame_id"],
-            ],
-        )
-        ld.add_action(node)
-    return ld
+    return LaunchDescription(
+        [
+            Node(
+                package="calibration_node",
+                executable="runtime_calibration_node",
+                parameters=[
+                    {"camera_frame": "camera1"},
+                    {"aruco_dictionary_id": "DICT_5X5_100"},
+                    {"marker_size": 0.170},  # 0.07
+                ],
+                remappings=[
+                    ("/camera/image_raw", "/camera1/image_raw"),
+                    ("/camera/camera_info", "/camera1/camera_info"),
+                ],
+                output="screen",
+                emulate_tty=True,
+            ),
+            Node(
+                package="calibration_node",
+                executable="runtime_calibration_node",
+                parameters=[
+                    {"camera_frame": "camera2"},
+                    {"aruco_dictionary_id": "DICT_5X5_100"},
+                    {"marker_size": 0.170},  # 0.07
+                ],
+                remappings=[
+                    ("/camera/image_raw", "/camera2/image_raw"),
+                    ("/camera/camera_info", "/camera2/camera_info"),
+                ],
+                output="screen",
+                emulate_tty=True,
+            ),
+        ]
+    )
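
Note that the static_transform_publisher nodes from the old tf.launch.py are
gone, so the world->camera transforms now have to come from the runtime
calibration nodes (presumably via send_tf()). For reference, broadcasting such
a transform from Python typically looks like the sketch below; the frame names
and values are placeholders, not this package's actual code:

    import rclpy
    from rclpy.node import Node
    from geometry_msgs.msg import TransformStamped
    from tf2_ros import StaticTransformBroadcaster


    class WorldToCameraPublisher(Node):
        """Publish one world->camera transform, latched on /tf_static."""

        def __init__(self):
            super().__init__("world_to_camera_publisher")
            self.broadcaster = StaticTransformBroadcaster(self)
            t = TransformStamped()
            t.header.stamp = self.get_clock().now().to_msg()
            t.header.frame_id = "world"      # parent frame
            t.child_frame_id = "camera1"     # matches the camera_frame parameter
            t.transform.translation.x = 0.0  # estimated extrinsics would go here
            t.transform.rotation.w = 1.0     # identity rotation placeholder
            self.broadcaster.sendTransform(t)


    def main():
        rclpy.init()
        node = WorldToCameraPublisher()
        try:
            rclpy.spin(node)
        except KeyboardInterrupt:
            pass
        node.destroy_node()
        rclpy.shutdown()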
diff --git a/src/calibration_node/setup.py b/src/calibration_node/setup.py
index 1eb08325c3d6eec434a3dee6c47aea301fc1edff..5866d88d42689e613e26b887868637a1380f5169 100644
--- a/src/calibration_node/setup.py
+++ b/src/calibration_node/setup.py
@@ -28,6 +28,9 @@ setup(
     license="TODO: License declaration",
     tests_require=["pytest"],
     entry_points={
-        "console_scripts": ["calibration_node = calibration_node.calibration:main"],
+        "console_scripts": [
+            "calibration_node = calibration_node.calibration:main",
+            "runtime_calibration_node = calibration_node.runtime_calibration_node:main",
+        ],
     },
 )
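
With the new console script registered, the node can also be started without a
launch file; the parameters and remappings below mirror
runtime_calibration.launch.py:

    ros2 run calibration_node runtime_calibration_node --ros-args \
        -p camera_frame:=camera1 \
        -p aruco_dictionary_id:=DICT_5X5_100 \
        -p marker_size:=0.170 \
        -r /camera/image_raw:=/camera1/image_raw \
        -r /camera/camera_info:=/camera1/camera_info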
diff --git a/src/localisation/config/ekf_old_rosbag.yaml b/src/localisation/config/androsot.yaml
similarity index 99%
rename from src/localisation/config/ekf_old_rosbag.yaml
rename to src/localisation/config/androsot.yaml
index e113223e34b8b326c7cccc97ada0bc72ed4a7775..1d05f11ed3e2a940db9a7163154212ed34260c65 100644
--- a/src/localisation/config/ekf_old_rosbag.yaml
+++ b/src/localisation/config/androsot.yaml
@@ -4,7 +4,7 @@ ekf_filter_node:
         # The frequency, in Hz, at which the filter will output a position estimate. Note that the filter will not begin
         # computation until it receives at least one message from one of the inputs. It will then run continuously at the
         # frequency specified here, regardless of whether it receives more measurements. Defaults to 30 if unspecified.
-        frequency: 5.0
+        frequency: 30.0
 
         # The period, in seconds, after which we consider a sensor to have timed out. In this event, we carry out a predict
         # cycle on the EKF without correcting it. This parameter can be thought of as the minimum frequency with which the
@@ -45,7 +45,7 @@ ekf_filter_node:
         publish_acceleration: false
 
         # Whether to broadcast the transformation over the /tf topic. Defaults to true if unspecified.
-        publish_tf: true
+        publish_tf: false
 
         # REP-105 (http://www.ros.org/reps/rep-0105.html) specifies four principal coordinate frames: base_link, odom, map, and
         # earth. base_link is the coordinate frame that is affixed to the robot. Both odom and map are world-fixed frames.
@@ -139,7 +139,7 @@ ekf_filter_node:
         # odom1_queue_size: 2
         # odom1_pose_rejection_threshold: 2.0
         # odom1_twist_rejection_threshold: 0.2
-        pose0: /pose_const_cov
+        pose0: /pose
         pose0_config: [true,  true,  true,
                        true, true, true,
                        false, false, false,
@@ -213,7 +213,6 @@ ekf_filter_node:
         # If your robot cannot instantaneously reach its deceleration limit, the permitted change can be controlled with these
         # gains
         # deceleration_gains: [1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
-
         # [ADVANCED] The process noise covariance matrix can be difficult to tune, and can vary for each application, so it is
         # exposed as a configuration parameter. This matrix represents the noise we add to the total error after each
         # prediction step. The better the omnidirectional motion model matches your system, the smaller these values can be.
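
The renamed androsot.yaml is a robot_localization config (top-level key
ekf_filter_node), so it would be loaded by the standard ekf_node; a minimal
launch sketch, assuming the config is installed under the localisation
package's share directory:

    from launch import LaunchDescription
    from launch_ros.actions import Node
    import os
    from ament_index_python.packages import get_package_share_directory


    def generate_launch_description():
        ekf_config = os.path.join(
            get_package_share_directory("localisation"), "config", "androsot.yaml"
        )
        return LaunchDescription(
            [
                Node(
                    package="robot_localization",
                    executable="ekf_node",
                    name="ekf_filter_node",  # must match the key in androsot.yaml
                    parameters=[ekf_config],
                    output="screen",
                ),
            ]
        )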
diff --git a/src/localisation/config/ekf.yaml b/src/localisation/config/ekf.yaml
deleted file mode 100644
index 3e2058e7e4713dc63fee8e4ca9050379db23fd2e..0000000000000000000000000000000000000000
--- a/src/localisation/config/ekf.yaml
+++ /dev/null
@@ -1,248 +0,0 @@
-### ekf config file ###
-ekf_filter_node:
-    ros__parameters:
-        # The frequency, in Hz, at which the filter will output a position estimate. Note that the filter will not begin
-        # computation until it receives at least one message from one of the inputs. It will then run continuously at the
-        # frequency specified here, regardless of whether it receives more measurements. Defaults to 30 if unspecified.
-        frequency: 5.0
-
-        # The period, in seconds, after which we consider a sensor to have timed out. In this event, we carry out a predict
-        # cycle on the EKF without correcting it. This parameter can be thought of as the minimum frequency with which the
-        # filter will generate new output. Defaults to 1 / frequency if not specified.
-        sensor_timeout: 1.0
-
-        # ekf_localization_node and ukf_localization_node both use a 3D omnidirectional motion model. If this parameter is
-        # set to true, no 3D information will be used in your state estimate. Use this if you are operating in a planar
-        # environment and want to ignore the effect of small variations in the ground plane that might otherwise be detected
-        # by, for example, an IMU. Defaults to false if unspecified.
-        two_d_mode: false
-
-        # Use this parameter to provide an offset to the transform generated by ekf_localization_node. This can be used for
-        # future dating the transform, which is required for interaction with some other packages. Defaults to 0.0 if
-        # unspecified.
-        transform_time_offset: 0.0
-
-        # Use this parameter to provide specify how long the tf listener should wait for a transform to become available.
-        # Defaults to 0.0 if unspecified.
-        transform_timeout: 0.0
-
-        # If you're having trouble, try setting this to true, and then echo the /diagnostics_agg topic to see if the node is
-        # unhappy with any settings or data.
-        print_diagnostics: true
-
-        # Debug settings. Not for the faint of heart. Outputs a ludicrous amount of information to the file specified by
-        # debug_out_file. I hope you like matrices! Please note that setting this to true will have strongly deleterious
-        # effects on the performance of the node. Defaults to false if unspecified.
-        debug: false
-
-        # Defaults to "robot_localization_debug.txt" if unspecified. Please specify the full path.
-        debug_out_file: /path/to/debug/file.txt
-
-        # Whether we'll allow old measurements to cause a re-publication of the updated state
-        permit_corrected_publication: false
-
-        # Whether to publish the acceleration state. Defaults to false if unspecified.
-        publish_acceleration: false
-
-        # Whether to broadcast the transformation over the /tf topic. Defaults to true if unspecified.
-        publish_tf: true
-
-        # REP-105 (http://www.ros.org/reps/rep-0105.html) specifies four principal coordinate frames: base_link, odom, map, and
-        # earth. base_link is the coordinate frame that is affixed to the robot. Both odom and map are world-fixed frames.
-        # The robot's position in the odom frame will drift over time, but is accurate in the short term and should be
-        # continuous. The odom frame is therefore the best frame for executing local motion plans. The map frame, like the odom
-        # frame, is a world-fixed coordinate frame, and while it contains the most globally accurate position estimate for your
-        # robot, it is subject to discrete jumps, e.g., due to the fusion of GPS data or a correction from a map-based
-        # localization node. The earth frame is used to relate multiple map frames by giving them a common reference frame.
-        # ekf_localization_node and ukf_localization_node are not concerned with the earth frame.
-        # Here is how to use the following settings:
-        # 1. Set the map_frame, odom_frame, and base_link frames to the appropriate frame names for your system.
-        #     1a. If your system does not have a map_frame, just remove it, and make sure "world_frame" is set to the value of
-        #         odom_frame.
-        # 2. If you are fusing continuous position data such as wheel encoder odometry, visual odometry, or IMU data, set
-        #   "world_frame" to your odom_frame value. This is the default behavior for robot_localization's state estimation nodes.
-        # 3. If you are fusing global absolute position data that is subject to discrete jumps (e.g., GPS or position updates
-        # from landmark observations) then:
-        #     3a. Set your "world_frame" to your map_frame value
-        #     3b. MAKE SURE something else is generating the odom->base_link transform. Note that this can even be another state
-        #         estimation node from robot_localization! However, that instance should *not* fuse the global data.
-        map_frame: map              # Defaults to "map" if unspecified
-        odom_frame: odom            # Defaults to "odom" if unspecified
-        base_link_frame: base_link  # Defaults to "base_link" if unspecified
-        world_frame: map          # Defaults to the value of odom_frame if unspecified
-
-        # The filter accepts an arbitrary number of inputs from each input message type (nav_msgs/Odometry,
-        # geometry_msgs/PoseWithCovarianceStamped, geometry_msgs/TwistWithCovarianceStamped,
-        # sensor_msgs/Imu). To add an input, simply append the next number in the sequence to its "base" name, e.g., odom0,
-        # odom1, twist0, twist1, imu0, imu1, imu2, etc. The value should be the topic name. These parameters obviously have no
-        # default values, and must be specified.
-        # odom0: /dummy_odom
-
-        # Each sensor reading updates some or all of the filter's state. These options give you greater control over which
-        # values from each measurement are fed to the filter. For example, if you have an odometry message as input, but only
-        # want to use its Z position value, then set the entire vector to false, except for the third entry. The order of the
-        # values is x, y, z, roll, pitch, yaw, vx, vy, vz, vroll, vpitch, vyaw, ax, ay, az. Note that not some message types
-        # do not provide some of the state variables estimated by the filter. For example, a TwistWithCovarianceStamped message
-        # has no pose information, so the first six values would be meaningless in that case. Each vector defaults to all false
-        # if unspecified, effectively making this parameter required for each sensor.
-        # odom0_config: [true,  true,  true,
-        #                true, true, true,
-        #                false, false, false,
-        #                false, false, true,
-        #                false, false, false]
-
-        # If you have high-frequency data or are running with a low frequency parameter value, then you may want to increase
-        # the size of the subscription queue so that more measurements are fused.
-        # odom0_queue_size: 2
-
-        # [ADVANCED] When measuring one pose variable with two sensors, a situation can arise in which both sensors under-
-        # report their covariances. This can lead to the filter rapidly jumping back and forth between each measurement as they
-        # arrive. In these cases, it often makes sense to (a) correct the measurement covariances, or (b) if velocity is also
-        # measured by one of the sensors, let one sensor measure pose, and the other velocity. However, doing (a) or (b) isn't
-        # always feasible, and so we expose the differential parameter. When differential mode is enabled, all absolute pose
-        # data is converted to velocity data by differentiating the absolute pose measurements. These velocities are then
-        # integrated as usual. NOTE: this only applies to sensors that provide pose measurements; setting differential to true
-        # for twist measurements has no effect.
-        # odom0_differential: false
-
-        # [ADVANCED] When the node starts, if this parameter is true, then the first measurement is treated as a "zero point"
-        # for all future measurements. While you can achieve the same effect with the differential paremeter, the key
-        # difference is that the relative parameter doesn't cause the measurement to be converted to a velocity before
-        # integrating it. If you simply want your measurements to start at 0 for a given sensor, set this to true.
-        # odom0_relative: false
-
-        # [ADVANCED] Whether to use the starting pose of child_frame_id as the origin of odometry.
-        # Note: this is different from setting odom0_relative to true, as when child_frame is different from
-        # base_link_frame, the rotation of base_link will be coupled into the translation of child_frame.
-        # Set to true for fusing secondary odometry sources that are rigidly connected to base_link but has a non-zero
-        # offset from base_link.
-        # odom0_pose_use_child_frame: false
-
-        # [ADVANCED] If your data is subject to outliers, use these threshold settings, expressed as Mahalanobis distances, to
-        # control how far away from the current vehicle state a sensor measurement is permitted to be. Each defaults to
-        # numeric_limits<double>::max() if unspecified. It is strongly recommended that these parameters be removed if not
-        # required. Data is specified at the level of pose and twist variables, rather than for each variable in isolation.
-        # For messages that have both pose and twist data, the parameter specifies to which part of the message we are applying
-        # the thresholds.
-        # odom0_pose_rejection_threshold: 5.0
-        # odom0_twist_rejection_threshold: 1.0
-
-        # Further input parameter examples
-        # odom1: example/odom2
-        # odom1_config: [false, false, true,
-        #                false, false, false,
-        #                false, false, false,
-        #                false, false, true,
-        #                false, false, false]
-        # odom1_differential: false
-        # odom1_relative: true
-        # odom1_queue_size: 2
-        # odom1_pose_rejection_threshold: 2.0
-        # odom1_twist_rejection_threshold: 0.2
-        pose0: /pose
-        pose0_config: [true,  true,  true,
-                       true, true, true,
-                       false, false, false,
-                       false, false, false,
-                       false, false, false]
-        pose0_differential: false
-        pose0_relative: false
-        pose0_queue_size: 5
-        pose0_rejection_threshold: 2.0  # Note the difference in parameter name
-
-        # twist0: example/twist
-        # twist0_config: [false, false, false,
-        #                 false, false, false,
-        #                 true,  true,  true,
-        #                 false, false, false,
-        #                 false, false, false]
-        # twist0_queue_size: 3
-        # twist0_rejection_threshold: 2.0
-
-        # imu0: example/imu
-        # imu0_config: [false, false, false,
-        #               true,  true,  true,
-        #               false, false, false,
-        #               true,  true,  true,
-        #               true,  true,  true]
-        # imu0_differential: false
-        # imu0_relative: true
-        # imu0_queue_size: 5
-        # imu0_pose_rejection_threshold: 0.8                 # Note the difference in parameter names
-        # imu0_twist_rejection_threshold: 0.8                #
-        # imu0_linear_acceleration_rejection_threshold: 0.8  #
-
-        # [ADVANCED] Some IMUs automatically remove acceleration due to gravity, and others don't. If yours doesn't, please set
-        # this to true, and *make sure* your data conforms to REP-103, specifically, that the data is in ENU frame.
-        # imu0_remove_gravitational_acceleration: true
-
-        # [ADVANCED]  The EKF and UKF models follow a standard predict/correct cycle. During prediction, if there is no
-        # acceleration reference, the velocity at time t+1 is simply predicted to be the same as the velocity at time t. During
-        # correction, this predicted value is fused with the measured value to produce the new velocity estimate. This can be
-        # problematic, as the final velocity will effectively be a weighted average of the old velocity and the new one. When
-        # this velocity is the integrated into a new pose, the result can be sluggish covergence. This effect is especially
-        # noticeable with LIDAR data during rotations. To get around it, users can try inflating the process_noise_covariance
-        # for the velocity variable in question, or decrease the  variance of the variable in question in the measurement
-        # itself. In addition, users can also take advantage of the control command being issued to the robot at the time we
-        # make the prediction. If control is used, it will get converted into an acceleration term, which will be used during
-        # predicition. Note that if an acceleration measurement for the variable in question is available from one of the
-        # inputs, the control term will be ignored.
-        # Whether or not we use the control input during predicition. Defaults to false.
-        use_control: false
-
-        # Whether the input (assumed to be cmd_vel) is a geometry_msgs/Twist or geometry_msgs/TwistStamped message. Defaults to
-        # false.
-        # stamped_control: false
-
-        # The last issued control command will be used in prediction for this period. Defaults to 0.2.
-        # control_timeout: 0.2
-
-        # Which velocities are being controlled. Order is vx, vy, vz, vroll, vpitch, vyaw.
-        # control_config: [true, false, false, false, false, true]
-
-        # Places limits on how large the acceleration term will be. Should match your robot's kinematics.
-        # acceleration_limits: [1.3, 0.0, 0.0, 0.0, 0.0, 3.4]
-
-        # Acceleration and deceleration limits are not always the same for robots.
-        # deceleration_limits: [1.3, 0.0, 0.0, 0.0, 0.0, 4.5]
-
-        # If your robot cannot instantaneously reach its acceleration limit, the permitted change can be controlled with these
-        # gains
-        # acceleration_gains: [0.8, 0.0, 0.0, 0.0, 0.0, 0.9]
-
-        # If your robot cannot instantaneously reach its deceleration limit, the permitted change can be controlled with these
-        # gains
-        # deceleration_gains: [1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
-
-        # [ADVANCED] The process noise covariance matrix can be difficult to tune, and can vary for each application, so it is
-        # exposed as a configuration parameter. This matrix represents the noise we add to the total error after each
-        # prediction step. The better the omnidirectional motion model matches your system, the smaller these values can be.
-        # However, if users find that a given variable is slow to converge, one approach is to increase the
-        # process_noise_covariance diagonal value for the variable in question, which will cause the filter's predicted error
-        # to be larger, which will cause the filter to trust the incoming measurement more during correction. The values are
-        # ordered as x, y, z, roll, pitch, yaw, vx, vy, vz, vroll, vpitch, vyaw, ax, ay, az. Defaults to the matrix below if
-        # unspecified.
-        # Note: the specification of covariance matrices can be cumbersome, so all matrix parameters in this package support
-        # both full specification or specification of only the diagonal values.
-        process_noise_covariance: [0.05, 0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.05, 0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.06, 0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.03, 0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.03, 0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.06, 0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.025, 0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.025, 0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.04, 0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.01, 0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.01, 0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.02, 0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.01, 0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.01, 0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.015]
-
-        # [ADVANCED] This represents the initial value for the state estimate error covariance matrix. Setting a diagonal
-        # value (variance) to a large value will result in rapid convergence for initial measurements of the variable in
-        # question. Users should take care not to use large values for variables that will not be measured directly. The values
-        # are ordered as x, y, z, roll, pitch, yaw, vx, vy, vz, vroll, vpitch, vyaw, ax, ay, az. Defaults to the diagonal values below
-        # if unspecified. In this example, we specify only the diagonal of the matrix.
-        initial_estimate_covariance: [1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9]
diff --git a/src/localisation/config/ekf_1.yaml b/src/localisation/config/ekf_1.yaml
deleted file mode 100644
index f97fcaaae323adc3be874e92b80d59921b817151..0000000000000000000000000000000000000000
--- a/src/localisation/config/ekf_1.yaml
+++ /dev/null
@@ -1,248 +0,0 @@
-### ekf config file ###
-ekf_filter_node:
-    ros__parameters:
-        # The frequency, in Hz, at which the filter will output a position estimate. Note that the filter will not begin
-        # computation until it receives at least one message from one of the inputs. It will then run continuously at the
-        # frequency specified here, regardless of whether it receives more measurements. Defaults to 30 if unspecified.
-        frequency: 30.0
-
-        # The period, in seconds, after which we consider a sensor to have timed out. In this event, we carry out a predict
-        # cycle on the EKF without correcting it. This parameter can be thought of as the minimum frequency with which the
-        # filter will generate new output. Defaults to 1 / frequency if not specified.
-        sensor_timeout: 0.1
-
-        # ekf_localization_node and ukf_localization_node both use a 3D omnidirectional motion model. If this parameter is
-        # set to true, no 3D information will be used in your state estimate. Use this if you are operating in a planar
-        # environment and want to ignore the effect of small variations in the ground plane that might otherwise be detected
-        # by, for example, an IMU. Defaults to false if unspecified.
-        two_d_mode: false
-
-        # Use this parameter to provide an offset to the transform generated by ekf_localization_node. This can be used for
-        # future dating the transform, which is required for interaction with some other packages. Defaults to 0.0 if
-        # unspecified.
-        transform_time_offset: 0.0
-
-        # Use this parameter to provide specify how long the tf listener should wait for a transform to become available.
-        # Defaults to 0.0 if unspecified.
-        transform_timeout: 0.0
-
-        # If you're having trouble, try setting this to true, and then echo the /diagnostics_agg topic to see if the node is
-        # unhappy with any settings or data.
-        print_diagnostics: true
-
-        # Debug settings. Not for the faint of heart. Outputs a ludicrous amount of information to the file specified by
-        # debug_out_file. I hope you like matrices! Please note that setting this to true will have strongly deleterious
-        # effects on the performance of the node. Defaults to false if unspecified.
-        debug: false
-
-        # Defaults to "robot_localization_debug.txt" if unspecified. Please specify the full path.
-        debug_out_file: /path/to/debug/file.txt
-
-        # Whether we'll allow old measurements to cause a re-publication of the updated state
-        permit_corrected_publication: false
-
-        # Whether to publish the acceleration state. Defaults to false if unspecified.
-        publish_acceleration: false
-
-        # Whether to broadcast the transformation over the /tf topic. Defaults to true if unspecified.
-        publish_tf: true
-
-        # REP-105 (http://www.ros.org/reps/rep-0105.html) specifies four principal coordinate frames: base_link, odom, map, and
-        # earth. base_link is the coordinate frame that is affixed to the robot. Both odom and map are world-fixed frames.
-        # The robot's position in the odom frame will drift over time, but is accurate in the short term and should be
-        # continuous. The odom frame is therefore the best frame for executing local motion plans. The map frame, like the odom
-        # frame, is a world-fixed coordinate frame, and while it contains the most globally accurate position estimate for your
-        # robot, it is subject to discrete jumps, e.g., due to the fusion of GPS data or a correction from a map-based
-        # localization node. The earth frame is used to relate multiple map frames by giving them a common reference frame.
-        # ekf_localization_node and ukf_localization_node are not concerned with the earth frame.
-        # Here is how to use the following settings:
-        # 1. Set the map_frame, odom_frame, and base_link frames to the appropriate frame names for your system.
-        #     1a. If your system does not have a map_frame, just remove it, and make sure "world_frame" is set to the value of
-        #         odom_frame.
-        # 2. If you are fusing continuous position data such as wheel encoder odometry, visual odometry, or IMU data, set
-        #   "world_frame" to your odom_frame value. This is the default behavior for robot_localization's state estimation nodes.
-        # 3. If you are fusing global absolute position data that is subject to discrete jumps (e.g., GPS or position updates
-        # from landmark observations) then:
-        #     3a. Set your "world_frame" to your map_frame value
-        #     3b. MAKE SURE something else is generating the odom->base_link transform. Note that this can even be another state
-        #         estimation node from robot_localization! However, that instance should *not* fuse the global data.
-        map_frame: map              # Defaults to "map" if unspecified
-        odom_frame: odom            # Defaults to "odom" if unspecified
-        base_link_frame: base_link  # Defaults to "base_link" if unspecified
-        world_frame: odom           # Defaults to the value of odom_frame if unspecified
-
-        # The filter accepts an arbitrary number of inputs from each input message type (nav_msgs/Odometry,
-        # geometry_msgs/PoseWithCovarianceStamped, geometry_msgs/TwistWithCovarianceStamped,
-        # sensor_msgs/Imu). To add an input, simply append the next number in the sequence to its "base" name, e.g., odom0,
-        # odom1, twist0, twist1, imu0, imu1, imu2, etc. The value should be the topic name. These parameters obviously have no
-        # default values, and must be specified.
-        odom0: example/odom
-
-        # Each sensor reading updates some or all of the filter's state. These options give you greater control over which
-        # values from each measurement are fed to the filter. For example, if you have an odometry message as input, but only
-        # want to use its Z position value, then set the entire vector to false, except for the third entry. The order of the
-        # values is x, y, z, roll, pitch, yaw, vx, vy, vz, vroll, vpitch, vyaw, ax, ay, az. Note that not some message types
-        # do not provide some of the state variables estimated by the filter. For example, a TwistWithCovarianceStamped message
-        # has no pose information, so the first six values would be meaningless in that case. Each vector defaults to all false
-        # if unspecified, effectively making this parameter required for each sensor.
-        odom0_config: [true,  true,  false,
-                       false, false, false,
-                       false, false, false,
-                       false, false, true,
-                       false, false, false]
-
-        # If you have high-frequency data or are running with a low frequency parameter value, then you may want to increase
-        # the size of the subscription queue so that more measurements are fused.
-        odom0_queue_size: 2
-
-        # [ADVANCED] When measuring one pose variable with two sensors, a situation can arise in which both sensors under-
-        # report their covariances. This can lead to the filter rapidly jumping back and forth between each measurement as they
-        # arrive. In these cases, it often makes sense to (a) correct the measurement covariances, or (b) if velocity is also
-        # measured by one of the sensors, let one sensor measure pose, and the other velocity. However, doing (a) or (b) isn't
-        # always feasible, and so we expose the differential parameter. When differential mode is enabled, all absolute pose
-        # data is converted to velocity data by differentiating the absolute pose measurements. These velocities are then
-        # integrated as usual. NOTE: this only applies to sensors that provide pose measurements; setting differential to true
-        # for twist measurements has no effect.
-        odom0_differential: false
-
-        # [ADVANCED] When the node starts, if this parameter is true, then the first measurement is treated as a "zero point"
-        # for all future measurements. While you can achieve the same effect with the differential paremeter, the key
-        # difference is that the relative parameter doesn't cause the measurement to be converted to a velocity before
-        # integrating it. If you simply want your measurements to start at 0 for a given sensor, set this to true.
-        odom0_relative: false
-
-        # [ADVANCED] Whether to use the starting pose of child_frame_id as the origin of odometry.
-        # Note: this is different from setting odom0_relative to true, as when child_frame is different from
-        # base_link_frame, the rotation of base_link will be coupled into the translation of child_frame.
-        # Set to true for fusing secondary odometry sources that are rigidly connected to base_link but has a non-zero
-        # offset from base_link.
-        odom0_pose_use_child_frame: false
-
-        # [ADVANCED] If your data is subject to outliers, use these threshold settings, expressed as Mahalanobis distances, to
-        # control how far away from the current vehicle state a sensor measurement is permitted to be. Each defaults to
-        # numeric_limits<double>::max() if unspecified. It is strongly recommended that these parameters be removed if not
-        # required. Data is specified at the level of pose and twist variables, rather than for each variable in isolation.
-        # For messages that have both pose and twist data, the parameter specifies to which part of the message we are applying
-        # the thresholds.
-        odom0_pose_rejection_threshold: 5.0
-        odom0_twist_rejection_threshold: 1.0
-
-        # Further input parameter examples
-        odom1: example/odom2
-        odom1_config: [false, false, true,
-                       false, false, false,
-                       false, false, false,
-                       false, false, true,
-                       false, false, false]
-        odom1_differential: false
-        odom1_relative: true
-        odom1_queue_size: 2
-        odom1_pose_rejection_threshold: 2.0
-        odom1_twist_rejection_threshold: 0.2
-        pose0: example/pose
-        pose0_config: [true,  true,  false,
-                       false, false, false,
-                       false, false, false,
-                       false, false, false,
-                       false, false, false]
-        pose0_differential: true
-        pose0_relative: false
-        pose0_queue_size: 5
-        pose0_rejection_threshold: 2.0  # Note the difference in parameter name
-
-        twist0: example/twist
-        twist0_config: [false, false, false,
-                        false, false, false,
-                        true,  true,  true,
-                        false, false, false,
-                        false, false, false]
-        twist0_queue_size: 3
-        twist0_rejection_threshold: 2.0
-
-        imu0: example/imu
-        imu0_config: [false, false, false,
-                      true,  true,  true,
-                      false, false, false,
-                      true,  true,  true,
-                      true,  true,  true]
-        imu0_differential: false
-        imu0_relative: true
-        imu0_queue_size: 5
-        imu0_pose_rejection_threshold: 0.8                 # Note the difference in parameter names
-        imu0_twist_rejection_threshold: 0.8                #
-        imu0_linear_acceleration_rejection_threshold: 0.8  #
-
-        # [ADVANCED] Some IMUs automatically remove acceleration due to gravity, and others don't. If yours doesn't, please set
-        # this to true, and *make sure* your data conforms to REP-103, specifically, that the data is in ENU frame.
-        imu0_remove_gravitational_acceleration: true
-
-        # [ADVANCED]  The EKF and UKF models follow a standard predict/correct cycle. During prediction, if there is no
-        # acceleration reference, the velocity at time t+1 is simply predicted to be the same as the velocity at time t. During
-        # correction, this predicted value is fused with the measured value to produce the new velocity estimate. This can be
-        # problematic, as the final velocity will effectively be a weighted average of the old velocity and the new one. When
-        # this velocity is the integrated into a new pose, the result can be sluggish covergence. This effect is especially
-        # noticeable with LIDAR data during rotations. To get around it, users can try inflating the process_noise_covariance
-        # for the velocity variable in question, or decrease the  variance of the variable in question in the measurement
-        # itself. In addition, users can also take advantage of the control command being issued to the robot at the time we
-        # make the prediction. If control is used, it will get converted into an acceleration term, which will be used during
-        # predicition. Note that if an acceleration measurement for the variable in question is available from one of the
-        # inputs, the control term will be ignored.
-        # Whether or not we use the control input during predicition. Defaults to false.
-        use_control: true
-
-        # Whether the input (assumed to be cmd_vel) is a geometry_msgs/Twist or geometry_msgs/TwistStamped message. Defaults to
-        # false.
-        stamped_control: false
-
-        # The last issued control command will be used in prediction for this period. Defaults to 0.2.
-        control_timeout: 0.2
-
-        # Which velocities are being controlled. Order is vx, vy, vz, vroll, vpitch, vyaw.
-        control_config: [true, false, false, false, false, true]
-
-        # Places limits on how large the acceleration term will be. Should match your robot's kinematics.
-        acceleration_limits: [1.3, 0.0, 0.0, 0.0, 0.0, 3.4]
-
-        # Acceleration and deceleration limits are not always the same for robots.
-        deceleration_limits: [1.3, 0.0, 0.0, 0.0, 0.0, 4.5]
-
-        # If your robot cannot instantaneously reach its acceleration limit, the permitted change can be controlled with these
-        # gains
-        acceleration_gains: [0.8, 0.0, 0.0, 0.0, 0.0, 0.9]
-
-        # If your robot cannot instantaneously reach its deceleration limit, the permitted change can be controlled with these
-        # gains
-        deceleration_gains: [1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
-
-        # [ADVANCED] The process noise covariance matrix can be difficult to tune, and can vary for each application, so it is
-        # exposed as a configuration parameter. This matrix represents the noise we add to the total error after each
-        # prediction step. The better the omnidirectional motion model matches your system, the smaller these values can be.
-        # However, if users find that a given variable is slow to converge, one approach is to increase the
-        # process_noise_covariance diagonal value for the variable in question, which will cause the filter's predicted error
-        # to be larger, which will cause the filter to trust the incoming measurement more during correction. The values are
-        # ordered as x, y, z, roll, pitch, yaw, vx, vy, vz, vroll, vpitch, vyaw, ax, ay, az. Defaults to the matrix below if
-        # unspecified.
-        # Note: the specification of covariance matrices can be cumbersome, so all matrix parameters in this package support
-        # both full specification or specification of only the diagonal values.
-        process_noise_covariance: [0.05, 0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.05, 0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.06, 0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.03, 0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.03, 0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.06, 0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.025, 0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.025, 0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.04, 0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.01, 0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.01, 0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.02, 0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.01, 0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.01, 0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.015]
-
-        # [ADVANCED] This represents the initial value for the state estimate error covariance matrix. Setting a diagonal
-        # value (variance) to a large value will result in rapid convergence for initial measurements of the variable in
-        # question. Users should take care not to use large values for variables that will not be measured directly. The values
-        # are ordered as x, y, z, roll, pitch, yaw, vx, vy, vz, vroll, vpitch, vyaw, ax, ay, az. Defaults to the diagonal values below
-        # if unspecified. In this example, we specify only the diagonal of the matrix.
-        initial_estimate_covariance: [1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9]
diff --git a/src/localisation/config/ekf_config_old_kolya.yaml b/src/localisation/config/ekf_config_old_kolya.yaml
deleted file mode 100644
index 82910f7fcbd3c053e6add7bc44375f7a3d590876..0000000000000000000000000000000000000000
--- a/src/localisation/config/ekf_config_old_kolya.yaml
+++ /dev/null
@@ -1,130 +0,0 @@
-### ekf config file ###
-ekf_filter_node:
-    ros__parameters:
-        # The frequency, in Hz, at which the filter will output a position estimate. Note that the filter will not begin
-        # computation until it receives at least one message from one of the inputs. It will then run continuously at the
-        # frequency specified here, regardless of whether it receives more measurements. Defaults to 30 if unspecified.
-        frequency: 30.0
-
-        # The period, in seconds, after which we consider a sensor to have timed out. In this event, we carry out a predict
-        # cycle on the EKF without correcting it. This parameter can be thought of as the minimum frequency with which the
-        # filter will generate new output. Defaults to 1 / frequency if not specified.
-        sensor_timeout: 1.0
-
-        # ekf_localization_node and ukf_localization_node both use a 3D omnidirectional motion model. If this parameter is
-        # set to true, no 3D information will be used in your state estimate. Use this if you are operating in a planar
-        # environment and want to ignore the effect of small variations in the ground plane that might otherwise be detected
-        # by, for example, an IMU. Defaults to false if unspecified.
-        two_d_mode: false
-
-        # Use this parameter to provide an offset to the transform generated by ekf_localization_node. This can be used for
-        # future dating the transform, which is required for interaction with some other packages. Defaults to 0.0 if
-        # unspecified.
-        transform_time_offset: 0.0
-
-        # Use this parameter to provide specify how long the tf listener should wait for a transform to become available.
-        # Defaults to 0.0 if unspecified.
-        transform_timeout: 0.0
-
-        # If you're having trouble, try setting this to true, and then echo the /diagnostics_agg topic to see if the node is
-        # unhappy with any settings or data.
-        print_diagnostics: true
-
-        # Debug settings. Not for the faint of heart. Outputs a ludicrous amount of information to the file specified by
-        # debug_out_file. I hope you like matrices! Please note that setting this to true will have strongly deleterious
-        # effects on the performance of the node. Defaults to false if unspecified.
-        debug: true
-
-        # Defaults to "robot_localization_debug.txt" if unspecified. Please specify the full path.
-        debug_out_file: ./file.txt
-
-        # Whether we'll allow old measurements to cause a re-publication of the updated state
-        permit_corrected_publication: false
-
-        # Whether to publish the acceleration state. Defaults to false if unspecified.
-        publish_acceleration: false
-
-        # Whether to broadcast the transformation over the /tf topic. Defaults to true if unspecified.
-        publish_tf: true
-
-        # REP-105 (http://www.ros.org/reps/rep-0105.html) specifies four principal coordinate frames: base_link, odom, map, and
-        # earth. base_link is the coordinate frame that is affixed to the robot. Both odom and map are world-fixed frames.
-        # The robot's position in the odom frame will drift over time, but is accurate in the short term and should be
-        # continuous. The odom frame is therefore the best frame for executing local motion plans. The map frame, like the odom
-        # frame, is a world-fixed coordinate frame, and while it contains the most globally accurate position estimate for your
-        # robot, it is subject to discrete jumps, e.g., due to the fusion of GPS data or a correction from a map-based
-        # localization node. The earth frame is used to relate multiple map frames by giving them a common reference frame.
-        # ekf_localization_node and ukf_localization_node are not concerned with the earth frame.
-        # Here is how to use the following settings:
-        # 1. Set the map_frame, odom_frame, and base_link frames to the appropriate frame names for your system.
-        #     1a. If your system does not have a map_frame, just remove it, and make sure "world_frame" is set to the value of
-        #         odom_frame.
-        # 2. If you are fusing continuous position data such as wheel encoder odometry, visual odometry, or IMU data, set
-        #   "world_frame" to your odom_frame value. This is the default behavior for robot_localization's state estimation nodes.
-        # 3. If you are fusing global absolute position data that is subject to discrete jumps (e.g., GPS or position updates
-        # from landmark observations) then:
-        #     3a. Set your "world_frame" to your map_frame value
-        #     3b. MAKE SURE something else is generating the odom->base_link transform. Note that this can even be another state
-        #         estimation node from robot_localization! However, that instance should *not* fuse the global data.
-        odom_frame: odom            # Defaults to "odom" if unspecified
-        base_link_frame: base_link  # Defaults to "base_link" if unspecified
-        world_frame: world           # Defaults to the value of odom_frame if unspecified
-
-        pose0: /pose
-        pose0_config: [true,  true,  false,
-                       false, false, false,
-                       false, false, false,
-                       false, false, false,
-                       false, false, false]
-        pose0_differential: false
-        pose0_relative: false
-        pose0_queue_size: 5
-        pose0_rejection_threshold: 100.0  # Note the difference in parameter name
-
-
-        # Places limits on how large the acceleration term will be. Should match your robot's kinematics.
-        acceleration_limits: [1.3, 0.0, 0.0, 0.0, 0.0, 3.4]
-
-        # Acceleration and deceleration limits are not always the same for robots.
-        deceleration_limits: [1.3, 0.0, 0.0, 0.0, 0.0, 4.5]
-
-        # If your robot cannot instantaneously reach its acceleration limit, the permitted change can be controlled with these
-        # gains
-        acceleration_gains: [0.8, 0.0, 0.0, 0.0, 0.0, 0.9]
-
-        # If your robot cannot instantaneously reach its deceleration limit, the permitted change can be controlled with these
-        # gains
-        deceleration_gains: [1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
-
-        # [ADVANCED] The process noise covariance matrix can be difficult to tune, and can vary for each application, so it is
-        # exposed as a configuration parameter. This matrix represents the noise we add to the total error after each
-        # prediction step. The better the omnidirectional motion model matches your system, the smaller these values can be.
-        # However, if users find that a given variable is slow to converge, one approach is to increase the
-        # process_noise_covariance diagonal value for the variable in question, which will cause the filter's predicted error
-        # to be larger, which will cause the filter to trust the incoming measurement more during correction. The values are
-        # ordered as x, y, z, roll, pitch, yaw, vx, vy, vz, vroll, vpitch, vyaw, ax, ay, az. Defaults to the matrix below if
-        # unspecified.
-        # Note: the specification of covariance matrices can be cumbersome, so all matrix parameters in this package support
-        # both full specification or specification of only the diagonal values.
-        process_noise_covariance: [0.05, 0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.05, 0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.06, 0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.03, 0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.03, 0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.06, 0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.025, 0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.025, 0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.04, 0.0,    0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.01, 0.0,    0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.01, 0.0,    0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.02, 0.0,    0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.01, 0.0,    0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.01, 0.0,
-                                   0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.0,     0.0,     0.0,    0.0,    0.0,    0.0,    0.0,    0.0,    0.015]
-
-        # [ADVANCED] This represents the initial value for the state estimate error covariance matrix. Setting a diagonal
-        # value (variance) to a large value will result in rapid convergence for initial measurements of the variable in
-        # question. Users should take care not to use large values for variables that will not be measured directly. The values
-        # are ordered as x, y, z, roll, pitch, yaw, vx, vy, vz, vroll, vpitch, vyaw, ax, ay, az. Defaults to the diagonal values below
-        # if unspecified. In this example, we specify only the diagonal of the matrix.
-        initial_estimate_covariance: [1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9, 1e-9]
diff --git a/src/localisation/launch/dummy_launch.launch.py b/src/localisation/launch/dummy_launch.launch.py
deleted file mode 100644
index 981c2c9c54a3bb1b22c561b3494558bcb8e34e26..0000000000000000000000000000000000000000
--- a/src/localisation/launch/dummy_launch.launch.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from launch import LaunchDescription
-from launch_ros.actions import Node
-
-
-def generate_launch_description():
-    ld = LaunchDescription()
-
-    Node(
-        package="localisation",
-        executable="localisation_node",
-    )
-
-    dump_aruco_pub_node = Node(
-        package="localisation",
-        executable="dump_aruco_pub_node",
-    )
-
-    # ld.add_action(localisation_node)
-    ld.add_action(dump_aruco_pub_node)
-
-    return ld
diff --git a/src/localisation/launch/dummy_loc_multilaunch.launch.py b/src/localisation/launch/dummy_loc_multilaunch.launch.py
deleted file mode 100644
index ebac92e17465002285f1706dd05cf02a140f9993..0000000000000000000000000000000000000000
--- a/src/localisation/launch/dummy_loc_multilaunch.launch.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from launch import LaunchDescription
-from launch_ros.actions import Node
-
-
-def generate_launch_description():
-    ld = LaunchDescription()
-
-    localisation_node_0 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/robot_0/filtered_pose"),
-            ("/pose", "/robot_0/pose"),
-        ],
-    )
-
-    localisation_node_1 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/robot_1/filtered_pose"),
-            ("/pose", "/robot_1/pose"),
-        ],
-    )
-
-    localisation_node_2 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/robot_2/filtered_pose"),
-            ("/pose", "/robot_2/pose"),
-        ],
-    )
-
-    localisation_node_op_0 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/opponent_robot_0/filtered_pose"),
-            ("/pose", "/opponent_robot_0/pose"),
-        ],
-    )
-
-    localisation_node_op_1 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/opponent_robot_1/filtered_pose"),
-            ("/pose", "/opponent_robot_1/pose"),
-        ],
-    )
-
-    localisation_node_op_2 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/opponent_robot_2/filtered_pose"),
-            ("/pose", "/opponent_robot_2/pose"),
-        ],
-    )
-
-    dump_aruco_pub_node_1 = Node(
-        package="localisation",
-        executable="dump_aruco_pub_node",
-        remappings=[("/pose", "robot_1/pose")],
-    )
-
-    ld.add_action(localisation_node_0)
-    ld.add_action(localisation_node_1)
-    ld.add_action(localisation_node_2)
-    ld.add_action(localisation_node_op_0)
-    ld.add_action(localisation_node_op_1)
-    ld.add_action(localisation_node_op_2)
-    ld.add_action(dump_aruco_pub_node_1)
-
-    return ld
diff --git a/src/localisation/launch/ekf_filter.launch.py b/src/localisation/launch/ekf_filter.launch.py
deleted file mode 100644
index 1a52a759fc74aab81167a76337b56a0a0c13582a..0000000000000000000000000000000000000000
--- a/src/localisation/launch/ekf_filter.launch.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from launch import LaunchDescription
-from launch_ros.actions import Node
-import os
-from ament_index_python.packages import get_package_share_directory
-
-
-def generate_launch_description():
-    ld = LaunchDescription()
-
-    config = os.path.join(
-        get_package_share_directory("localisation"), "config", "ekf_old_rosbag.yaml"
-    )
-
-    ekf_node = Node(
-        package="robot_localization", executable="ekf_node", parameters=[config]
-    )
-
-    ld.add_action(ekf_node)
-
-    return ld
diff --git a/src/localisation/launch/for_rosbag.launch.py b/src/localisation/launch/for_rosbag.launch.py
deleted file mode 100644
index 27b9c4b2353ee9c248a87bc7fa652db598d84144..0000000000000000000000000000000000000000
--- a/src/localisation/launch/for_rosbag.launch.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from launch import LaunchDescription
-from launch_ros.actions import Node
-import os
-from ament_index_python.packages import get_package_share_directory
-
-
-def generate_launch_description():
-    ld = LaunchDescription()
-
-    config = os.path.join(
-        get_package_share_directory("localisation"), "config", "ekf_old_rosbag.yaml"
-    )
-
-    ekf_node = Node(
-        package="robot_localization", executable="ekf_node", parameters=[config]
-    )
-
-    repub_node = Node(package="localisation", executable="repub_node")
-
-    decode_node = Node(package="localisation", executable="decode_node")
-
-    node_tf = Node(
-        package="tf2_ros",
-        executable="static_transform_publisher",
-        name="tf_node",
-        arguments=[
-            str(-3.3309518094660846),
-            str(0.037100710111238944),
-            str(2.332057054710013),
-            str(-0.6329399726514903),
-            str(0.6916649217796449),
-            str(-0.2771814078615649),
-            str(0.2101358944477822),
-            "world",
-            "camera1",
-        ],
-    )
-
-    node_tf_2 = Node(
-        package="tf2_ros",
-        executable="static_transform_publisher",
-        name="tf_node_2",
-        arguments=[
-            str(3.5080489014918403),
-            str(0.43251995434233015),
-            str(2.3834138399754186),
-            str(0.6478792333222997),
-            str(0.6697717357895593),
-            str(-0.25583614030774876),
-            str(-0.2573056359265846),
-            "world",
-            "camera2",
-        ],
-    )
-
-    ld.add_action(repub_node)
-    ld.add_action(decode_node)
-    ld.add_action(ekf_node)
-    ld.add_action(node_tf)
-    ld.add_action(node_tf_2)
-
-    return ld
diff --git a/src/localisation/launch/loc_multilaunch.launch.py b/src/localisation/launch/loc_multilaunch.launch.py
deleted file mode 100644
index 6e85b2c03f29db1bf330479a81906f2d9c4b9b0b..0000000000000000000000000000000000000000
--- a/src/localisation/launch/loc_multilaunch.launch.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from launch import LaunchDescription
-from launch_ros.actions import Node
-
-
-def generate_launch_description():
-    ld = LaunchDescription()
-
-    localisation_node_0 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/robot_0/filtered_pose"),
-            ("/pose", "/robot_0/pose"),
-            ("/fallen", "/robot_0/fallen"),
-        ],
-    )
-
-    localisation_node_1 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/robot_1/filtered_pose"),
-            ("/pose", "/robot_1/pose"),
-            ("/fallen", "/robot_1/fallen"),
-        ],
-    )
-
-    localisation_node_2 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/robot_2/filtered_pose"),
-            ("/pose", "/robot_2/pose"),
-            ("/fallen", "/robot_2/fallen"),
-        ],
-    )
-
-    localisation_node_op_0 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/opponent_robot_0/filtered_pose"),
-            ("/pose", "/opponent_robot_0/pose"),
-            ("/fallen", "/opponent_robot_0/fallen"),
-        ],
-    )
-
-    localisation_node_op_1 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/opponent_robot_1/filtered_pose"),
-            ("/pose", "/opponent_robot_1/pose"),
-            ("/fallen", "/opponent_robot_1/fallen"),
-        ],
-    )
-
-    localisation_node_op_2 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/opponent_robot_2/filtered_pose"),
-            ("/pose", "/opponent_robot_2/pose"),
-            ("/fallen", "/opponent_robot_2/fallen"),
-        ],
-    )
-
-    ld.add_action(localisation_node_0)
-    ld.add_action(localisation_node_1)
-    ld.add_action(localisation_node_2)
-    ld.add_action(localisation_node_op_0)
-    ld.add_action(localisation_node_op_1)
-    ld.add_action(localisation_node_op_2)
-
-    return ld
diff --git a/src/localisation/launch/localisation.launch.py b/src/localisation/launch/localisation.launch.py
index fe3857ae9f7468a2ab7450023302083d758a89f0..70649fbc7f14c51d84cb029870a7c23ccc679a84 100644
--- a/src/localisation/launch/localisation.launch.py
+++ b/src/localisation/launch/localisation.launch.py
@@ -2,24 +2,38 @@ from launch import LaunchDescription
 from launch_ros.actions import Node
 import os
 from ament_index_python.packages import get_package_share_directory
+from launch.actions import DeclareLaunchArgument
+from launch.substitutions import LaunchConfiguration
 
 
 def generate_launch_description():
     ld = LaunchDescription()
 
     config = os.path.join(
-        get_package_share_directory("localisation"), "config", "ekf_old_rosbag.yaml"
+        get_package_share_directory("localisation"), "config", "androsot.yaml"
     )
 
+    # node_name_arg = DeclareLaunchArgument(
+    #     "robot_0/ekf_filter_node",
+    #     default_value='my_node'
+    # )
+
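+    # Presumably one EKF instance is meant to run per robot: its pose,
+    # odometry/filtered and set_pose topics are remapped into the robot_0
+    # namespace so further instances can coexist.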
     ekf_node = Node(
-        package="robot_localization", executable="ekf_node", parameters=[config]
+        package="robot_localization", executable="ekf_node", parameters=[config],
+        # name="robot_0_ekf_filter_node",
+        remappings=[
+            ("pose", "robot_0/pose"),
+            ("odometry/filtered", "robot_0/odometry/filtered"),
+            ("set_pose", "robot_0/set_pose")
+        ]
     )
 
-    repub_node = Node(package="localisation", executable="repub_node")
-
-    decode_node = Node(package="localisation", executable="decode_node")
+    decode_node = Node(
+        package="localisation",
+        executable="decode_node",
+        namespace="robot_0"
+    )
 
-    ld.add_action(repub_node)
     ld.add_action(decode_node)
     ld.add_action(ekf_node)
 
diff --git a/src/localisation/launch/old_rosbag.launch.py b/src/localisation/launch/old_rosbag.launch.py
deleted file mode 100644
index 27b9c4b2353ee9c248a87bc7fa652db598d84144..0000000000000000000000000000000000000000
--- a/src/localisation/launch/old_rosbag.launch.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from launch import LaunchDescription
-from launch_ros.actions import Node
-import os
-from ament_index_python.packages import get_package_share_directory
-
-
-def generate_launch_description():
-    ld = LaunchDescription()
-
-    config = os.path.join(
-        get_package_share_directory("localisation"), "config", "ekf_old_rosbag.yaml"
-    )
-
-    ekf_node = Node(
-        package="robot_localization", executable="ekf_node", parameters=[config]
-    )
-
-    repub_node = Node(package="localisation", executable="repub_node")
-
-    decode_node = Node(package="localisation", executable="decode_node")
-
-    node_tf = Node(
-        package="tf2_ros",
-        executable="static_transform_publisher",
-        name="tf_node",
-        arguments=[
-            str(-3.3309518094660846),
-            str(0.037100710111238944),
-            str(2.332057054710013),
-            str(-0.6329399726514903),
-            str(0.6916649217796449),
-            str(-0.2771814078615649),
-            str(0.2101358944477822),
-            "world",
-            "camera1",
-        ],
-    )
-
-    node_tf_2 = Node(
-        package="tf2_ros",
-        executable="static_transform_publisher",
-        name="tf_node_2",
-        arguments=[
-            str(3.5080489014918403),
-            str(0.43251995434233015),
-            str(2.3834138399754186),
-            str(0.6478792333222997),
-            str(0.6697717357895593),
-            str(-0.25583614030774876),
-            str(-0.2573056359265846),
-            "world",
-            "camera2",
-        ],
-    )
-
-    ld.add_action(repub_node)
-    ld.add_action(decode_node)
-    ld.add_action(ekf_node)
-    ld.add_action(node_tf)
-    ld.add_action(node_tf_2)
-
-    return ld
diff --git a/src/localisation/launch/raw_localisation.launch.py b/src/localisation/launch/raw_localisation.launch.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb4c862cd63438bd842ef074378ed63ac4753018
--- /dev/null
+++ b/src/localisation/launch/raw_localisation.launch.py
@@ -0,0 +1,19 @@
+from launch import LaunchDescription
+from launch_ros.actions import Node
+
+
+def generate_launch_description():
+    ld = LaunchDescription()
+
+    decode_node = Node(
+        package="localisation",
+        executable="decode_node",
+        remappings=[("/localization", "/robot_0/localization"),
+                    ("/decoded_filtered_pose", "/robot_0/pose")],
+    )
+
+    ld.add_action(decode_node)
+
+    return ld
diff --git a/src/localisation/launch/renamed_multi.launch.py b/src/localisation/launch/renamed_multi.launch.py
deleted file mode 100644
index d91a1c0da69627524438748b8bf921953f5acad5..0000000000000000000000000000000000000000
--- a/src/localisation/launch/renamed_multi.launch.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from launch import LaunchDescription
-from launch_ros.actions import Node
-
-
-def generate_launch_description():
-    ld = LaunchDescription()
-
-    localisation_node_0 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/robot_0/filtered_pose"),
-            ("/fallen", "/robot_0/fallen"),
-            ("/filtered_pose_3d_robot_0", "filt_3d"),
-        ],
-    )
-
-    localisation_node_1 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/robot_1/filtered_pose"),
-            ("/pose", "/robot_1/pose"),
-            ("/fallen", "/robot_1/fallen"),
-        ],
-    )
-
-    localisation_node_2 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/robot_2/filtered_pose"),
-            ("/pose", "/robot_2/pose"),
-            ("/fallen", "/robot_2/fallen"),
-        ],
-    )
-
-    localisation_node_op_0 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/opponent_robot_0/filtered_pose"),
-            ("/pose", "/opponent_robot_0/pose"),
-            ("/fallen", "/opponent_robot_0/fallen"),
-        ],
-    )
-
-    localisation_node_op_1 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/opponent_robot_1/filtered_pose"),
-            ("/pose", "/opponent_robot_1/pose"),
-            ("/fallen", "/opponent_robot_1/fallen"),
-        ],
-    )
-
-    localisation_node_op_2 = Node(
-        package="localisation",
-        executable="localisation_node",
-        remappings=[
-            ("/filtered_pose", "/opponent_robot_2/filtered_pose"),
-            ("/pose", "/opponent_robot_2/pose"),
-            ("/fallen", "/opponent_robot_2/fallen"),
-        ],
-    )
-
-    ld.add_action(localisation_node_0)
-    ld.add_action(localisation_node_1)
-    ld.add_action(localisation_node_2)
-    ld.add_action(localisation_node_op_0)
-    ld.add_action(localisation_node_op_1)
-    ld.add_action(localisation_node_op_2)
-
-    return ld
diff --git a/src/localisation/localisation/localisation_node.py b/src/localisation/localisation/alpha_beta.py
similarity index 100%
rename from src/localisation/localisation/localisation_node.py
rename to src/localisation/localisation/alpha_beta.py
diff --git a/src/localisation/localisation/dummy_filtered_decoder.py b/src/localisation/localisation/dummy_filtered_decoder.py
index 997c4998c4eba515405273daa9251bc57f2c26b1..205fea1bbbc4fd4ddb64992eb3cc9df38285fc9a 100644
--- a/src/localisation/localisation/dummy_filtered_decoder.py
+++ b/src/localisation/localisation/dummy_filtered_decoder.py
@@ -1,4 +1,5 @@
 from geometry_msgs.msg import PoseWithCovarianceStamped
+from std_msgs.msg import Bool
 from nav_msgs.msg import Odometry
 
 import rclpy
@@ -7,6 +8,8 @@ from rclpy.node import Node
 from geometry_msgs.msg import Pose2D
 import tf_transformations as tf_t
 
+from collections import deque
+
 
 def pose_to_pose2d(pose_msg):
     """
@@ -43,18 +46,26 @@ class DecodeNode(Node):
         super().__init__("decode_node")
         self.robot_id = 0
         self.pose_pub = self.create_publisher(
-            PoseWithCovarianceStamped, "/decoded_filtered_pose", 10
+            PoseWithCovarianceStamped, "filtered_3d_pose", 10
         )
-        self.pose2d_pub = self.create_publisher(Pose2D, "/localization", 10)
-        self.create_subscription(Odometry, "/odometry/filtered", self.pose_callback, 10)
+        self.pose2d_pub = self.create_publisher(Pose2D, "localization", 10)
+        self.create_subscription(Odometry, "odometry/filtered", self.pose_callback, 10)
         self.counter = 0
+        self.fall_pub = self.create_publisher(Bool, "fallen", 10)
+        self.fall_z_thr = 0.10
+        self.fallen_filter = deque()
 
     def pose_callback(self, msg):
         new_msg = PoseWithCovarianceStamped()
         new_msg.pose = msg.pose
         new_msg.header.stamp = self.get_clock().now().to_msg()
         new_msg.header.frame_id = "world"
-
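+        # Fall detection: keep a sliding window of the last 10 height estimates;
+        # the robot is reported fallen only when all 10 are below fall_z_thr,
+        # which debounces transient pose glitches.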
+        if len(self.fallen_filter) == 10:
+            self.fallen_filter.popleft()
+        self.fallen_filter.append(msg.pose.pose.position.z < self.fall_z_thr)
+        fall_msg = Bool()
+        fall_msg.data = all(self.fallen_filter) and len(self.fallen_filter) == 10
+        self.fall_pub.publish(fall_msg)
         self.pose_pub.publish(new_msg)
 
         pose2d_msg = pose_to_pose2d(new_msg.pose.pose)
diff --git a/src/localisation/localisation/dummy_gui_localization.py b/src/localisation/localisation/dummy_gui_localization.py
index 5f66cf84bae0fedac7a629bd4ba457ae96753b20..fbed51234e27a2529d7cc50fc5c6b70cc1b8c13d 100644
--- a/src/localisation/localisation/dummy_gui_localization.py
+++ b/src/localisation/localisation/dummy_gui_localization.py
@@ -128,7 +128,8 @@ class DummyLocalizationGuiNode(rclpy.node.Node):
         # Movement parameters
         self.move_step = 10
         self.turn_step = 10
-        self.stop = False
+        self.do_publish_localization = True
+        self.do_publish_target = True
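+        # Runtime toggles: "f" flips localization publishing and "l" flips
+        # target publishing (see the key handler below).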
 
     def __update(self) -> None:
         # Draw the football field
@@ -210,8 +211,9 @@ class DummyLocalizationGuiNode(rclpy.node.Node):
                 self.move_step * math.sin(math.radians(self.target_angle + 90))
             )
         elif key == ord("f"):
-            self.stop = not self.stop
-
+            self.do_publish_localization = not self.do_publish_localization
+        elif key == ord("l"):
+            self.do_publish_target = not self.do_publish_target
         elif key == 27:  # ESC key to exit
             raise Exception("Exit...")
 
@@ -233,11 +235,15 @@ class DummyLocalizationGuiNode(rclpy.node.Node):
             *picture_coord_to_real(self.target_position[0], self.target_position[1]),
             self.target_angle_rad,
         )
-        if self.stop:
-            self.__target_pose_pub.publish(loc_msg)
+        # self.__target_pose_pub.publish(target_msg)
+        # self.__pose_pub.publish(loc_msg)
+        if self.do_publish_localization:
             self.__pose_pub.publish(loc_msg)
-        else:
+
+        if self.do_publish_target:
             self.__target_pose_pub.publish(target_msg)
+        if not self.do_publish_target and not self.do_publish_localization:
+            self.__target_pose_pub.publish(loc_msg)
             self.__pose_pub.publish(loc_msg)
 
     @staticmethod
diff --git a/src/localisation/localisation/dummy_odom.py b/src/localisation/localisation/dummy_odom.py
deleted file mode 100644
index 713c0ea5a8581d54a60ba1b271a9d04cbc854064..0000000000000000000000000000000000000000
--- a/src/localisation/localisation/dummy_odom.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import rclpy
-from rclpy.node import Node
-from nav_msgs.msg import Odometry
-from geometry_msgs.msg import PoseWithCovariance, TwistWithCovariance
-
-
-class OdometryPublisher(Node):
-    def __init__(self):
-        super().__init__("odometry_publisher")
-        self.publisher_ = self.create_publisher(Odometry, "dummy_odom", 10)
-        self.timer = self.create_timer(0.1, self.publish_odometry)
-
-    def publish_odometry(self):
-        now = self.get_clock().now().to_msg()
-        odom_msg = Odometry()
-        odom_msg.header.stamp = now
-        odom_msg.header.frame_id = "odom"
-        odom_msg.child_frame_id = "base_link"
-
-        pose = PoseWithCovariance()
-        pose.pose.position.x = 0.0
-        pose.pose.position.y = 0.0
-        pose.pose.position.z = 0.0
-        pose.pose.orientation.x = 0.0
-        pose.pose.orientation.y = 0.0
-        pose.pose.orientation.z = 0.0
-        pose.pose.orientation.w = 1.0
-        pose.covariance = [0.0] * 36
-        pose.covariance[0] = 1e-3
-        pose.covariance[7] = 1e-3
-        pose.covariance[14] = 1e-3
-        pose.covariance[21] = 1e-2
-        pose.covariance[28] = 1e-2
-        pose.covariance[35] = 1e-2
-
-        odom_msg.pose = pose
-
-        twist = TwistWithCovariance()
-        twist.twist.linear.x = 0.0
-        twist.twist.linear.y = 0.0
-        twist.twist.angular.z = 0.0
-        twist.covariance = [0.0] * 36
-        twist.covariance[0] = 1e-3
-        twist.covariance[7] = 1e-3
-        twist.covariance[14] = 1e-3
-        twist.covariance[21] = 1e-2
-        twist.covariance[28] = 1e-2
-        twist.covariance[35] = 1e-2
-
-        odom_msg.twist = twist
-
-        self.publisher_.publish(odom_msg)
-
-
-def main(args=None):
-    rclpy.init(args=args)
-    odometry_publisher = OdometryPublisher()
-    rclpy.spin(odometry_publisher)
-    odometry_publisher.destroy_node()
-    rclpy.shutdown()
-
-
-if __name__ == "__main__":
-    main()
diff --git a/src/localisation/localisation/dump_aruco_publisher.py b/src/localisation/localisation/dump_aruco_publisher.py
deleted file mode 100644
index b8a6ffa586bfbb2d550ee60748788de1fb2bd64a..0000000000000000000000000000000000000000
--- a/src/localisation/localisation/dump_aruco_publisher.py
+++ /dev/null
@@ -1,125 +0,0 @@
-from geometry_msgs.msg import PoseWithCovarianceStamped
-
-import rclpy
-from rclpy.node import Node
-
-# from geometry_msgs.msg import TransformStamped
-# from tf2_ros.static_transform_broadcaster import StaticTransformBroadcaster
-# from tf2_ros import TransformBroadcaster
-import math
-import numpy as np
-
-
-def quaternion_from_euler(ai, aj, ak):
-    ai /= 2.0
-    aj /= 2.0
-    ak /= 2.0
-    ci = math.cos(ai)
-    si = math.sin(ai)
-    cj = math.cos(aj)
-    sj = math.sin(aj)
-    ck = math.cos(ak)
-    sk = math.sin(ak)
-    cc = ci * ck
-    cs = ci * sk
-    sc = si * ck
-    ss = si * sk
-
-    q = np.empty((4,))
-    q[0] = cj * sc - sj * cs
-    q[1] = cj * ss + sj * cc
-    q[2] = cj * cs - sj * sc
-    q[3] = cj * cc + sj * ss
-
-    return q
-
-
-class DumpArucoPubNode(Node):
-    def __init__(self):
-        super().__init__("dump_aruco_pub_node")
-        self.robot_id = 0
-        self.pose_pub = self.create_publisher(PoseWithCovarianceStamped, "/pose", 10)
-        timer_period = 0.1  # seconds
-        self.timer = self.create_timer(timer_period, self.timer_callback)
-        self.counter = 0
-
-    def timer_callback(self):
-        self.counter += 1
-        msg = PoseWithCovarianceStamped()
-        msg.header.stamp = self.get_clock().now().to_msg()
-        msg.header.frame_id = "map"
-        # yaw =
-        x = 1.0
-        y = 3.0
-        z = 0.4
-        q_x = 0.0
-        q_y = -0.819
-        q_z = 0.0
-        q_w = 0.574
-
-        msg.pose.pose.position.x = x
-        msg.pose.pose.position.y = y
-        msg.pose.pose.position.z = z
-        msg.pose.pose.orientation.x = q_x
-        msg.pose.pose.orientation.y = q_y
-        msg.pose.pose.orientation.z = q_z
-        msg.pose.pose.orientation.w = q_w
-
-        msg.pose.covariance = [
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-            1e-3,
-        ]
-
-        self.pose_pub.publish(msg)
-
-
-def main(args=None):
-    rclpy.init(args=args)
-
-    node = DumpArucoPubNode()
-
-    rclpy.spin(node)
-
-    # Destroy the node explicitly
-    # (optional - otherwise it will be done automatically
-    # when the garbage collector destroys the node object)
-    node.destroy_node()
-    rclpy.shutdown()
-
-
-if __name__ == "__main__":
-    main()
diff --git a/src/localisation/setup.py b/src/localisation/setup.py
index b103ed367626039c484da16b0a4e7da73e8e54aa..361964f82f41c2474a8516eef967a936d72e7fec 100644
--- a/src/localisation/setup.py
+++ b/src/localisation/setup.py
@@ -29,9 +29,7 @@ setup(
     tests_require=["pytest"],
     entry_points={
         "console_scripts": [
-            "localisation_node = localisation.localisation_node:main",
-            "dump_aruco_pub_node = localisation.dump_aruco_publisher:main",
-            "dummy_odom_node = localisation.dummy_odom:main",
+            "alpha_beta = localisation.alpha_beta:main",
             "repub_node = localisation.pose_republisher:main",
             "decode_node = localisation.dummy_filtered_decoder:main",
             "dummy_gui_localization = localisation.dummy_gui_localization:main",
diff --git a/src/localization_preprocessor/config/cube.urdf b/src/localization_preprocessor/config/cube.urdf
index 823643f0ed965b26dc44501be7e17e624bf7d201..676c8b9a89a83d5a65db9a8c8510b4ca0c0aa88c 100644
--- a/src/localization_preprocessor/config/cube.urdf
+++ b/src/localization_preprocessor/config/cube.urdf
@@ -44,31 +44,31 @@
   </link>
 
 
-  <joint name="base_to_right_leg1" type="fixed">
+  <joint name="up_joint" type="fixed">
     <parent link="base_link"/>
     <child link="up"/>
     <origin rpy="0 0 1.56" xyz="0 0 0.035"/>
 
   </joint>
-  <joint name="base_to_right_leg2" type="fixed">
+  <joint name="front_joint" type="fixed">
     <parent link="base_link"/>
     <child link="front"/>
         <origin rpy="1.56 0 1.56" xyz="0.035 0 0"/>
 
   </joint>
-  <joint name="base_to_right_leg3" type="fixed">
+  <joint name="left_joint" type="fixed">
     <parent link="base_link"/>
     <child link="left"/>
         <origin rpy="1.56 0 0" xyz="0 -0.035 0"/>
 
   </joint>
-  <joint name="base_to_right_leg4" type="fixed">
+  <joint name="back_joint" type="fixed">
     <parent link="base_link"/>
     <child link="back"/>
         <origin rpy="1.56 0 -1.56" xyz="-0.035 0 0"/>
 
   </joint>
-  <joint name="base_to_right_leg5" type="fixed">
+  <joint name="right_joint" type="fixed">
     <parent link="base_link"/>
     <child link="right"/>
         <origin rpy="1.56 0 3.14" xyz="0 0.035 0"/>
diff --git a/src/localization_preprocessor/launch/aruco_preprocessor.launch.py b/src/localization_preprocessor/launch/aruco_preprocessor.launch.py
index 79d37c1e3ea7738152b365de18bd8b6cf12e7e33..11751cf565068ff178eb65ef0951c002d68ba904 100644
--- a/src/localization_preprocessor/launch/aruco_preprocessor.launch.py
+++ b/src/localization_preprocessor/launch/aruco_preprocessor.launch.py
@@ -25,6 +25,7 @@ def generate_launch_description():
             Node(
                 package="ros2_aruco",
                 executable="aruco_node",
+                name="aruco_prep_camera1",
                 parameters=[
                     {"camera_frame": "camera1"},
                     {"aruco_dictionary_id": "DICT_4X4_100"},
@@ -48,6 +49,7 @@ def generate_launch_description():
             Node(
                 package="ros2_aruco",
                 executable="aruco_node",
+                name="aruco_prep_camera2",
                 parameters=[
                     {"camera_frame": "camera2"},
                     {"aruco_dictionary_id": "DICT_4X4_100"},
diff --git a/src/localization_preprocessor/launch/multi_camera_aruco_preprocessor.launch.py b/src/localization_preprocessor/launch/multi_camera_aruco_preprocessor.launch.py
index 3a0de893fdbe508a4ae83cceec3878984d41bf73..be5854f59d4fd9ee6c37754c831233f8a3e132dc 100644
--- a/src/localization_preprocessor/launch/multi_camera_aruco_preprocessor.launch.py
+++ b/src/localization_preprocessor/launch/multi_camera_aruco_preprocessor.launch.py
@@ -25,6 +25,15 @@ def generate_launch_description():
                     [FindPackageShare("calibration_node"), "/launch", "/tf.launch.py"]
                 )
             ),
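+            # cube_pub.launch.py presumably publishes the marker-cube face frames
+            # (up/front/left/back/right from cube.urdf) that the aruco
+            # preprocessor needs to resolve marker -> base_link transforms.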
+            IncludeLaunchDescription(
+                PythonLaunchDescriptionSource(
+                    [
+                        FindPackageShare("localization_preprocessor"),
+                        "/launch",
+                        "/cube_pub.launch.py",
+                    ]
+                )
+            ),
             Node(
                 package="usb_cam_ros2",
                 executable="usb_cam_ros2_node_exe",
@@ -44,6 +53,7 @@ def generate_launch_description():
             Node(
                 package="ros2_aruco",
                 executable="aruco_node",
+                name="aruco_camera1",
                 parameters=[
                     {"camera_frame": "camera1"},
                     {"aruco_dictionary_id": "DICT_4X4_100"},
@@ -59,6 +69,7 @@ def generate_launch_description():
             Node(
                 package="ros2_aruco",
                 executable="aruco_node",
+                name="aruco_camera2",
                 parameters=[
                     {"camera_frame": "camera2"},
                     {"aruco_dictionary_id": "DICT_4X4_100"},
@@ -74,12 +85,9 @@ def generate_launch_description():
             Node(
                 package="localization_preprocessor",
                 executable="aruco_preprocessor_node",
+                name="aruco_preprocessor_1",  # "aruco_camera1" would collide with the aruco_node of the same name above
                 parameters=[
                     {"robots_params": robot_config},
-                    {"camera_frames": ["camera2"]},
-                ],
-                remappings=[
-                    ("/pose", "/robot_0/pose"),
                 ],
                 output="screen",
                 emulate_tty=True,
@@ -89,10 +97,6 @@ def generate_launch_description():
                 executable="aruco_preprocessor_node",
                 parameters=[
                     {"robots_params": robot_config},
-                    {"camera_frames": ["camera1"]},
-                ],
-                remappings=[
-                    ("/pose", "/robot_0/pose"),
                 ],
                 output="screen",
                 emulate_tty=True,
diff --git a/src/localization_preprocessor/launch/solo_camera_aruco_preprocessor.launch.py b/src/localization_preprocessor/launch/solo_camera_aruco_preprocessor.launch.py
index b49653e972473ea664ae1bc8c9d4da1979c90a95..9f2332c5249be5082dba1d4ead855899dd154768 100644
--- a/src/localization_preprocessor/launch/solo_camera_aruco_preprocessor.launch.py
+++ b/src/localization_preprocessor/launch/solo_camera_aruco_preprocessor.launch.py
@@ -53,7 +53,7 @@ def generate_launch_description():
                     {"camera_frames": ["camera1"]},
                 ],
                 remappings=[
-                    ("/pose", "/robot_test/pose"),
+                    ("/pose", "/robot_0/pose"),
                 ],
                 output="screen",
                 emulate_tty=True,
diff --git a/src/localization_preprocessor/localization_preprocessor/aruco_preprocessor_node.py b/src/localization_preprocessor/localization_preprocessor/aruco_preprocessor_node.py
index 7bb142e9336c650e8314d5c551735dd9a685fc0a..a802c29d4c89157bfa076535b20f65bbe2c53bec 100755
--- a/src/localization_preprocessor/localization_preprocessor/aruco_preprocessor_node.py
+++ b/src/localization_preprocessor/localization_preprocessor/aruco_preprocessor_node.py
@@ -22,6 +22,9 @@ import time
 import json
 import math
 import logging
+from math import degrees, sqrt
+
+ROBOT_TEMPLATE_TOPIC = "/robot_{id}/pose"
 
 
 class ArucoPreprocessorNode(Node):
@@ -30,7 +33,7 @@ class ArucoPreprocessorNode(Node):
 
         self.declare_parameter("aruco_markers_topic", "/aruco_markers")
         topic_name = self.get_parameter("aruco_markers_topic").value
-        self.publisher_ = self.create_publisher(PoseWithCovarianceStamped, "/pose", 10)
+        # self.publisher_ = self.create_publisher(PoseWithCovarianceStamped, "pose", 10)
 
         self.subscription = self.create_subscription(
             ArucoMarkers, topic_name, self.__aruco_cb, 10
@@ -44,8 +47,8 @@ class ArucoPreprocessorNode(Node):
         self.declare_parameter("camera_frames", ["camera1", "camera2"])
 
         self.camera_frames = ["camera1", "camera2"]
-        xyz_cov = 0.03
-        rpy_cov = 0.17
+        xyz_cov = 0.25
+        rpy_cov = 0.5
         self.covariance = [
             xyz_cov,  # 0 0
             0.0,
@@ -84,10 +87,16 @@ class ArucoPreprocessorNode(Node):
             0.0,
             rpy_cov,  # 35
         ]
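+        # Measurement-noise shaping (the scales look hand-tuned): orientation
+        # covariance is halved, position covariance grows with the squared
+        # distance from the camera (see __estimate_xyz_cov) scaled by 1/64.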
+        self.rpy_cov_scale = 0.5
+        self.xyz_cov_scale = 1 / 64.0
+        self.max_dist_from_cam = 4.5  # ~sqrt(4**2 + 2**2) m, e.g. 4 m along the floor with the camera 2 m up
+        self.z_fall_thr = 0.1
 
     def __init_robot_params(self) -> None:
         self.declare_parameter("robots_params", rclpy.Parameter.Type.STRING)
         self.aruco_rotation_map = {}
+        self.robot_publisher_map = {}
+        self.aruco_to_robot_id_map = {}
         robots_params_path = str(self.get_parameter("robots_params").value)
         robots_params = None
         with open(robots_params_path) as f:
@@ -99,23 +108,32 @@ class ArucoPreprocessorNode(Node):
         for robot_id, param in self.robots.items():
             aruco_ids = param["aruco_ids"]
             aruco_transforms = param["aruco_transforms"]
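+            # One publisher per robot id: poses are fanned out to
+            # /robot_<id>/pose instead of a single shared /pose topic.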
+            self.robot_publisher_map[robot_id] = self.create_publisher(
+                PoseWithCovarianceStamped, ROBOT_TEMPLATE_TOPIC.format(id=robot_id), 10
+            )
             for aruco_id, aruco_side in zip(aruco_ids, aruco_transforms):
                 self.aruco_rotation_map[aruco_id] = aruco_side
+                self.aruco_to_robot_id_map[aruco_id] = robot_id
 
     def __init_aruco_ids(self):
         self.aruco_ids = {}
         for robot_id, param in self.robots.items():
             self.aruco_ids[robot_id] = param["aruco_ids"]
 
-    def __set_camera_to_world_frame(self) -> None:
+    def __set_camera_to_world_frame(self) -> bool:
         try:
             for frame in self.camera_frames:
                 self.tf_from_camera_to_world[frame] = self.tf_buffer.lookup_transform(
-                    "world", frame, rclpy.time.Time()
+                    "world",
+                    frame,
+                    rclpy.time.Time(),
+                    timeout=rclpy.duration.Duration(seconds=1.0),
                 )
+            return True
         except TransformException as ex:
             time.sleep(0.1)
             self.get_logger().info(f"Could not transform world to camera: {ex}")
+            return False
 
     def __estimate_cov(self, r) -> float:
         # Example quaternion (x, y, z, w)
@@ -137,13 +155,23 @@ class ArucoPreprocessorNode(Node):
         )
         return cov
 
+    def __estimate_xyz_cov(self, position) -> float:
+        # Position (x, y, z) in the camera frame: the variance is modelled as
+        # the squared distance from the camera.
+        x, y, z = position.x, position.y, position.z
+        dist_sq = x**2 + y**2 + z**2
+        return dist_sq
+
     def __aruco_cb(self, msg: ArucoMarkers) -> None:
-        self.__set_camera_to_world_frame()  # !!!!bullshiit
-        self.get_logger().info("start aruco cb")
-        for id, pose in zip(msg.marker_ids, msg.poses):
-            if id in self.aruco_rotation_map.keys():
-                world_pose = self.__get_world_pose(pose, msg.header.frame_id, id)
-                self.publisher_.publish(world_pose)
+        if not self.__set_camera_to_world_frame():  # TODO: cache the static camera transforms instead of re-resolving on every callback
+            return
+        # self.get_logger().info("start aruco cb")
+        for aruco_id, pose in zip(msg.marker_ids, msg.poses):
+            if aruco_id in self.aruco_rotation_map.keys() and self.first_filter(pose):
+                world_pose = self.__get_world_pose(pose, msg.header.frame_id, aruco_id)
+                if world_pose:
+                    self.robot_publisher_map[
+                        self.aruco_to_robot_id_map[aruco_id]
+                    ].publish(world_pose)
 
     def __get_world_pose(
         self, pose: Pose, frame_id: str, id: int
@@ -156,21 +184,62 @@ class ArucoPreprocessorNode(Node):
             self.get_clock().now().to_msg()
         )  # [TODO] Do not change timestamp
         cov = self.__estimate_cov(pose.orientation)
-        self.covariance[21] = cov
-        self.covariance[28] = cov
-        self.covariance[35] = cov
+        xyz_cov = self.__estimate_xyz_cov(pose.position)
+
+        self.covariance[21] = cov * self.rpy_cov_scale
+        self.covariance[28] = cov * self.rpy_cov_scale
+        self.covariance[35] = cov * self.rpy_cov_scale
+
+        self.covariance[0] = xyz_cov * self.xyz_cov_scale
+        self.covariance[7] = xyz_cov * self.xyz_cov_scale
+        self.covariance[14] = xyz_cov * self.xyz_cov_scale
         pose_with_cov.pose.covariance = self.covariance
-        base_link_position = self.transform_pose_with_covariance(
-            pose_with_cov, id
-        )  # i dont like this name
+        base_link_position = self.transform_pose_with_covariance(pose_with_cov, id)
         logging.debug(f"base_link_position:{base_link_position}")
+        r, p, y = euler_from_quaternion(
+            [
+                base_link_position.pose.pose.orientation.x,
+                base_link_position.pose.pose.orientation.y,
+                base_link_position.pose.pose.orientation.z,
+                base_link_position.pose.pose.orientation.w,
+            ]
+        )
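+        # Plausibility filter: a large combined |roll| + |pitch| while base_link
+        # is still above the fall threshold usually indicates a misestimated
+        # marker pose, so the detection is dropped.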
+        rp_sum = abs(degrees(r)) + abs(degrees(p))
+        rp_big = rp_sum > 50.0
+        z_high = base_link_position.pose.pose.position.z > self.z_fall_thr
+        bad_aruco = rp_big and z_high
+        if bad_aruco:
+            return None
+        # else:
+        #     print(round(degrees(r)), round(degrees(p)), round(degrees(y)), cov)
+        #     print(base_link_position.pose.pose.position.z)
+        # self.get_logger().info(
+        #     f"not transformed: xyz_cov = {self.covariance[0]}, rpy_cov = {self.covariance[21]}"
+        # )
         return base_link_position  # world_position
 
+    def first_filter(self, pose):
+        # Reject detections farther from the camera than max_dist_from_cam,
+        # presumably because distant marker poses are too noisy to trust.
+        x, y, z = pose.position.x, pose.position.y, pose.position.z
+        dist = sqrt(x**2 + y**2 + z**2)
+        return dist <= self.max_dist_from_cam
+
+    def tf_patch(self, tf_list):
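+        # Re-stamp all transforms with a single common time so the scratch
+        # BufferCore in transform_pose_with_covariance can chain them without
+        # extrapolation errors.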
+        patch_time = self.get_clock().now()
+        for tf in tf_list:
+            tf.header.stamp = patch_time.to_msg()
+        return patch_time
+
     def transform_pose_with_covariance(
         self, aruco_pose: PoseWithCovarianceStamped, id: int
     ) -> PoseWithCovarianceStamped:
         world2camera = self.tf_buffer.lookup_transform(
-            "world", aruco_pose.header.frame_id, rclpy.time.Time()
+            "world",
+            aruco_pose.header.frame_id,
+            rclpy.time.Time(),
+            timeout=rclpy.duration.Duration(seconds=1.0),
         )
         camera2aruco_tf = ArucoPreprocessorNode.pose2tf(
             aruco_pose, self.aruco_rotation_map[id]
@@ -178,18 +247,23 @@ class ArucoPreprocessorNode(Node):
         aruco2base_link_tf = self.tf_buffer.lookup_transform_core(
             self.aruco_rotation_map[id], "base_link", rclpy.time.Time()
         )
-
         tf_buffer = BufferCore(rclpy.time.Time())
 
+        patch_time = self.tf_patch([world2camera, camera2aruco_tf, aruco2base_link_tf])
+
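+        # Chain world -> camera -> marker -> base_link in an isolated buffer at
+        # the common patch_time, then read world -> base_link back in one lookup.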
         tf_buffer.set_transform(camera2aruco_tf, "default_authority")
         tf_buffer.set_transform(aruco2base_link_tf, "default_authority")
         tf_buffer.set_transform(world2camera, "default_authority")
 
-        tf = tf_buffer.lookup_transform_core("world", "base_link", rclpy.time.Time())
+        tf = tf_buffer.lookup_transform_core("world", "base_link", patch_time)
         pose_with_cov = ArucoPreprocessorNode.tf2pose_with_cov(tf)
         pose_with_cov.pose.covariance = self.transform_covariance(
             aruco_pose.pose.covariance, aruco2base_link_tf
         )
+        # pose_with_cov.pose.covariance = self.covariance
+        # position = pose_with_cov.pose.pose.position
+        pose_with_cov.header.frame_id = "world"
+        pose_with_cov.header.stamp = self.get_clock().now().to_msg()
         return pose_with_cov
 
     def transform_covariance(self, cov: list, transform: TransformStamped) -> list:
diff --git a/src/pic_to_real/launch/bal_to_real.launch.py b/src/pic_to_real/launch/bal_to_real.launch.py
index a09f7ad4e0c83ca47ed1c2d5892324029ba12114..06ce117a77a35faad6b97c75693994939ab040fb 100644
--- a/src/pic_to_real/launch/bal_to_real.launch.py
+++ b/src/pic_to_real/launch/bal_to_real.launch.py
@@ -13,70 +13,15 @@ def load_yaml_file(file_path):
         return yaml.safe_load(file)
 
 
-def create_tf_nodes(static_transforms: dict):
-    tf_nodes = []
-    for transform in static_transforms["static_transforms"]:
-        node = Node(
-            package="tf2_ros",
-            executable="static_transform_publisher",
-            name=transform["child_frame_id"],
-            arguments=[
-                str(transform["translation"][0]),
-                str(transform["translation"][1]),
-                str(transform["translation"][2]),
-                str(transform["rotation"][0]),
-                str(transform["rotation"][1]),
-                str(transform["rotation"][2]),
-                str(transform["rotation"][3]),
-                transform["parent_frame_id"],
-                transform["child_frame_id"],
-            ],
-        )
-        tf_nodes.append(node)
-    return tf_nodes
-
-
-def create_tf_node():
-    node = Node(
-        package="tf2_ros",
-        executable="static_transform_publisher",
-        name="camera1",
-        arguments=[
-            str(0.0),
-            str(0.0),
-            str(1.0),
-            str(0.0),
-            str(0.0),
-            str(0.0),
-            str(1.0),
-            "world",
-            "camera1",
-        ],
-    )
-    return node
-
-
 def generate_launch_description():
     ld = LaunchDescription()
     camera_config1 = os.path.join(
         get_package_share_directory("server_main"), "config", "camera_params.yaml"
     )
-    camera_config2 = os.path.join(
-        get_package_share_directory("server_main"), "config", "camera_params2.yaml"
-    )
 
-    # tf_config = os.path.join(
-    #     get_package_share_directory("calibration_node"),
-    #     "config",
-    #     "static_transforms.yaml",
-    # )
-    # # [TODO] run launch from calibration_package
-    # static_transforms = load_yaml_file(tf_config)
-    # transforms_nodes = create_tf_nodes(static_transforms)
-    # for node in transforms_nodes:
-    #     ld.add_action(node)
-    create_tf_node()
-
     yolo_dir = get_package_share_directory("yolo_openvino_ros2")
     yolo_config_dir = yolo_dir + "/config/"
 
@@ -104,18 +49,7 @@ def generate_launch_description():
             parameters=[os.path.join(yolo_config_dir, "yolo_roma.yaml")],
         )
     )
-    (
-        ld.add_action(
-            Node(
-                package="usb_cam",
-                executable="usb_cam_node_exe",
-                parameters=[camera_config2],
-                output="screen",
-                emulate_tty=True,
-                namespace="camera2",
-            )
-        ),
-    )
+
     ld.add_action(
         Node(
             package="yolo_openvino_ros2",
diff --git a/src/pic_to_real/pic_to_real/ball_to_real.py b/src/pic_to_real/pic_to_real/ball_to_real.py
index 2dd6a3e1b97a1f7170d4a3ec02881f21174f207c..69f5961ddbed9404dd8d51a0ba3c76f673e1dc7d 100644
--- a/src/pic_to_real/pic_to_real/ball_to_real.py
+++ b/src/pic_to_real/pic_to_real/ball_to_real.py
@@ -6,8 +6,6 @@ from geometry_msgs.msg import PointStamped, Pose2D
 
 from tf2_geometry_msgs import do_transform_point
 from tf2_ros import TransformException
-from tf2_ros.buffer import Buffer
-from tf2_ros.transform_listener import TransformListener
 import rclpy
 
 from skspatial.objects import Line, Plane
@@ -17,7 +15,7 @@ from tf2_ros import TransformListener, Buffer
 class Ball2RealNode(rclpy.node.Node):
     def __init__(self):
         super().__init__("Ball2RealNode")
-        self.declare_parameter("camera_frames", ["camera1"])
+        self.declare_parameter("camera_frames", ["camera1", "camera2"])
         self.camera_frames = self.get_parameter("camera_frames").value
         self.camera_models = {}  # {"frame_id": PinholeCameraModel}
         for frame in self.camera_frames:
@@ -48,7 +46,10 @@ class Ball2RealNode(rclpy.node.Node):
             )
             return
 
-        if msg.header.frame_id not in self.camera_frames:
+        if (
+            msg.header.frame_id not in self.camera_frames
+            or msg.header.frame_id not in self.tf_from_camera_to_world
+        ):
             self.get_logger().info(
                 f"No frame from yolo ball coord. Frame: {msg.header.frame_id}"
             )
@@ -93,7 +94,7 @@ class Ball2RealNode(rclpy.node.Node):
         self.ball_world_pub.publish(ball_world_msg)
         # self.ball_coordinate_world = (ray_to_ball_world.point.x, ray_to_ball_world.point.y, ray_to_ball_world.point.z)
 
-    def get_camera_to_world_frame(self):
+    def get_camera_to_world_frame(self) -> bool:
         try:
             for frame in self.camera_frames:
                 print(f"Camera  frame is {frame}")
@@ -102,8 +103,10 @@ class Ball2RealNode(rclpy.node.Node):
                         "world", frame, rclpy.time.Time()
                     )
                 )
+            return True
         except TransformException as ex:
             self.get_logger().info(f"Could not transform world to camera: {ex}")
+        return False
 
 
 def main(args=None):
diff --git a/src/proxy/proxy/Proxy.py b/src/proxy/proxy/low_level_proxy_node.py
similarity index 74%
rename from src/proxy/proxy/Proxy.py
rename to src/proxy/proxy/low_level_proxy_node.py
index 87fe97fb97ebe7c00743cb7945677e163a700c51..c478173a0703ec4393946ff4983abae2e357c956 100644
--- a/src/proxy/proxy/Proxy.py
+++ b/src/proxy/proxy/low_level_proxy_node.py
@@ -5,11 +5,12 @@ import rclpy
 from rclpy.node import Node
 
 from geometry_msgs.msg import Twist
+from std_msgs.msg import Int32
 
 import numpy as np
 
 
-class Proxy(Node):
+class LowLevelProxyNode(Node):
     def __init__(
         self,
         host: str = "localhost",
@@ -18,12 +19,18 @@ class Proxy(Node):
         connection_delay: float = 2.5,
         robot_name="ROKI-0058",
     ):
-        super().__init__("androsot_proxy")
+        super().__init__("low_level_proxy_node")
 
-        self.subscription = self.create_subscription(
+        self.approach_subscription = self.create_subscription(
             msg_type=Twist,
             topic="plan_cmd_vel",
-            callback=self.listener_callback,
+            callback=self.approach_cb,
+            qos_profile=10,
+        )
+        self.action_subscription = self.create_subscription(
+            msg_type=Int32,
+            topic="action",
+            callback=self.action_cb,
             qos_profile=10,
         )
 
@@ -76,21 +83,31 @@ class Proxy(Node):
                 f"Connection refused after {self.conn_delay * self.n_conn_attempts} [sec]"
             )
 
-    async def listener_callback(self, msg: Twist) -> None:
+    async def approach_cb(self, msg: Twist) -> None:
         error_code = self.connection.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
 
         if error_code != 0:
-            self.get_logger().error("Lost connection to host")
             raise ConnectionError("Lost connection to host")
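+        # Wire format appears to be
+        #   "<name>|<action>|<yaw_rate>|0|<lateral_vel>|<forward_vel>\n"
+        # with velocities negated and clipped to [-128, 128].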
 
         self.writer.write(
-            f"{self.robot_name}|0|{int(msg.angular.z)}|0|{np.clip(int(-msg.linear.y), -128, 128)}|{np.clip(-int(msg.linear.x), -128, 128)}\n".encode()
+            f"{self.robot_name}|0|{-int(msg.angular.z)}|0|{np.clip(int(-msg.linear.y), -128, 128)}|{np.clip(-int(msg.linear.x), -128, 128)}\n".encode()
         )
         await self.writer.drain()
 
-        self.get_logger().info(
-            f"I heard: {self.robot_name}|0|{msg.angular.z}|0|{msg.linear.y}|{msg.linear.x}"
-        )
+        # self.get_logger().info(
+        #     f"I heard: {self.robot_name}|0|{msg.angular.z}|0|{msg.linear.y}|{msg.linear.x}"
+        # )
+
+    async def action_cb(self, msg: Int32):
+        error_code = self.connection.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+
+        if error_code != 0:
+            raise ConnectionError("Lost connection to host")
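+        # Action-only frame: the motion fields are zeroed so the robot executes
+        # just the requested action id.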
+        low_lvl_msg = f"{self.robot_name}|{msg.data}|0|0|0|0\n"
+        self.writer.write(low_lvl_msg.encode())
+        await self.writer.drain()
+
+        self.get_logger().info(f"I sent: {low_lvl_msg}")
 
     async def disconnect(self) -> None:
         if self.writer is not None:
@@ -102,7 +119,7 @@ class Proxy(Node):
 
 async def launch_proxy() -> None:
     try:
-        proxy = Proxy()
+        proxy = LowLevelProxyNode()
         await proxy.connect()
 
         cancel = proxy.create_guard_condition(lambda: None)
diff --git a/src/proxy/proxy/CommandSender.py b/src/proxy/proxy/rotation_command_sender_cli_node.py
similarity index 87%
rename from src/proxy/proxy/CommandSender.py
rename to src/proxy/proxy/rotation_command_sender_cli_node.py
index fb09bcc5ded16c53b61d3abfd4e559981ad0a47f..6bf7b0dee65e9260f694ed189b073b7155e33e55 100644
--- a/src/proxy/proxy/CommandSender.py
+++ b/src/proxy/proxy/rotation_command_sender_cli_node.py
@@ -4,9 +4,9 @@ from rclpy.node import Node
 from geometry_msgs.msg import Twist
 
 
-class CommandSender(Node):
+class RotationCommandSenderCLINode(Node):
     def __init__(self):
-        super().__init__("androsot_command_sender")
+        super().__init__("RotationCommandSenderCLINode")
         self.publisher_ = self.create_publisher(Twist, "plan_cmd_vel", 10)
 
         timer_period = 0.1  # seconds
@@ -32,7 +32,7 @@ class CommandSender(Node):
 def main(args=None):
     rclpy.init(args=args)
 
-    minimal_publisher = CommandSender()
+    minimal_publisher = RotationCommandSenderCLINode()
 
     rclpy.spin(minimal_publisher)
 
diff --git a/src/proxy/setup.py b/src/proxy/setup.py
index df80b02be45531a9b0db95b43fdecdf0ac627281..21afbbad2c168534b72d74115a4b25fac1a2c9d7 100644
--- a/src/proxy/setup.py
+++ b/src/proxy/setup.py
@@ -19,8 +19,8 @@ setup(
     tests_require=["pytest"],
     entry_points={
         "console_scripts": [
-            "command = proxy.CommandSender:main",
-            "proxy = proxy.Proxy:main",
+            "command = proxy.rotation_command_sender_cli:main",
+            "proxy = proxy.low_level_proxy_node:main",
         ],
     },
 )
diff --git a/src/server_main/config/approach_config.json b/src/server_main/config/approach_config.json
index c02a68ffd36b5f40a9c35f0be65bc18b414f218a..120ab6ba090baef16222d40a131bc12abb0ddd3f 100644
--- a/src/server_main/config/approach_config.json
+++ b/src/server_main/config/approach_config.json
@@ -11,13 +11,35 @@
 
         "approach":
         {
-            "distance_eps": 0.1,
-            "angular_eps": 0.1,
+            "distance_eps": 0.01,
+            "angular_eps": 0.25,
 
             "linear_velocity_x_scale": 1.0,
             "linear_velocity_y_scale": 1.0,
 
-            "angular_velocity_z_scale": 1.0
+            "angular_velocity_z_scale": 0.5
+        }
+    },
+    "1":
+    {
+        "robot":
+        {
+            "max_linear_velocity_x": 100,
+            "max_linear_velocity_y": 100,
+
+            "max_angular_velocity_z": 100
+        },
+
+        "approach":
+        {
+            "distance_eps": 0.05,
+            "angular_eps": 0.3,
+
+            "linear_velocity_x_scale": 1.0,
+            "linear_velocity_y_scale": 1.0,
+
+            "angular_velocity_z_scale": 0.5
         }
     }
+
 }
diff --git a/src/server_main/config/camera_info_1.yaml b/src/server_main/config/camera_info_1.yaml
deleted file mode 100644
index 9f037870f371007b127484771f7589ddf0073224..0000000000000000000000000000000000000000
--- a/src/server_main/config/camera_info_1.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-image_width: 2304
-image_height: 1536
-camera_name: camera_1
-camera_matrix:
-  rows: 3
-  cols: 3
-  data: [1705.392612, 0.0, 1128.405272, 0.0, 1704.735765, 745.835355, 0.0, 0.0, 1.0]
-distortion_model: plumb_bob
-distortion_coefficients:
-  rows: 1
-  cols: 5
-  data: [0.037945, -0.088068, 0.000319, -0.003593, 0.0]
-rectification_matrix:
-  rows: 3
-  cols: 3
-  data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
-projection_matrix:
-  rows: 3
-  cols: 4
-  data: [1701.895874, 0.0, 1118.205854, 0.0, 0.0, 1713.358154, 745.596647, 0.0, 0.0, 0.0, 1.0, 0.0]
diff --git a/src/server_main/config/camera_info_1_hm.yaml b/src/server_main/config/camera_info_1_hm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..93158b1704adf5053ee25f9205378957b7a136d5
--- /dev/null
+++ b/src/server_main/config/camera_info_1_hm.yaml
@@ -0,0 +1,55 @@
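+# Appears to be the old 2304x1536 calibration rescaled to 1920x1080 (factor 1920/2304).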
+camera_matrix:
+  cols: 3
+  data:
+  - 1421.1605100000002
+  - 0.0
+  - 940.3377266666666
+  - 0.0
+  - 1420.6131375
+  - 521.5294625
+  - 0.0
+  - 0.0
+  - 1.0
+  rows: 3
+camera_name: camera_1
+distortion_coefficients:
+  cols: 5
+  data:
+  - 0.037945
+  - -0.088068
+  - 0.000319
+  - -0.003593
+  - 0.0
+  rows: 1
+distortion_model: plumb_bob
+image_height: 1080
+image_width: 1920
+projection_matrix:
+  cols: 4
+  data:
+  - 1418.0
+  - 0.0
+  - 931.0
+  - 0.0
+  - 0.0
+  - 1427.0
+  - 521.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  - 0.0
+  rows: 3
+rectification_matrix:
+  cols: 3
+  data:
+  - 1.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  rows: 3
diff --git a/src/server_main/config/camera_info_2.yaml b/src/server_main/config/camera_info_2.yaml
deleted file mode 100644
index 9ef6393ecd37d102f3dd2760e1fe173097754cc7..0000000000000000000000000000000000000000
--- a/src/server_main/config/camera_info_2.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-image_width: 2304
-image_height: 1536
-camera_name: camera_2
-camera_matrix:
-  rows: 3
-  cols: 3
-  data: [1698.266418, 0.0, 1159.147169, 0.0, 1700.911909, 707.915321, 0.0, 0.0, 1.0]
-distortion_model: plumb_bob
-distortion_coefficients:
-  rows: 1
-  cols: 5
-  data: [0.027982, -0.091183, -0.000038, -0.000691, 0.0]
-rectification_matrix:
-  rows: 3
-  cols: 3
-  data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
-projection_matrix:
-  rows: 3
-  cols: 4
-  data: [1685.891479, 0.0, 1157.314982, 0.0, 0.0, 1702.888428, 707.170405, 0.0, 0.0, 0.0, 1.0, 0.0]
diff --git a/src/server_main/config/camera_info_2_hm.yaml b/src/server_main/config/camera_info_2_hm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..13c1305daa12140a3fcaf8bfa01ab4b002c76468
--- /dev/null
+++ b/src/server_main/config/camera_info_2_hm.yaml
@@ -0,0 +1,55 @@
+camera_matrix:
+  cols: 3
+  data:
+  - 1415.222015
+  - 0.0
+  - 965.9559741666668
+  - 0.0
+  - 1417.4265908333334
+  - 489.9294341666666
+  - 0.0
+  - 0.0
+  - 1.0
+  rows: 3
+camera_name: camera1
+distortion_coefficients:
+  cols: 5
+  data:
+  - 0.027982
+  - -0.091183
+  - -3.8e-05
+  - -0.000691
+  - 0.0
+  rows: 1
+distortion_model: plumb_bob
+image_height: 1080
+image_width: 1920
+projection_matrix:
+  cols: 4
+  data:
+  - 1404.0
+  - 0.0
+  - 964.0
+  - 0.0
+  - 0.0
+  - 1419.0
+  - 489.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  - 0.0
+  rows: 3
+rectification_matrix:
+  cols: 3
+  data:
+  - 1.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  rows: 3
diff --git a/src/server_main/config/camera_info_3.yaml b/src/server_main/config/camera_info_3.yaml
deleted file mode 100644
index 48cc4aff80a5a23818e40d3a012282d9d8bda9d7..0000000000000000000000000000000000000000
--- a/src/server_main/config/camera_info_3.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-image_width: 2304
-image_height: 1536
-camera_name: camera1
-camera_matrix:
-  rows: 3
-  cols: 3
-  data: [1667.826143, 0.0, 1138.575668, 0.0, 1670.183659, 780.932475, 0.0, 0.0, 1.0]
-distortion_model: plumb_bob
-distortion_coefficients:
-  rows: 1
-  cols: 5
-  data: [0.094722, -0.160338, -0.000437, 0.000212, 0.0]
-rectification_matrix:
-  rows: 3
-  cols: 3
-  data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
-projection_matrix:
-  rows: 3
-  cols: 4
-  data: [1682.388306, 0.0, 1137.875104, 0.0, 0.0, 1692.441040, 779.936789, 0.0, 0.0, 0.0, 1.0, 0.0]
diff --git a/src/server_main/config/camera_info_3_hm.yaml b/src/server_main/config/camera_info_3_hm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7060c1a1796513da2acb4de56ae0b07bc01033c3
--- /dev/null
+++ b/src/server_main/config/camera_info_3_hm.yaml
@@ -0,0 +1,55 @@
+camera_matrix:
+  cols: 3
+  data:
+  - 1389.8551191666668
+  - 0.0
+  - 948.8130566666666
+  - 0.0
+  - 1391.8197158333335
+  - 550.7770624999999
+  - 0.0
+  - 0.0
+  - 1.0
+  rows: 3
+camera_name: camera1
+distortion_coefficients:
+  cols: 5
+  data:
+  - 0.094722
+  - -0.160338
+  - -0.000437
+  - 0.000212
+  - 0.0
+  rows: 1
+distortion_model: plumb_bob
+image_height: 1080
+image_width: 1920
+projection_matrix:
+  cols: 4
+  data:
+  - 1401.0
+  - 0.0
+  - 948.0
+  - 0.0
+  - 0.0
+  - 1410.0
+  - 549.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  - 0.0
+  rows: 3
+rectification_matrix:
+  cols: 3
+  data:
+  - 1.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  rows: 3
diff --git a/src/server_main/config/camera_info_4.yaml b/src/server_main/config/camera_info_4.yaml
index 45141a228a4acb219d88a9dc77d808c295a079bf..1b9f9a7bfc43365f7a4938d5d3be558d0a18d524 100644
--- a/src/server_main/config/camera_info_4.yaml
+++ b/src/server_main/config/camera_info_4.yaml
@@ -1,20 +1,20 @@
-image_width: 2304
-image_height: 1536
-camera_name: camera2
+image_width: 1920
+image_height: 1080
+camera_name: camera1
 camera_matrix:
   rows: 3
   cols: 3
-  data: [1745.606619, 0.0, 1200.659341, 0.0, 1747.554903, 754.179311, 0.0, 0.0, 1.0]
+  data: [1422.222502, 0.000000, 991.494781, 0.000000, 1427.702678, 558.606602, 0.000000, 0.000000, 1.000000]
 distortion_model: plumb_bob
 distortion_coefficients:
   rows: 1
   cols: 5
-  data: [0.010119, -0.076276, 0.000320, 0.003884, 0.0]
+  data: [0.027580, -0.103098, 0.000247, 0.000938, 0.000000]
 rectification_matrix:
   rows: 3
   cols: 3
-  data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
+  data: [1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, 1.000000]
 projection_matrix:
   rows: 3
   cols: 4
-  data: [1724.136841, 0.0, 1212.366957, 0.0, 0.0, 1746.046509, 753.905950, 0.0, 0.0, 0.0, 1.0, 0.0]
+  data: [1407.457764, 0.000000, 995.120229, 0.000000, 0.000000, 1429.222778, 558.280777, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000]
diff --git a/src/server_main/config/camera_info_4_hm.yaml b/src/server_main/config/camera_info_4_hm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3c7af6cd2104a3fb621744d36a6b4ad4ad130ee2
--- /dev/null
+++ b/src/server_main/config/camera_info_4_hm.yaml
@@ -0,0 +1,55 @@
+camera_matrix:
+  cols: 3
+  data:
+  - 1454.6721825
+  - 0.0
+  - 1000.5494508333334
+  - 0.0
+  - 1456.2957525
+  - 528.4827591666667
+  - 0.0
+  - 0.0
+  - 1.0
+  rows: 3
+camera_name: camera1
+distortion_coefficients:
+  cols: 5
+  data:
+  - 0.010119
+  - -0.076276
+  - 0.00032
+  - 0.003884
+  - 0.0
+  rows: 1
+distortion_model: plumb_bob
+image_height: 1080
+image_width: 1920
+projection_matrix:
+  cols: 4
+  data:
+  - 1436.0
+  - 0.0
+  - 1010.0
+  - 0.0
+  - 0.0
+  - 1455.0
+  - 528.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  - 0.0
+  rows: 3
+rectification_matrix:
+  cols: 3
+  data:
+  - 1.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  rows: 3
diff --git a/src/server_main/config/camera_info_5.yaml b/src/server_main/config/camera_info_5.yaml
deleted file mode 100644
index a5244c8f7324bff196f49abf6ff770afcfdebd57..0000000000000000000000000000000000000000
--- a/src/server_main/config/camera_info_5.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-image_width: 2304
-image_height: 1536
-camera_name: camera_5
-camera_matrix:
-  rows: 3
-  cols: 3
-  data: [1682.731043, 0.0, 1140.320594, 0.0, 1685.483053, 796.744407, 0.0, 0.0, 1.0]
-distortion_model: plumb_bob
-distortion_coefficients:
-  rows: 1
-  cols: 5
-  data: [0.115351, -0.200016, -0.000923, -0.000568, 0.0]
-rectification_matrix:
-  rows: 3
-  cols: 3
-  data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
-projection_matrix:
-  rows: 3
-  cols: 4
-  data: [1700.116211, 0.0, 1137.653527, 0.0, 0.0, 1712.949707, 795.213773, 0.0, 0.0, 0.0, 1.0, 0.0]
diff --git a/src/server_main/config/camera_info_5_hm.yaml b/src/server_main/config/camera_info_5_hm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..478b7dcc01dbfc820c25810603dfe44078e51f8b
--- /dev/null
+++ b/src/server_main/config/camera_info_5_hm.yaml
@@ -0,0 +1,55 @@
+camera_matrix:
+  cols: 3
+  data:
+  - 1402.2758691666668
+  - 0.0
+  - 950.2671616666668
+  - 0.0
+  - 1404.5692108333333
+  - 563.9536725
+  - 0.0
+  - 0.0
+  - 1.0
+  rows: 3
+camera_name: camera_5
+distortion_coefficients:
+  cols: 5
+  data:
+  - 0.115351
+  - -0.200016
+  - -0.000923
+  - -0.000568
+  - 0.0
+  rows: 1
+distortion_model: plumb_bob
+image_height: 1080
+image_width: 1920
+projection_matrix:
+  cols: 4
+  data:
+  - 1416.0
+  - 0.0
+  - 948.0
+  - 0.0
+  - 0.0
+  - 1427.0
+  - 562.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  - 0.0
+  rows: 3
+rectification_matrix:
+  cols: 3
+  data:
+  - 1.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  rows: 3
diff --git a/src/server_main/config/camera_info_6.yaml b/src/server_main/config/camera_info_6.yaml
index 1cc541b39b920b85493dff330498fecc6ba6a142..c46baefc931d2fec8d8a7bc17264c3469b68ac96 100644
--- a/src/server_main/config/camera_info_6.yaml
+++ b/src/server_main/config/camera_info_6.yaml
@@ -1,20 +1,20 @@
-image_width: 2304
-image_height: 1536
-camera_name: camera_6
+image_width: 1920
+image_height: 1080
+camera_name: camera2
 camera_matrix:
   rows: 3
   cols: 3
-  data: [1671.934541, 0.0, 1174.049429, 0.0, 1673.408229, 777.959151, 0.0, 0.0, 1.0]
+  data: [1368.798164, 0.000000, 967.171879, 0.000000, 1372.332058, 561.632347, 0.000000, 0.000000, 1.000000]
 distortion_model: plumb_bob
 distortion_coefficients:
   rows: 1
   cols: 5
-  data: [0.105683, -0.176839, 0.003390, 0.003059, 0.0]
+  data: [0.091538, -0.153746, -0.000307, 0.000261, 0.000000]
 rectification_matrix:
   rows: 3
   cols: 3
-  data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
+  data: [1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, 1.000000]
 projection_matrix:
   rows: 3
   cols: 4
-  data: [1688.695312, 0.0, 1182.482974, 0.0, 0.0, 1701.707764, 781.526336, 0.0, 0.0, 0.0, 1.0, 0.0]
+  data: [1379.395874, 0.000000, 967.633706, 0.000000, 0.000000, 1389.802856, 560.814019, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000]
diff --git a/src/server_main/config/camera_info_6_hm.yaml b/src/server_main/config/camera_info_6_hm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..596e63861df7d97bf251c5a275c726c21576fc00
--- /dev/null
+++ b/src/server_main/config/camera_info_6_hm.yaml
@@ -0,0 +1,55 @@
+camera_matrix:
+  cols: 3
+  data:
+  - 1393.2787841666668
+  - 0.0
+  - 978.3745241666667
+  - 0.0
+  - 1394.5068575
+  - 548.2992925000001
+  - 0.0
+  - 0.0
+  - 1.0
+  rows: 3
+camera_name: camera2
+distortion_coefficients:
+  cols: 5
+  data:
+  - 0.105683
+  - -0.176839
+  - 0.00339
+  - 0.003059
+  - 0.0
+  rows: 1
+distortion_model: plumb_bob
+image_height: 1080
+image_width: 1920
+projection_matrix:
+  cols: 4
+  data:
+  - 1407.0
+  - 0.0
+  - 985.0
+  - 0.0
+  - 0.0
+  - 1418.0
+  - 551.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  - 0.0
+  rows: 3
+rectification_matrix:
+  cols: 3
+  data:
+  - 1.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  - 0.0
+  - 0.0
+  - 0.0
+  - 1.0
+  rows: 3
diff --git a/src/server_main/config/camera_params.yaml b/src/server_main/config/camera_params.yaml
index 1f0a4cd2f8cf3db0aeef68fac58a8f7c68d027bb..3f96e07c4ceb766f18f6113d22ffc1a475b1d367 100644
--- a/src/server_main/config/camera_params.yaml
+++ b/src/server_main/config/camera_params.yaml
@@ -1,14 +1,14 @@
 /**:
     ros__parameters:
-      video_device: "/dev/video4"
+      video_device: "/dev/video2"
       framerate: 30.0
       io_method: "mmap"
       frame_id: "camera1"
-      pixel_format: "yuyv"  # see usb_cam/supported_formats for list of supported formats
-      image_width: 2304
-      image_height: 1536
+      pixel_format: "mjpeg2rgb"  # see usb_cam/supported_formats for list of supported formats
+      image_width: 1920
+      image_height: 1080
       camera_name: "camera1"
-      camera_info_url: "package://server_main/config/camera_info_2.yaml"
+      camera_info_url: "package://server_main/config/camera_info_1_hm.yaml"
       brightness: -1
       contrast: -1
       saturation: -1
diff --git a/src/server_main/config/camera_params2.yaml b/src/server_main/config/camera_params2.yaml
index 631d2cdfb8ad741a9f5d26daffc1c0df09df8c14..f58a54122e781b3376b7b552d30d208cdf679db2 100644
--- a/src/server_main/config/camera_params2.yaml
+++ b/src/server_main/config/camera_params2.yaml
@@ -8,7 +8,7 @@
       image_width: 1920
       image_height: 1080
       camera_name: "camera2"
-      camera_info_url: "package://server_main/config/camera_info_6.yaml"
+      camera_info_url: "package://server_main/config/camera_info_4.yaml"
       brightness: -1
       contrast: -1
       saturation: -1
diff --git a/src/server_main/config/camera_params_converter.py b/src/server_main/config/camera_params_converter.py
new file mode 100644
index 0000000000000000000000000000000000000000..a868a614560202ea85da38fab775febb09e87603
--- /dev/null
+++ b/src/server_main/config/camera_params_converter.py
@@ -0,0 +1,58 @@
+import yaml
+import argparse
+import os
+
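+# Rescale a calibration from the old 2304x1536 capture to 1920x1080
+# (2304 / 1920 == 1.2); CROP_PARAM_Y rows are cropped vertically, split
+# evenly between top and bottom (1536 / 1.2 - 200 == 1080).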
+CROP_PARAM_Y = 200
+K = 1.2
+
+
+def resize_camera_matrix_inplace(yaml_file, k, crop_param_y):
+    with open(yaml_file, "r") as file:
+        data = yaml.safe_load(file)
+
+    camera_matrix = data["camera_matrix"]["data"]
+
+    camera_matrix[0] /= k  # a_x
+    camera_matrix[2] /= k  # u_x
+    camera_matrix[4] /= k  # a_y
+    camera_matrix[5] /= k  # v_y
+    camera_matrix[5] -= crop_param_y // 2
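+    # Cropping crop_param_y rows (half from the top, half from the bottom)
+    # shifts the principal point v_y up by crop_param_y / 2.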
+
+    data["camera_matrix"]["data"] = camera_matrix
+
+    data["image_width"] //= k
+    data["image_height"] //= k
+    data["image_height"] -= crop_param_y
+    data["image_width"] = int(data["image_width"])
+    data["image_height"] = int(data["image_height"])
+
+    projection_matrix = data["projection_matrix"]["data"]
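+    # Flattened 3x4 projection matrix: [fx, 0, cx, tx, 0, fy, cy, ty, 0, 0, 1, 0],
+    # so indices 0, 2, 5, 6 are fx, cx, fy, cy. Note the floor division:
+    # the scaled entries are truncated to whole values, which matches the
+    # integer-valued projection matrices in the *_hm.yaml files above.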
+    projection_matrix[0] //= k
+    projection_matrix[2] //= k
+    projection_matrix[5] //= k
+    projection_matrix[6] //= k
+    projection_matrix[6] -= crop_param_y // 2
+
+    with open(yaml_file, "w") as file:
+        yaml.safe_dump(data, file)
+
+    print(f"Updated camera matrix and image dimensions saved in '{yaml_file}'")
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Resize camera matrix and image dimensions in a YAML file."
+    )
+    parser.add_argument("yaml_file", type=str, help="Path to the YAML file.")
+
+    args = parser.parse_args()
+
+    if not os.path.isfile(args.yaml_file):
+        print(f"Error: The file '{args.yaml_file}' does not exist.")
+        return
+
+    resize_camera_matrix_inplace(args.yaml_file, K, CROP_PARAM_Y)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/server_main/config/connections_config.json b/src/server_main/config/connections_config.json
index 8767260fb7c854e888fa96da0f5f7db369940b57..2a2756e720e3a4885e3a08de9b00298c3a3793a7 100644
--- a/src/server_main/config/connections_config.json
+++ b/src/server_main/config/connections_config.json
@@ -2,8 +2,13 @@
     "0":
     {
         "device": "ttyUSB0",
+        "robot_name": "ROKI-0058",
+        "port": "1970"
+    },
+    "1":
+    {
+        "device": "ttyUSB1",
         "robot_name": "ROKI-0059",
-        "port": "1969"
+        "port": "1971"
     }
-
 }
diff --git a/src/server_main/config/robots_config.json b/src/server_main/config/robots_config.json
index 0badc3e80de12a9bfad0ad1b78f2d3c97009f91c..d66edcefc9c9552e872a42c33281b240504df650 100644
--- a/src/server_main/config/robots_config.json
+++ b/src/server_main/config/robots_config.json
@@ -1,26 +1,17 @@
 {
 	"robots": {
-		"0": {
-			"aruco_ids": [0, 2, 4, 6, 8],
-			"aruco_transforms": [
-			"front", "right", "back", "left", "up"
-			]
-		},
 		"1": {
-			"aruco_ids": [10, 12, 14, 16, 18],
+			"aruco_ids": [1, 3, 5, 7, 9],
 			"aruco_transforms": [
-			"front", "left", "back", "right", "up"
+			"front", "right", "back", "left", "up"
 			]
 		},
-		"2": {
-			"aruco_ids": [20, 22, 24, 26, 28],
+		"0": {
+			"aruco_ids": [11, 13, 15, 17, 19],
 			"aruco_transforms": [
-				[0, 0, 0],
-				[0, -1.57, 0],
-				[0, 3.14, 0],
-				[0, 1.57, 0],
-				[1.57, 0, 0]
+			"front", "right", "back", "left", "up"
 			]
 		}
+
 	}
 }
diff --git a/src/server_main/launch/multi_team_multi_camera.launch.py b/src/server_main/launch/multi_team_multi_camera.launch.py
index c584f277a234ab11c2aa6c4891d40e24a43ee351..2a6517c78c8467164aad973b0023286880d5b8fa 100644
--- a/src/server_main/launch/multi_team_multi_camera.launch.py
+++ b/src/server_main/launch/multi_team_multi_camera.launch.py
@@ -17,7 +17,7 @@ def generate_launch_description():
     )
     yolo_dir = get_package_share_directory("yolo_openvino_ros2")
     robot_config2 = os.path.join(
-        get_package_share_directory("server_main"), "config", "robots_config2.json"
+        get_package_share_directory("server_main"), "config", "robots_config.json"
     )
     config_dir = yolo_dir + "/config/"
     Node(
diff --git a/src/server_main/launch/solo_camera_multi_team.launch.py b/src/server_main/launch/solo_camera_multi_team.launch.py
index 462a7bd3509487075c6f84708b6b807847ba223a..6152cf87f19af323c6f578d83a6d96c563148ef7 100644
--- a/src/server_main/launch/solo_camera_multi_team.launch.py
+++ b/src/server_main/launch/solo_camera_multi_team.launch.py
@@ -14,7 +14,7 @@ def generate_launch_description():
     )
 
     robot_config2 = os.path.join(
-        get_package_share_directory("server_main"), "config", "robots_config2.json"
+        get_package_share_directory("server_main"), "config", "robots_config.json"
     )
     yolo_dir = get_package_share_directory("yolo_openvino_ros2")
     config_dir = yolo_dir + "/config/"
diff --git a/src/simple_approach/launch/approach.launch.py b/src/simple_approach/launch/approach.launch.py
index 77163b2c9263cc1e35dd0085cdad94f9a6056779..22340aa1c1d5bba308cb24a9346b370ed2b87e1d 100644
--- a/src/simple_approach/launch/approach.launch.py
+++ b/src/simple_approach/launch/approach.launch.py
@@ -9,7 +9,7 @@ from launch.actions import ExecuteProcess
 from launch_ros.actions import Node
 
 
-class LaunchUnitsCreator:
+class ApproachLaunchCreator:
     def __init__(self, connections_config_path: str, approach_config_path: str) -> None:
         self.__connections_config = self.__read_connections_config(
             connections_config_path
@@ -103,6 +103,6 @@ def generate_launch_description():
         get_package_share_directory("server_main"), "config", "approach_config.json"
     )
 
-    return LaunchUnitsCreator(
+    return ApproachLaunchCreator(
         connections_config_path, approach_config_path
     ).create_launch()
diff --git a/src/simple_approach/simple_approach/ApproachNode.py b/src/simple_approach/simple_approach/ApproachNode.py
index 701134381c7b9e4bc513a9d65b350dc482ade34d..1ffed558f061bf02828bdd6474f94f96dd47c21e 100644
--- a/src/simple_approach/simple_approach/ApproachNode.py
+++ b/src/simple_approach/simple_approach/ApproachNode.py
@@ -1,3 +1,5 @@
+import sys
+
 import json
 
 from dataclasses import dataclass
@@ -26,7 +28,9 @@ class ApproachNode(Node):
 
         self.__cmd_velocity_publisher = self.create_publisher(Twist, "plan_cmd_vel", 10)
 
-        self.__lacalization_subscriber = self.create_subscription(
+        # self.create_timer(0.2, self.__cmd_vel_cb)
+
+        self.__localization_subscriber = self.create_subscription(
             Pose2D, "localization", self.__position_callback, 10
         )
 
@@ -34,6 +38,10 @@ class ApproachNode(Node):
             Pose2D, "goal_position", self.__goal_position_callback, 10
         )
 
+        self.__ball_position_subscriber = self.create_subscription(
+            Pose2D, "/ball_coordinate_world", self.__ball_position_cb, 10
+        )
+
         self.__approach_config_path = str()
         self.__robot_id = str()
         self.__declare_params()
@@ -68,12 +76,31 @@ class ApproachNode(Node):
             RobotConfig(**conf[robot_id]["robot"]),
         )
 
+    def __cmd_vel_cb(self) -> None:
+        self.__cmd_velocity_publisher.publish(self.__approach.next_twist())
+
     def __position_callback(self, msg: Pose2D) -> None:
         self.__approach.robot.position = msg
         self.__cmd_velocity_publisher.publish(self.__approach.next_twist())
+        # self.get_logger().info(
+        #     f"dist = {round(self.__approach.dist_error, 2)} angular = {round(self.__approach.angular_error, 2)}, theta={self.__approach.theta}, exp={self.__approach.exp_theta}"
+        # )
 
     def __goal_position_callback(self, msg: Pose2D) -> None:
         self.__approach.target_position = msg
+        self.__approach.planner.update_waypoints(
+            self.__approach.robot.position, self.__approach.target_position
+        )
+
+    def __ball_position_cb(self, msg: Pose2D) -> None:
+        print("OBSTACLES CALLBACK", file=sys.stderr)
+        self.__approach.planner.obstacles.clear()
+        self.__approach.planner.add_obstacle(
+            msg, radius=0.5, has_weak_area=True, weak_angle=0.0, weak_range=2.0
+        )
+        self.__approach.planner.update_waypoints(
+            self.__approach.robot.position, self.__approach.target_position
+        )
 
     @staticmethod
     def __read_approach_config(approach_config_path: str) -> dict:
diff --git a/src/simple_approach/simple_approach/SimpleApproach.py b/src/simple_approach/simple_approach/SimpleApproach.py
index 554a8a2bd4225a4232f49bf93f7bc542237337f2..bb0f772266520c278ab9bc9f311a999bf08a1718 100644
--- a/src/simple_approach/simple_approach/SimpleApproach.py
+++ b/src/simple_approach/simple_approach/SimpleApproach.py
@@ -1,11 +1,15 @@
 from dataclasses import dataclass
 
+import math
+
 from geometry_msgs.msg import Pose2D
 from geometry_msgs.msg import Twist
 from geometry_msgs.msg import Vector3
 
 import numpy as np
 
+from simple_approach.SimplePlanner import SimplePlanner
+
 # TODO: maybe find package with such functions or create own
 
 
@@ -14,9 +18,7 @@ def distance(lhs: Pose2D, rhs: Pose2D) -> float:
 
 
 def angular_distance(lhs: Pose2D, rhs: Pose2D) -> float:
-    dist = lhs.theta - rhs.theta
-
-    return np.pi - dist if dist > np.pi else np.pi + dist if dist < -np.pi else dist
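+    # math.remainder(x, 2*pi) wraps the angle difference into [-pi, pi];
+    # the old branching version mishandled differences beyond +/-pi.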
+    return math.remainder(lhs.theta - rhs.theta, 2.0 * np.pi)
 
 
 def rotation_angle(lhs: Pose2D, rhs: Pose2D) -> float:
@@ -105,6 +107,8 @@ class SimpleApproach:
 
         self.__target_position = Pose2D()
 
+        self.planner = SimplePlanner()
+
     @property
     def target_position(self) -> Pose2D:
         return self.__target_position
@@ -117,46 +121,110 @@ class SimpleApproach:
     def target_position(self, new_position: Pose2D) -> None:
         self.__target_position = new_position
 
-    def __is_near_expected_position(self, expected: Pose2D) -> bool:
-        return distance(self.__robot.position, expected) < self.__distance_eps
+    @property
+    def distance_eps(self) -> float:
+        return self.__distance_eps
 
-    def __is_near_expected_direction(self, expected: Pose2D) -> bool:
-        return abs(self.__robot.position.theta - expected.theta) < self.__angular_eps
+    def is_near_expected_position(self, expected: Pose2D) -> bool:
+        self.dist_error = distance(self.__robot.position, expected)
+        return self.dist_error < self.__distance_eps
+
+    def is_near_expected_direction(self, expected: Pose2D) -> bool:
+        # self.theta = self.__robot.position.theta
+        # self.exp_theta = expected.theta
+        self.angular_error = abs(angular_distance(expected, self.__robot.position))
+        return self.angular_error < self.__angular_eps
+
+    def distance_velocity_scale(self, delta: float, eps: float) -> float:
+        # Piecewise ramp: full scale beyond 3*eps, proportional
+        # |delta| / (3*eps) between eps and 3*eps, zero inside the deadband.
+        if np.abs(delta) > 3.0 * eps:
+            return 1.0
+        if np.abs(delta) > eps:
+            return np.abs(delta) / (3.0 * eps)
+        return 0.0
 
     def __rotate(self) -> float:
         return (
             self.__target_position.theta
-            if self.__is_near_expected_position(self.__target_position)
+            if self.is_near_expected_position(self.__target_position)
             else rotation_angle(self.__target_position, self.__robot.position)
         )
 
-    def __next_position(self) -> Pose2D:
+    def next_position(self) -> Pose2D:
         return Pose2D(
             x=self.__target_position.x,
             y=self.__target_position.y,
             theta=self.__rotate(),
         )
 
-    def __goal_direction(self, goal: Pose2D) -> float:
+    def goal_direction(self, goal: Pose2D) -> float:
         goal_vector = np.array([[goal.x], [goal.y]])
         _coords = self.__robot.tf_matrix @ (goal_vector - self.__robot.position_vector)
 
         return np.arctan2(_coords[1][0], _coords[0][0])
 
     def __linear_vel(self, goal: Pose2D) -> Vector3:
-        if self.__is_near_expected_position(goal):
+        if self.is_near_expected_position(goal) or not self.is_near_expected_direction(
+            goal
+        ):
             return Vector3(x=0.0, y=0.0, z=0.0)
 
-        phi = self.__goal_direction(goal)
+        phi = self.goal_direction(goal)
 
         return Vector3(
-            x=self.__robot.x_vel * np.cos(phi) * self.__linear_v_x_scale,
-            y=self.__robot.y_vel * np.sin(phi) * self.__linear_v_y_scale,
+            x=self.__robot.x_vel
+            * np.cos(phi)
+            * self.__linear_v_x_scale
+            * self.distance_velocity_scale(
+                distance(self.__robot.position, self.target_position),
+                self.__distance_eps,
+            ),
+            y=self.__robot.y_vel
+            * np.sin(phi)
+            * self.__linear_v_y_scale
+            * self.distance_velocity_scale(
+                distance(self.__robot.position, self.target_position),
+                self.__distance_eps,
+            ),
             z=0.0,
         )
 
     def __angular_vel(self, goal: Pose2D) -> Vector3:
-        if self.__is_near_expected_direction(goal):
+        if not self.is_near_expected_position(goal):
+            # Far from the waypoint: turn toward the direction of travel
+            # rather than the goal heading.
+            travel_heading = Pose2D(
+                x=goal.x,
+                y=goal.y,
+                theta=self.goal_direction(goal) + self.__robot.position.theta,
+            )
+            turn = angular_distance(travel_heading, self.__robot.position)
+            return Vector3(
+                x=0.0,
+                y=0.0,
+                z=self.__robot.yaw_vel
+                * np.sign(turn)
+                * self.__angular_v_z_scale
+                * self.distance_velocity_scale(turn, self.__angular_eps),
+            )
+
+        if self.is_near_expected_direction(goal):
             return Vector3(x=0.0, y=0.0, z=0.0)
 
         return Vector3(
@@ -168,7 +236,19 @@ class SimpleApproach:
         )
 
     def next_twist(self):
-        goal_position = self.__next_position()
+        goal_position = self.planner.next_waypoint()
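+        # Drop waypoints that have already been reached; an exhausted plan
+        # yields a zero Twist.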
+        while (
+            goal_position is not None
+            and self.is_near_expected_position(goal_position)
+            and self.is_near_expected_direction(goal_position)
+        ):
+            self.planner.waypoints.pop()
+            goal_position = self.planner.next_waypoint()
+
+        if goal_position is None:
+            return Twist()
+
+        # goal_position = self.next_position()
 
         return Twist(
             linear=self.__linear_vel(goal_position),
diff --git a/src/simple_approach/simple_approach/SimplePlanner.py b/src/simple_approach/simple_approach/SimplePlanner.py
new file mode 100644
index 0000000000000000000000000000000000000000..03b94751ebbd507654242ff6a6022ab12bfc01e0
--- /dev/null
+++ b/src/simple_approach/simple_approach/SimplePlanner.py
@@ -0,0 +1,327 @@
+import sys
+
+from dataclasses import dataclass
+
+import math
+
+import numpy as np
+
+from geometry_msgs.msg import Pose2D
+
+
+@dataclass
+class Obstacle:
+    x: float
+    y: float
+    radius: float
+    has_weak_area: bool
+    weak_range: float
+    weak_angle: float
+
+
+@dataclass
+class Line:
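+    # Line in implicit form a*x + b*y + c = 0; distance() is the standard
+    # point-to-line formula |a*x + b*y + c| / sqrt(a^2 + b^2).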
+    a: float
+    b: float
+    c: float
+
+    def distance(self, position: Pose2D) -> float:
+        return abs(self.a * position.x + self.b * position.y + self.c) / np.sqrt(
+            self.a * self.a + self.b * self.b
+        )
+
+
+def tf_from_position(position: Pose2D) -> np.ndarray:
+    return np.array(
+        [
+            [np.cos(position.theta), np.sin(position.theta)],
+            [-np.sin(position.theta), np.cos(position.theta)],
+        ]
+    )
+
+
+def direction_to_goal(current: Pose2D, goal: Pose2D) -> float:
+    goal_vector = np.array([[goal.x], [goal.y]])
+    current_vector = np.array([[current.x], [current.y]])
+
+    coordinates = tf_from_position(current) @ (goal_vector - current_vector)
+
+    return np.arctan2(coordinates[1][0], coordinates[0][0])
+
+
+def build_line_from_positions(lhs: Pose2D, rhs: Pose2D) -> Line:
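+    # (a, b) is the normal to the segment lhs->rhs; c anchors the line at lhs.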
+    a, b = lhs.y - rhs.y, -(lhs.x - rhs.x)
+
+    c = -a * lhs.x - b * lhs.y
+
+    return Line(a, b, c)
+
+
+def distance(lhs: Pose2D, rhs: Pose2D) -> float:
+    return ((lhs.x - rhs.x) ** 2 + (lhs.y - rhs.y) ** 2) ** 0.5
+
+
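+# The "weak area" of an obstacle is the angular sector (half-angle
+# weak_range / 2 around weak_angle) through which approach is still
+# permitted; presumably this lets a ball modeled as an obstacle be reached.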
+def in_weak_area(obstacle: Obstacle, target: Pose2D) -> bool:
+    # print(f"Direction to goal: {-direction_to_goal(Pose2D(x=obstacle.x, y=obstacle.y, theta=0.0), target)}", file=sys.stderr)
+    # print(f"Weak angle: {obstacle.weak_angle}", file=sys.stderr)
+
+    return distance(
+        Pose2D(x=obstacle.x, y=obstacle.y, theta=0.0), target
+    ) < obstacle.radius and in_weak_range(obstacle, target)
+
+
+def in_weak_range(obstacle: Obstacle, target: Pose2D) -> bool:
+    return (
+        abs(
+            -direction_to_goal(Pose2D(x=obstacle.x, y=obstacle.y, theta=0.0), target)
+            - obstacle.weak_angle
+        )
+        < obstacle.weak_range / 2.0
+    )
+
+
+class SimplePlanner:
+    def __init__(self):
+        self.obstacles = []
+        self.waypoints = []
+
+    def is_near_expected_position(self, current: Pose2D, expected: Pose2D) -> bool:
+        self.dist_error = distance(current, expected)
+        return self.dist_error < 0.2
+
+    def is_near_expected_direction(self, current: Pose2D, expected: Pose2D) -> bool:
+        # self.theta = self.__robot.position.theta
+        # self.exp_theta = expected.theta
+        self.angular_error = abs(
+            math.remainder(expected.theta - current.theta, 2.0 * math.pi)
+        )
+        return self.angular_error < 0.2
+
+    def add_obstacle(
+        self,
+        obstacle_pose: Pose2D,
+        radius: float,
+        has_weak_area: bool = False,
+        weak_angle: float | None = None,
+        weak_range: float | None = None,
+    ) -> None:
+        self.obstacles.append(
+            Obstacle(
+                x=obstacle_pose.x,
+                y=obstacle_pose.y,
+                radius=radius,
+                has_weak_area=has_weak_area,
+                weak_angle=weak_angle,
+                weak_range=weak_range,
+            )
+        )
+
+    def goal_to_waypoints(self, current: Pose2D, goal: Pose2D) -> list[Pose2D]:
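+        # Turn-then-translate decomposition: rotate in place toward the goal
+        # if needed, walk to the goal point, then assume the goal heading.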
+        result = []
+        if not self.is_near_expected_position(current, goal):
+            if not self.is_near_expected_direction(
+                current,
+                Pose2D(
+                    x=current.x, y=current.y, theta=direction_to_goal(current, goal)
+                ),
+            ):
+                result.append(
+                    Pose2D(
+                        x=current.x,
+                        y=current.y,
+                        theta=direction_to_goal(current, goal) + current.theta,
+                    )
+                )
+
+            result.append(
+                Pose2D(
+                    x=goal.x,
+                    y=goal.y,
+                    theta=direction_to_goal(current, goal) + current.theta,
+                )
+            )
+
+        result.append(goal)
+
+        return result
+
+    def build_waypoints(self, current: Pose2D, goal: Pose2D) -> tuple:
+        line = build_line_from_positions(current, goal)
+
+        nearest_obstacles = []
+        for obstacle in self.obstacles:
+            obstacle_pose = Pose2D(x=obstacle.x, y=obstacle.y, theta=0.0)
+
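+            # An obstacle forces a detour only when it lies between us and
+            # the goal, its circle intersects the straight-line path, and it
+            # is not excused by its weak area.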
+            print(
+                f"In weak area current: {in_weak_area(obstacle, current)}",
+                file=sys.stderr,
+            )
+            if (
+                (
+                    distance(obstacle_pose, current) <= distance(current, goal)
+                    or (obstacle.has_weak_area and not in_weak_range(obstacle, current))
+                )
+                and line.distance(obstacle_pose) < obstacle.radius
+                and (
+                    distance(obstacle_pose, goal) > obstacle.radius
+                    or obstacle.has_weak_area
+                    and in_weak_area(obstacle, goal)
+                    and not in_weak_range(obstacle, current)
+                )
+                and (
+                    (obstacle_pose.x - current.x) * (goal.x - current.x)
+                    + (obstacle_pose.y - current.y) * (goal.y - current.y)
+                    > 0.0
+                    or distance(obstacle_pose, goal) <= obstacle.radius
+                )
+                and (
+                    distance(obstacle_pose, current) > obstacle.radius
+                    or obstacle.has_weak_area
+                    and in_weak_area(obstacle, goal)
+                )
+            ):
+                # print(f"Line distance: {line.distance(obstacle_pose)}", file=sys.stderr)
+                nearest_obstacles.append(obstacle)
+
+        nearest_obstacles.sort(
+            key=lambda obstacle: distance(
+                Pose2D(x=obstacle.x, y=obstacle.y, theta=0.0), current
+            )
+        )
+
+        if len(nearest_obstacles) > 0:
+            # Bind the nearest obstacle explicitly; the bare `obstacle` name
+            # used below would otherwise carry the last loop iteration's value.
+            obstacle = nearest_obstacles[0]
+            obstacle_pose = Pose2D(x=obstacle.x, y=obstacle.y, theta=0.0)
+
+            dist = line.distance(obstacle_pose)
+
+            dist_to_radius = np.sqrt(distance(obstacle_pose, current) ** 2 - dist**2)
+            dist_to_avoidance = np.sqrt(
+                dist_to_radius**2 + (obstacle.radius - dist) ** 2
+            )
+
+            phi_1 = np.arctan2(obstacle_pose.y - current.y, obstacle_pose.x - current.x)
+            phi_2 = np.arccos(
+                (
+                    dist_to_avoidance**2
+                    + distance(current, obstacle_pose) ** 2
+                    - obstacle.radius**2
+                )
+                / (2.0 * dist_to_avoidance * distance(current, obstacle_pose))
+            )
+
+            avoidance_point_1 = Pose2D(
+                x=current.x + dist_to_avoidance * np.cos(phi_1 + phi_2),
+                y=current.y + dist_to_avoidance * np.sin(phi_1 + phi_2),
+            )
+            avoidance_point_1.theta = direction_to_goal(avoidance_point_1, goal)
+
+            # waypoints_1 = [avoidance_point_1]
+            # length_1 = distance(current, avoidance_point_1)
+
+            waypoints_1, length_1 = (
+                self.goal_to_waypoints(current, avoidance_point_1),
+                distance(current, avoidance_point_1),
+            )
+
+            if obstacle.has_weak_area and in_weak_area(obstacle, goal):
+                mean_point = Pose2D(
+                    x=obstacle.x + obstacle.radius * np.cos(obstacle.weak_angle),
+                    y=obstacle.y - obstacle.radius * np.sin(obstacle.weak_angle),
+                    theta=0.0,
+                )
+                # waypoints_1.append(mean_point)
+                # waypoints_1.append(goal)
+                # length_1 += distance(avoidance_point_1, mean_point)
+                # length_1 += distance(mean_point, goal)
+
+                to_mean_wp, to_mean_length = (
+                    self.goal_to_waypoints(avoidance_point_1, mean_point),
+                    distance(avoidance_point_1, mean_point),
+                )
+                to_goal_wp, to_goal_length = (
+                    self.goal_to_waypoints(mean_point, goal),
+                    distance(mean_point, goal),
+                )
+
+                waypoints_1 += to_mean_wp + to_goal_wp
+                length_1 += to_mean_length + to_goal_length
+            else:
+                to_goal_wp, to_goal_length = (
+                    self.goal_to_waypoints(avoidance_point_1, goal),
+                    distance(avoidance_point_1, goal),
+                )
+                waypoints_1 += to_goal_wp
+                length_1 += to_goal_length
+
+            # waypoints_1, length_1 = self.build_waypoints(current, avoidance_point_1)
+            # wp_1, l_1 = self.build_waypoints(avoidance_point_1, goal)
+
+            avoidance_point_2 = Pose2D(
+                x=current.x + dist_to_avoidance * np.cos(phi_1 - phi_2),
+                y=current.y + dist_to_avoidance * np.sin(phi_1 - phi_2),
+            )
+            avoidance_point_2.theta = direction_to_goal(avoidance_point_2, goal)
+
+            # waypoints_2 = [avoidance_point_2]
+            # length_2 = distance(current, avoidance_point_2)
+
+            waypoints_2, length_2 = (
+                self.goal_to_waypoints(current, avoidance_point_2),
+                distance(current, avoidance_point_2),
+            )
+
+            if obstacle.has_weak_area and in_weak_area(obstacle, goal):
+                mean_point = Pose2D(
+                    x=obstacle.x + obstacle.radius * np.cos(obstacle.weak_angle),
+                    y=obstacle.y - obstacle.radius * np.sin(obstacle.weak_angle),
+                    theta=0.0,
+                )
+                # waypoints_2.append(mean_point)
+                # waypoints_2.append(goal)
+                # length_2 += distance(avoidance_point_2, mean_point)
+                # length_2 += distance(mean_point, goal)
+
+                to_mean_wp, to_mean_length = (
+                    self.goal_to_waypoints(avoidance_point_2, mean_point),
+                    distance(avoidance_point_2, mean_point),
+                )
+                to_goal_wp, to_goal_length = (
+                    self.goal_to_waypoints(mean_point, goal),
+                    distance(mean_point, goal),
+                )
+
+                waypoints_2 += to_mean_wp + to_goal_wp
+                length_2 += to_mean_length + to_goal_length
+            else:
+                to_goal_wp, to_goal_length = (
+                    self.goal_to_waypoints(avoidance_point_2, goal),
+                    distance(avoidance_point_2, goal),
+                )
+                waypoints_2 += to_goal_wp
+                length_2 += to_goal_length
+
+            # waypoints_2, length_2 = self.build_waypoints(current, avoidance_point_2)
+            # wp_2, l_2 = self.build_waypoints(avoidance_point_2, goal)
+
+            if length_1 < length_2:
+                return waypoints_1, length_1
+
+            return waypoints_2, length_2
+
+        return self.goal_to_waypoints(current, goal), distance(current, goal)
+
+    def update_waypoints(self, current: Pose2D, goal: Pose2D):
+        self.waypoints, _ = self.build_waypoints(current, goal)
+        self.waypoints.reverse()
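+        # Reversed so next_waypoint() / pop() take waypoints off the end of
+        # the list in path order.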
+        print("WAYPOINTS:", file=sys.stderr)
+        for waypoint in reversed(self.waypoints):
+            print(waypoint, file=sys.stderr)
+
+        print("OBSTACLES:", file=sys.stderr)
+        for obstacle in reversed(self.obstacles):
+            print(f"x: {obstacle.x}, y: {obstacle.y}", file=sys.stderr)
+
+    def next_waypoint(self) -> Pose2D | None:
+        return self.waypoints[-1] if len(self.waypoints) > 0 else None
diff --git a/src/simple_approach/simple_approach/approach_test.py b/src/simple_approach/simple_approach/approach_test.py
index efe12e3128c8c7897d4806b30586faf477a6c586..1679be634eb7c51df9e372424096760922ad997c 100644
--- a/src/simple_approach/simple_approach/approach_test.py
+++ b/src/simple_approach/simple_approach/approach_test.py
@@ -7,6 +7,8 @@ import numpy as np
 from SimpleApproach import RobotConfig
 from SimpleApproach import SimpleApproach, ApproachConfig
 
+from SimpleApproach import angular_distance
+
 
 def close_vectors(lhs: Vector3, rhs: Vector3, delta: float = 0.1) -> bool:
     return (
@@ -292,3 +294,22 @@ def test_far_from_goal_pose():
             angular=Vector3(x=0.0, y=0.0, z=-100.0),
         ),
     )
+
+
+def test_turn_computations():
+    for goal in np.linspace(-np.pi, np.pi, 1000):
+        for current in np.linspace(-np.pi, np.pi, 1000):
+            assert (
+                -np.pi
+                <= angular_distance(
+                    Pose2D(x=0.0, y=0.0, theta=goal),
+                    Pose2D(x=0.0, y=0.0, theta=current),
+                )
+                <= np.pi
+            )
+
+    print(
+        angular_distance(
+            Pose2D(x=0.0, y=0.0, theta=3.0), Pose2D(x=0.0, y=0.0, theta=-3.0)
+        )
+    )
diff --git a/src/strategy/launch/game.launch.py b/src/strategy/launch/game.launch.py
index bd4cbbc8f592cbb929e4e734131a0fa6d1045561..e0f36c90cb9c4f8018cfda8f07e8bc96ca4a2ed4 100644
--- a/src/strategy/launch/game.launch.py
+++ b/src/strategy/launch/game.launch.py
@@ -4,6 +4,9 @@ import os
 from ament_index_python.packages import get_package_share_directory
 import yaml
 import json
+from launch.actions import IncludeLaunchDescription
+from launch_ros.substitutions import FindPackageShare
+from launch.launch_description_sources import PythonLaunchDescriptionSource
 
 
 def load_json_file(file_path):
@@ -16,35 +19,49 @@ def load_yaml_file(file_path):
         return yaml.safe_load(file)
 
 
-def create_tf_nodes(static_transforms: dict):
-    tf_nodes = []
-    for transform in static_transforms["static_transforms"]:
-        node = Node(
-            package="tf2_ros",
-            executable="static_transform_publisher",
-            name=transform["child_frame_id"],
-            arguments=[
-                str(transform["translation"][0]),
-                str(transform["translation"][1]),
-                str(transform["translation"][2]),
-                str(transform["rotation"][0]),
-                str(transform["rotation"][1]),
-                str(transform["rotation"][2]),
-                str(transform["rotation"][3]),
-                transform["parent_frame_id"],
-                transform["child_frame_id"],
-            ],
-        )
-        tf_nodes.append(node)
-    return tf_nodes
+ROBOT_NAMESPACE_TEMPLATE = "robot_{id}"
+LOCALIZATION_TOPIC_TEMPLATE = "robot_{id}/localization"
+POSE_TOPIC_TEMPLATE = "robot_{id}/pose"
+SET_POSE_TOPIC_TEMPLATE = "robot_{id}/set_pose"
+ODOMETRY_FILTERED_TEMPLATE = "robot_{id}/odometry/filtered"
+FALLEN_TOPIC_TEMPLATE = "robot_{id}/fallen"
+FILTERED_TOPIC_TEMPLATE = "robot_{id}/filtered_3d_pose"
 
 
 def create_localization_nodes(robots_config) -> list[Node]:
     robots = load_json_file(robots_config)["robots"]
+    localization_nodes = []
+    config = os.path.join(
+        get_package_share_directory("localisation"), "config", "androsot.yaml"
+    )
+
     for robot_id in robots:
-        # preprocessor node
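+        # One robot_localization EKF plus a decode node per robot, with
+        # topics remapped into the robot_{id}/ namespace.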
+        ekf_node = Node(
+            package="robot_localization",
+            executable="ekf_node",
+            parameters=[config],
+            remappings=[
+                ("pose", POSE_TOPIC_TEMPLATE.format(id=robot_id)),
+                ("odometry/filtered", ODOMETRY_FILTERED_TEMPLATE.format(id=robot_id)),
+                ("set_pose", SET_POSE_TOPIC_TEMPLATE.format(id=robot_id)),
+            ],
+        )
+
+        decode_node = Node(
+            package="localisation",
+            executable="decode_node",
+            name=f"decode_node_robot_{robot_id}",
+            remappings=[
+                ("/odometry/filtered", ODOMETRY_FILTERED_TEMPLATE.format(id=robot_id)),
+                ("/fallen", FALLEN_TOPIC_TEMPLATE.format(id=robot_id)),
+                ("/localization", LOCALIZATION_TOPIC_TEMPLATE.format(id=robot_id)),
+                ("/filtered_3d_pose", FILTERED_TOPIC_TEMPLATE.format(id=robot_id)),
+            ],
+        )
+        localization_nodes.append(ekf_node)
+        localization_nodes.append(decode_node)
         # localization node
-        pass
+    return localization_nodes
 
 
 def create_approach_nodes(robots_config) -> list[Node]:
@@ -52,53 +69,33 @@ def create_approach_nodes(robots_config) -> list[Node]:
 
 
 def generate_launch_description():
-    camera_config1 = os.path.join(
-        get_package_share_directory("server_main"), "config", "camera_params.yaml"
-    )
-    camera_config2 = os.path.join(
-        get_package_share_directory("server_main"), "config", "camera_params2.yaml"
-    )
     robot_config = os.path.join(
         get_package_share_directory("server_main"), "config", "robots_config.json"
     )
-    yolo_dir = get_package_share_directory("yolo_openvino_ros2")
-    yolo_config_dir = yolo_dir + "/config/"
-
-    tf_config = os.path.join(
-        get_package_share_directory("calibration_node"),
-        "config",
-        "static_transforms.yaml",
-    )
+    yolo_config_dir = get_package_share_directory("yolo_openvino_ros2") + "/config/"
 
-    static_transforms = load_yaml_file(tf_config)
-    tf_nodes = create_tf_nodes(static_transforms)
     localization_nodes = create_localization_nodes(robot_config)
     create_approach_nodes(robot_config)
 
     return LaunchDescription(
         [
-            Node(
-                package="usb_cam",
-                executable="usb_cam_node_exe",
-                parameters=[camera_config1],
-                output="screen",
-                emulate_tty=True,
-                namespace="camera1",
+            IncludeLaunchDescription(
+                PythonLaunchDescriptionSource(
+                    [
+                        FindPackageShare("localization_preprocessor"),
+                        "/launch",
+                        "/multi_camera_aruco_preprocessor.launch.py",
+                    ]
+                )
             ),
-            Node(
-                package="ros2_aruco",
-                executable="aruco_node",
-                parameters=[
-                    {"camera_frame": "camera1"},
-                    {"aruco_dictionary_id": "DICT_4X4_100"},
-                    {"marker_size": 0.07},
-                ],
-                remappings=[
-                    ("/camera/image_raw", "/camera1/image_raw"),
-                    ("/camera/camera_info", "/camera1/camera_info"),
-                ],
-                output="screen",
-                emulate_tty=True,
+            IncludeLaunchDescription(
+                PythonLaunchDescriptionSource(
+                    [
+                        FindPackageShare("simple_approach"),
+                        "/launch",
+                        "/approach.launch.py",
+                    ]
+                )
             ),
             Node(
                 package="yolo_openvino_ros2",
@@ -110,29 +107,6 @@ def generate_launch_description():
                 ],
                 parameters=[os.path.join(yolo_config_dir, "yolo_roma.yaml")],
             ),
-            Node(
-                package="ros2_aruco",
-                executable="aruco_node",
-                parameters=[
-                    {"camera_frame": "camera2"},
-                    {"aruco_dictionary_id": "DICT_4X4_100"},
-                    {"marker_size": 0.07},
-                ],
-                remappings=[
-                    ("/camera/image_raw", "/camera2/image_raw"),
-                    ("/camera/camera_info", "/camera2/camera_info"),
-                ],
-                output="screen",
-                emulate_tty=True,
-            ),
-            Node(
-                package="usb_cam",
-                executable="usb_cam_node_exe",
-                parameters=[camera_config2],
-                output="screen",
-                emulate_tty=True,
-                namespace="camera2",
-            ),
             Node(
                 package="yolo_openvino_ros2",
                 executable="yolo_openvino_node",
@@ -146,18 +120,21 @@ def generate_launch_description():
                 ],
                 output="screen",
             ),
+            Node(
+                package="pic_to_real",
+                executable="ball_to_real",
+                output="screen",
+                emulate_tty=True,
+                parameters=[{"camera_frames": ["camera1", "camera2"]}],
+            ),
             *localization_nodes,
-            *tf_nodes,
             # launch localization for every robot
             # launch approach_teb for every robot
-            # launch strategy
-            # launch pic2real camera1
-            # launch pic2real camera2
             Node(
                 package="strategy",
                 executable="strategy_node",
                 parameters=[
-                    {"robots_params": robot_config},
+                    {"robots_config": robot_config},
                 ],
                 output="screen",
                 emulate_tty=True,
diff --git a/src/strategy/launch/solo_robot_strategy.launch.py b/src/strategy/launch/solo_robot_strategy.launch.py
new file mode 100644
index 0000000000000000000000000000000000000000..efbddc69c13e78dd0662cb8579d73d900ff56769
--- /dev/null
+++ b/src/strategy/launch/solo_robot_strategy.launch.py
@@ -0,0 +1,70 @@
+from launch import LaunchDescription
+from launch_ros.actions import Node
+import os
+from ament_index_python.packages import get_package_share_directory
+import yaml
+import json
+
+
+def load_json_file(file_path):
+    with open(file_path, "r") as file:
+        return json.load(file)
+
+
+def load_yaml_file(file_path):
+    with open(file_path, "r") as file:
+        return yaml.safe_load(file)
+
+
+def create_tf_nodes(static_transforms: dict):
+    tf_nodes = []
+    for transform in static_transforms["static_transforms"]:
+        node = Node(
+            package="tf2_ros",
+            executable="static_transform_publisher",
+            name=transform["child_frame_id"],
+            arguments=[
+                str(transform["translation"][0]),
+                str(transform["translation"][1]),
+                str(transform["translation"][2]),
+                str(transform["rotation"][0]),
+                str(transform["rotation"][1]),
+                str(transform["rotation"][2]),
+                str(transform["rotation"][3]),
+                transform["parent_frame_id"],
+                transform["child_frame_id"],
+            ],
+        )
+        tf_nodes.append(node)
+    return tf_nodes
+
+
+def create_localization_nodes(robots_config) -> list[Node]:
+    robots = load_json_file(robots_config)["robots"]
+    for robot_id in robots:
+        # preprocessor node
+        # localization node
+        pass
+
+
+def create_approach_nodes(robots_config) -> list[Node]:
+    pass
+
+
+def generate_launch_description():
+    robot_config = os.path.join(
+        get_package_share_directory("server_main"), "config", "robots_config.json"
+    )
+    return LaunchDescription(
+        [
+            Node(
+                package="strategy",
+                executable="strategy_node",
+                parameters=[
+                    {"robots_config": robot_config},
+                ],
+                output="screen",
+                emulate_tty=True,
+            ),
+        ]
+    )
diff --git a/src/strategy/setup.py b/src/strategy/setup.py
index 0c464f307317ecc7fbaf8e8631a91f4a39bf5646..d165b8f3dcf656659655d3b065681159f6555e16 100644
--- a/src/strategy/setup.py
+++ b/src/strategy/setup.py
@@ -1,4 +1,6 @@
 from setuptools import setup
+import os
+from glob import glob
 
 package_name = "strategy"
 
@@ -9,6 +11,10 @@ setup(
     data_files=[
         ("share/ament_index/resource_index/packages", ["resource/" + package_name]),
         ("share/" + package_name, ["package.xml"]),
+        (
+            os.path.join("share", package_name, "launch"),
+            glob(os.path.join("launch", "*launch.[pxy][yma]*")),
+        ),
     ],
     install_requires=["setuptools"],
     zip_safe=True,
diff --git a/src/strategy/strategy/solo_robot_strategy.py b/src/strategy/strategy/solo_robot_strategy.py
index 3249649c3ec669975e1a23d6795ef7e7c0548d4b..ac4536cf354b617377e1bcab9affef560ea46135 100644
--- a/src/strategy/strategy/solo_robot_strategy.py
+++ b/src/strategy/strategy/solo_robot_strategy.py
@@ -3,11 +3,12 @@ from enum import Enum
 import typing as tp
 from geometry_msgs.msg import Pose2D
 import math
+import time
 
 # need struct with motions mapping and json
 KICK_RIGHT = 128
 KICK_LEFT = 64
-STAND_UP = None
+STAND_UP = 2097152  # 2**21
 FIELD_LENGTH = 4.0
 FIELD_WIDE = 3.0
 
@@ -72,40 +73,36 @@ def angle_betwen(from_: Pose2D, to: Pose2D) -> float:
     return math.atan2(y, x)
 
 
-def approach_action(robot_info: RobotInfo) -> tp.Optional[Cmd]:
-    go_pose = robot_info.ball_pose  # copy ?
-    goal_centre = Pose2D()
-    goal_centre.x = 0  # FIELD_WIDE / 2
-    goal_centre.y = FIELD_LENGTH / 2
-    goal_centre.theta = 0
-    # FIELD_WIDE / 2, FIELD_LENGTH / 2, 0)
-    go_pose.theta = angle_betwen(robot_info.robot_pose, goal_centre)
-    robot_info.goal_pose = go_pose
-    return Cmd(1, go_pose)
-
-
 def kick_action(robot_info: RobotInfo) -> tp.Optional[Cmd]:
     # def calculate leg for kick
     dx = robot_info.ball_pose.x - robot_info.robot_pose.x
     dy = robot_info.ball_pose.y - robot_info.robot_pose.y
     theta = math.atan2(dy, dx)
     if theta > robot_info.robot_pose.theta:
-        return Cmd(2, KICK_LEFT)
+        return Cmd(CmdType.MOTION, KICK_RIGHT)
     else:
-        return Cmd(2, KICK_LEFT)
+        return Cmd(CmdType.MOTION, KICK_LEFT)
 
 
 def stand_up_action(robot_info: RobotInfo):
-    return Cmd(2, STAND_UP)
+    return Cmd(CmdType.MOTION, STAND_UP)
 
 
 def wait_action(robot_info: RobotInfo) -> tp.Optional[Cmd]:
     return None
 
 
+def approach_action(robot_info: RobotInfo) -> tp.Optional[Cmd]:
+    go_pose = robot_info.ball_pose  # copy ?
+    # FIELD_WIDE / 2, FIELD_LENGTH / 2, 0)
+    go_pose.theta = math.pi  # angle_betwen(robot_info.robot_pose, goal_centre)
+    robot_info.goal_pose = go_pose
+    return Cmd(CmdType.GO, go_pose)
+
+
 def go_home_action(robot_info: RobotInfo) -> tp.Optional[Cmd]:
-    # approach to home position
-    return None
+    robot_info.goal_pose = robot_info.home_position
+    return Cmd(CmdType.GO, robot_info.goal_pose)
 
 
 attacker_state_actions = {
@@ -116,8 +113,8 @@ attacker_state_actions = {
     RobotState.GO_HOME: go_home_action,
 }
 
-distance_epsilon = 0.05
-angle_epsilon_rad = 0.35  # 20 degree
+DISTANCE_EPS = 0.2
+ANGLE_EPS = 0.5
 
 
 # Other metric ?
@@ -125,13 +122,23 @@ def is_ball_near_to_robot(robot_info: RobotInfo):
     print(f"Goal is {robot_info.goal_pose}")
     print(f"Robot pose is {robot_info.robot_pose}")
     return (
-        abs(robot_info.goal_pose.x - robot_info.robot_pose.x) < distance_epsilon
-        and abs(robot_info.goal_pose.y - robot_info.robot_pose.y) < distance_epsilon
-        and abs(robot_info.goal_pose.theta - robot_info.robot_pose.theta)
-        < angle_epsilon_rad
+        abs(robot_info.goal_pose.x - robot_info.robot_pose.x) < DISTANCE_EPS
+        and abs(robot_info.goal_pose.y - robot_info.robot_pose.y) < DISTANCE_EPS
+        and abs(comp_turn(robot_info.goal_pose.theta, robot_info.robot_pose.theta))
+        < ANGLE_EPS
     )
 
 
+def comp_turn(goal, pos):
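+    # Wrap the turn angle goal - pos into [-pi, pi).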
+    turn = goal - pos
+    if turn >= math.pi:
+        return turn - 2 * math.pi
+    elif turn < -math.pi:
+        return turn + 2 * math.pi
+    else:
+        return turn
+
+
 def is_robot_at_home(robot_info):
     return True
 
@@ -210,6 +217,85 @@ attacker_state_transitions = {
 }
 
 
+def is_ball_on_my_side(robot_info: RobotInfo) -> bool:
+    return robot_info.ball_pose.x < 0.0
+
+
+def is_robot_on_my_side(robot_info: RobotInfo) -> bool:
+    return robot_info.robot_pose.x < 0
+
+
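+# Defender FSM: wait on our own half, approach only while the ball is on
+# our side, kick when the ball is close, and otherwise return home.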
+defender_state_transitions = {
+    RobotState.WAIT: StateDict(
+        (RobotState.FALL, RobotState.GO_HOME, RobotState.APPROACH),
+        {
+            RobotState.FALL: Transition(
+                lambda robot_info: robot_info.is_fallen, lambda _: None
+            ),
+            RobotState.APPROACH: Transition(
+                lambda robot_info: robot_info.game_state == GameState.RUN
+                and is_ball_on_my_side(robot_info),
+                lambda _: print("SET PARAM Approach On"),
+            ),
+            RobotState.GO_HOME: Transition(
+                lambda robot_info: robot_info.game_state == GameState.START
+                or not is_robot_on_my_side(robot_info),
+                lambda _: None,
+            ),
+        },
+    ),
+    RobotState.APPROACH: StateDict(
+        (RobotState.FALL, RobotState.KICK, RobotState.WAIT),
+        {
+            RobotState.FALL: Transition(
+                lambda robot_info: robot_info.is_fallen, lambda _: None
+            ),
+            RobotState.WAIT: Transition(
+                lambda robot_info: robot_info.game_state == GameState.PAUSED
+                or not is_ball_on_my_side(robot_info),
+                lambda _: None,
+            ),
+            RobotState.KICK: Transition(
+                lambda robot_info: is_ball_near_to_robot(robot_info),
+                lambda _: print("SET param off"),
+            ),
+        },
+    ),
+    RobotState.KICK: StateDict(
+        (RobotState.FALL, RobotState.APPROACH, RobotState.WAIT),
+        {
+            RobotState.FALL: Transition(
+                lambda robot_info: robot_info.is_fallen, lambda _: None
+            ),
+            RobotState.APPROACH: Transition(
+                lambda robot_info: not is_ball_near_to_robot(robot_info),
+                lambda _: print("Set param on"),
+            ),
+            RobotState.WAIT: Transition(lambda robot_info: True, lambda _: None),
+        },
+    ),
+    RobotState.GO_HOME: StateDict(
+        (RobotState.FALL, RobotState.WAIT),
+        {
+            RobotState.FALL: Transition(
+                lambda robot_info: robot_info.is_fallen, lambda _: None
+            ),
+            RobotState.WAIT: Transition(
+                lambda robot_info: is_robot_at_home(robot_info), lambda _: None
+            ),
+        },
+    ),
+    RobotState.FALL: StateDict(
+        (RobotState.WAIT,),
+        {
+            RobotState.WAIT: Transition(
+                lambda robot_info: not robot_info.is_fallen, lambda _: None
+            )
+        },
+    ),
+}
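+
+# Reading the transition tables: each step fires the first transition (in
+# declared order) whose condition holds, e.g.
+#   defender_state_transitions[RobotState.WAIT]
+#       .state_transitions[RobotState.APPROACH].condition(robot_info)
+# becomes True once game_state == GameState.RUN and the ball is on our half.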
+
+
 class Robot(object):
     def __init__(
         self,
@@ -227,6 +313,7 @@ class Robot(object):
         self.state_transition_dict = state_transition_dict
         self.robot_info = start_robot_info
         self.cmd = None
+        self.fall_time_start: float | None = None  # wall-clock time of the last fall
 
     def set_robot_info(self, robot_info):
         self.robot_info = robot_info
@@ -239,36 +326,47 @@ class Robot(object):
         return self.robot_info.robot_pose
 
     def get_cmd(self) -> tp.Optional[Cmd]:
-        self.tmp = self.cmd
-        self.cmd = None
-        return self.tmp
-
-    def set_fallen(self, is_fallen):
-        self.robot_info.is_fallen = is_fallen
+        import copy  # local import; ideally hoisted to module level
 
-    def set_ball_pose(self, ball_pose: Pose2D):
+        tmp = copy.copy(self.cmd)
+        self.cmd = None
+        print(f"GET CMD{tmp}")
+        return tmp
+
+    def set_fallen(self, is_fallen: bool) -> None:
+        # Debounce recovery: hold the fallen flag for at least 10 s after a
+        # fall, presumably to give the stand-up motion time to finish.
+        if (not is_fallen) and self.robot_info.is_fallen:
+            if time.time() - self.fall_time_start > 10:
+                self.robot_info.is_fallen = is_fallen
+        elif is_fallen and (not self.robot_info.is_fallen):
+            self.robot_info.is_fallen = is_fallen
+            self.fall_time_start = time.time()
+        else:
+            self.robot_info.is_fallen = is_fallen
+
+    def set_ball_pose(self, ball_pose: Pose2D) -> None:
         self.robot_info.ball_pose = ball_pose
 
-    def set_game_state(self, game_state: GameState):
+    def set_game_state(self, game_state: GameState) -> None:
         self.robot_info.game_state = game_state
 
     def step(self) -> None:
         # print(f"dict: {self.state_transition_dict}")
         # print(f"\n for state {self.state}: {self.state_transition_dict[self.state]}\n\n\n")
-        for state in self.state_transition_dict[self.state].states:
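+        # At most one transition fires per step: transitions are scanned in
+        # declared order, the first whose condition holds runs its action,
+        # and the robot switches to that state.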
+        for next_state in self.state_transition_dict[self.state].states:
             if (
                 self.state_transition_dict[self.state]
-                .state_transitions[state]
+                .state_transitions[next_state]
                 .condition(self.robot_info)
             ):
-                print(state)
-                self.state_transition_dict[self.state].state_transitions[state].action(
-                    self.robot_info
-                )
-                self.action = self.state_action_dict[state]
-                self.state = state
+                print(f"State is {next_state}\n")
+                self.state_transition_dict[self.state].state_transitions[
+                    next_state
+                ].action(self.robot_info)
+                self.action = self.state_action_dict[next_state]
+                self.state = next_state
                 break
         cmd = self.action(self.robot_info)
+        print(f"CMD is {cmd}\n")
         if cmd is not None:
             self.cmd = cmd
 
@@ -306,7 +404,7 @@ class RobotTopicConfig:
 
 
 class RobotFabrica:
-    def __init__(self, config):
+    def __init__(self, config: dict) -> None:  # parsed robots config dict
         self.config = config
         self.robot_configs = []
         self.robots = dict()
@@ -318,9 +416,9 @@ class RobotFabrica:
             self.robot_configs.append(
                 RobotTopicConfig(
                     robot_name,
-                    robot_name + "/filtered_pose",
-                    robot_name + "/cmd",
-                    robot_name + "/plan_goal",
+                    robot_name + "/localization",
+                    robot_name + "/action",
+                    robot_name + "/goal_position",
                     robot_name + "/fallen",
                 )
             )
@@ -337,12 +435,12 @@ class RobotFabrica:
             else:
                 home_pose = Pose2D()
                 home_pose.y = -0.5
-                home_pose.x = -0.5 + 0.5 * int(robot_id)
-                home_pose.theta = 1.57
+                home_pose.x = -1.2 + 0.5 * int(robot_id)
+                home_pose.theta = 0.0
                 default_robot_info = RobotInfo(
                     Pose2D(),
                     Pose2D(),
-                    GameState.START,
+                    GameState.RUN,
                     Role.ATTACKER,
                     Pose2D(),
                     home_pose,
@@ -354,15 +452,21 @@ class RobotFabrica:
                     attacker_state_actions,
                     default_robot_info,
                 )
+                self.robots["robot_1"] = Robot(
+                    Role.ATTACKER,
+                    attacker_state_transitions,
+                    attacker_state_actions,
+                    default_robot_info,  # Role info is Attacker
+                )
 
     def get_robots(self) -> dict[str, Robot]:
         return self.robots
 
-    def get_robots_configs(self) -> list[RobotTopicConfig]:
+    def get_robot_configs(self) -> list[RobotTopicConfig]:
         return self.robot_configs
 
 
-class Strategy(object):
+class Strategy:
     def __init__(self, fabrica: RobotFabrica) -> None:
         self.fabrica = fabrica
         self.robots = self.fabrica.get_robots()
@@ -370,7 +474,7 @@ class Strategy(object):
         self.game_state = GameState.START
 
     def set_robot_pose(self, name: str, pose: Pose2D):
-        # try
+        print(f"\n\n\n{name} Keys is: {self.robots.keys()}")
         self.robots[name].update_pose(pose)
 
     def set_ball_pose(self, pose: Pose2D):
@@ -384,7 +488,6 @@ class Strategy(object):
 
     def set_fallen(self, name: str, is_fallen: bool):
         self.robots[name].set_fallen(is_fallen)
-        pass
 
     def step(self):
         for robot in self.robots.values():
diff --git a/src/strategy/strategy/strategy_node.py b/src/strategy/strategy/strategy_node.py
index 998ce3900e1d8d44bc884aea456f21743dd148e3..2ec164024e989d2326f7c9121ece743ffe788aaa 100644
--- a/src/strategy/strategy/strategy_node.py
+++ b/src/strategy/strategy/strategy_node.py
@@ -1,48 +1,67 @@
-from geometry_msgs.msg import PoseWithCovarianceStamped, Pose2D
-from std_msgs.msg import Int32
+from geometry_msgs.msg import Pose2D
+from std_msgs.msg import Int32, Bool
 
 
 import rclpy
 from rclpy.node import Node
+import time
+from strategy.solo_robot_strategy import Strategy, Cmd, CmdType, RobotFabrica
+import pathlib
+import json
 
-from solo_robot_strategy import Strategy, Cmd, CmdType, RobotFabrica
+robots_config: dict = {"robots": {"0": {}}}
 
-robots_config = {"robots": {"0": {}}}
+
+def load_json_file(file_path: pathlib.Path) -> dict:
+    with open(file_path, "r") as file:
+        return json.load(file)
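+
+
+# Expected config shape (sketch, mirroring the robots_config default above):
+#   {"robots": {"0": {}}}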
 
 
 class StrategyNode(Node):
     def __init__(self):
-        self.declare_parameter("robots_config", rclpy.Parameter.Type.STRING_ARRAY)
-        self.robot_config = self.get_parameter("robots_config")
+        super().__init__("strategy_node")
+        self.declare_parameter("robots_config", rclpy.Parameter.Type.STRING)
+        self.robot_config = pathlib.Path(
+            self.get_parameter("robots_config").get_parameter_value().string_value
+        )
         #
         # [TODO] parameter for receiving game events from the CLI
         #
-        self.fabrica = RobotFabrica(self.robot_config)
+        self.fabrica = RobotFabrica(load_json_file(self.robot_config))
         self.strategy = Strategy(self.fabrica)
+        self.queue_size = 10
+        self.approach_pubs = {}
+        self.low_lvl_pubs = {}
         self._init_topics()
-        self.timer = self.create_timer(0.01, self.step)
+        self.timer = self.create_timer(0.1, self.step)
-        self.queue_size = 10
+        self.ball_pose_sub = self.create_subscription(
+            Pose2D, "ball_coordinate_world", self._ball_cb, self.queue_size
+        )
+
+    def _ball_cb(self, msg: Pose2D):
+        self.strategy.set_ball_pose(msg)
 
     def _init_topics(self):
-        robot_configs = self.fabrica.get_robot_configs()
-        self.approach_pubs = {}
-        self.low_lvl_pubs = {}
-        for config in robot_configs:
+        self.robot_configs = self.fabrica.get_robot_configs()
+        for config in self.robot_configs:
             # ideally this would arrive via tf, but a plain Pose2D is fine here
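+            # robot_name is bound as a default argument in the lambdas below
+            # so each callback captures this iteration's value (avoids the
+            # late-binding closure pitfall).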
             self.create_subscription(
-                PoseWithCovarianceStamped,
+                Pose2D,
                 config.localization_topic,
-                lambda msg: self.strategy.set_robot_pose(
-                    config.robot_name, self._msg_preprocess(msg)
+                lambda msg, robot_name=config.robot_name: self.strategy.set_robot_pose(
+                    robot_name, self._msg_preprocess(msg)
                 ),
                 self.queue_size,
             )
             self.create_subscription(
-                Int32,
-                config.low_level_topic,
-                lambda msg: self.strategy.set_fallen(msg.data),
+                Bool,
+                config.fallen_topic,
+                lambda msg, robot_name=config.robot_name: self.strategy.set_fallen(
+                    robot_name, msg.data
+                ),
                 self.queue_size,
-            )
+            )
             self.approach_pubs[config.robot_name] = self.create_publisher(
                 Pose2D, config.approach_topic, self.queue_size
             )
@@ -52,27 +71,29 @@ class StrategyNode(Node):
 
     def step(self):
         self.strategy.step()
-        for name in self.robot_publishers:
-            if self.strategy.is_have_cmd(name):
-                cmd = self.strategy.get_cmd()
-                if cmd is not None:
-                    self.send_cmd(cmd)
+        for config in self.robot_configs:
+            cmd = self.strategy.get_cmd(config.robot_name)
+            if cmd is not None:
+                self.send_cmd(config.robot_name, cmd)
 
     def send_cmd(self, robot_name, cmd: Cmd):
         if cmd.type_ == CmdType.GO:
-            msg = self._create_approach_msg(cmd)
+            msg = StrategyNode._create_approach_msg(cmd)
             self.approach_pubs[robot_name].publish(msg)
-        elif cmd.type_ == CmdType.KICK:
+
+        elif cmd.type_ == CmdType.MOTION:
             # TODO: stop the approach first, then send the kick command
-            msg = self._create_low_lvl_msg(cmd)
+            msg = StrategyNode._create_low_lvl_msg(cmd)
             self.low_lvl_pubs[robot_name].publish(msg)
 
+    @staticmethod
     def _create_low_lvl_msg(cmd: Cmd) -> Int32:
-        msg = Int32()
+        msg = Int32()  # data is the id of the low-level motion to run
         msg.data = cmd.data
         return msg
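+
+    # Example: Cmd(CmdType.MOTION, KICK_LEFT) serializes to Int32(data=KICK_LEFT);
+    # the receiving low-level node presumably maps this id to a motion primitive.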
 
+    @staticmethod
     def _create_approach_msg(cmd: Cmd) -> Pose2D:
         msg = Pose2D()
         msg.x = cmd.data.x
@@ -86,7 +107,7 @@ class StrategyNode(Node):
 
 def main(args=None):
     rclpy.init(args=args)
-
+    time.sleep(30)  # crude startup delay so dependent nodes come up first
     strategy_node = StrategyNode()
 
     rclpy.spin(strategy_node)
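+
+    # Example invocation (hypothetical package/executable names inferred from
+    # the source layout):
+    #   ros2 run strategy strategy_node --ros-args \
+    #       -p robots_config:=/path/to/robots.json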
diff --git a/src/strategy/strategy/strategy_test.py b/src/strategy/test/test_strategy.py
similarity index 53%
rename from src/strategy/strategy/strategy_test.py
rename to src/strategy/test/test_strategy.py
index 863deb5eae52f487a31e63cec7043a3a85230a92..9a9f90cb0da64ce9d3f68ea2bdd0b33a45945a18 100644
--- a/src/strategy/strategy/strategy_test.py
+++ b/src/strategy/test/test_strategy.py
@@ -1,5 +1,6 @@
-from solo_robot_strategy import RobotState, GameState, RobotFabrica, Strategy
+from strategy.solo_robot_strategy import RobotState, GameState, RobotFabrica, Strategy
 from dataclasses import dataclass
+import pytest
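+
+# "regress" is a custom pytest mark; registering it, e.g. in pytest.ini:
+#   [pytest]
+#   markers = regress: regression tests
+# silences PytestUnknownMarkWarning and enables selection via `pytest -m regress`.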
 
 
 @dataclass
@@ -12,6 +13,7 @@ class Pose2D:
 solo_config = {"robots": {"0": {None}}}
 
 
+@pytest.mark.regress
 def test_dummy_strategy():
     fabrica = RobotFabrica(solo_config)
     strategy = Strategy(fabrica)
@@ -22,15 +24,16 @@ def test_dummy_strategy():
     strategy.set_ball_pose(Pose2D(0.0, 1.0, 0.0))
     strategy.step()
     assert strategy.robots["robot_0"].state == RobotState.APPROACH
-    strategy.set_robot_pose("robot_0", Pose2D(0.0, 1.00001, 1.57))
+    strategy.set_robot_pose("robot_0", Pose2D(0.0, 1.00001, 3.14))
     strategy.step()
-    print(strategy.robots["robot_0"].state)
+    print("DYMMY:", strategy.robots["robot_0"].state)
     assert strategy.robots["robot_0"].state == RobotState.KICK
     strategy.step()
     assert strategy.robots["robot_0"].state == RobotState.WAIT
 
 
-def test_full_strategy():
+@pytest.mark.regress
+def test_full_attack_strategy():
     fabrica = RobotFabrica(solo_config)
     strategy = Strategy(fabrica)
     strategy.change_game_state(GameState.START)
@@ -62,3 +65,40 @@ def test_full_strategy():
     assert strategy.robots["robot_0"].state == RobotState.WAIT
     strategy.change_game_state(GameState.START)
     # assert(strategy.robots["robot_0"].state == RobotState.GO_HOME)
+
+
+@pytest.mark.regress
+def test_full_defender_strategy():
+    fabrica = RobotFabrica(solo_config)
+    strategy = Strategy(fabrica)
+    strategy.change_game_state(GameState.START)
+    strategy.set_robot_pose("robot_1", Pose2D(0.0, 0.0, 0.0))
+    strategy.step()
+    strategy.step()
+    # assert(strategy.robots["robot_0"].state == RobotState.GO_HOME)
+    # strategy.set_robot_pose("robot_0", Pose2D(-0.5, -0.5, 1.57))
+    # strategy.step()
+    assert strategy.robots["robot_1"].state == RobotState.WAIT
+    strategy.change_game_state(GameState.RUN)
+    strategy.step()
+    assert strategy.robots["robot_1"].state == RobotState.GO_HOME
+    strategy.set_robot_pose("robot_1", Pose2D(-0.5, 1.0, 1.57))
+    strategy.step()
+    assert strategy.robots["robot_1"].state == RobotState.WAIT
+    strategy.set_fallen("robot_1", True)
+    strategy.step()
+    assert strategy.robots["robot_1"].state == RobotState.FALL
+    strategy.step()
+    assert strategy.robots["robot_1"].state == RobotState.FALL
+    strategy.set_fallen("robot_1", False)
+    strategy.step()
+    assert strategy.robots["robot_1"].state == RobotState.WAIT
+    strategy.set_robot_pose("robot_1", Pose2D(-0.5, 1.0, 1.57))
+    strategy.set_ball_pose(Pose2D(-0.5, 1.0, 0.0))
+    strategy.step()
+    assert strategy.robots["robot_1"].state == RobotState.APPROACH
+    strategy.change_game_state(GameState.PAUSED)
+    strategy.step()
+    assert strategy.robots["robot_1"].state == RobotState.WAIT
+    strategy.change_game_state(GameState.START)
+    # assert(strategy.robots["robot_0"].state == RobotState.GO_HOME)
diff --git a/src/yolo_openvino_ros2 b/src/yolo_openvino_ros2
index f81be87a2a302c7e67c81fce6981fa94ac27ea4c..d86230cca888d41e1c1a5f957abecd5bfda8e0ba 160000
--- a/src/yolo_openvino_ros2
+++ b/src/yolo_openvino_ros2
@@ -1 +1 @@
-Subproject commit f81be87a2a302c7e67c81fce6981fa94ac27ea4c
+Subproject commit d86230cca888d41e1c1a5f957abecd5bfda8e0ba
diff --git a/workspace_build b/workspace_build
index e09d535c415d3da857be932dcc4b662621f5ee09..10544745a7e3d3781e0c804f2b9766e17b5a875b 100755
--- a/workspace_build
+++ b/workspace_build
@@ -1 +1,2 @@
-colcon build --symlink-install --continue-on-error
+colcon build --symlink-install --continue-on-error --packages-skip teb_local_planner teb_msgs costmap_converter costmap_converter_msgs odometry_estimator androsot_approach_teb
+source install/setup.bash