import jyro.simulator as jy
import random
import numpy as np
robot = jy.Pioneer("Pioneer", 3.5, 2, 0)
robot.addDevice(jy.Pioneer16Sonars())
robot.addDevice(jy.DepthCamera(4))
light_sensors = jy.PioneerFrontLightSensors(3.0)
light_sensors.lightMode = 'ambient'
robot.addDevice(light_sensors)
def worldf(physics):
    physics.addBox(0, 0, 4, 4, fill="backgroundgreen", wallcolor="gray")
    physics.addLight(2, 0.75, 1.0)  # increased brightness for the new linear version of lights
sim = jy.VSimulator(robot, worldf)
camera = robot.device["camera"]
image = camera.getImage()
image
image.size
(60, 40)
data = camera.getData()
data.shape
(40, 60, 3)
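# Hedged sketch (not from the original notebook): summarize the (40, 60, 3)
# camera array with NumPy to confirm its layout and value range; `data` is
# the array returned by camera.getData() above.
gray = data.mean(axis=2)  # average the three channels -> (40, 60)
print(gray.shape, float(gray.min()), float(gray.max()))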
robot.move(0.50, 0.35)
sim.step()
robot = jy.Pioneer("Pioneer", 3.5, 2, 0)
robot.addDevice(jy.Pioneer16Sonars())
robot.addDevice(jy.Camera())
light_sensors = jy.PioneerFrontLightSensors(3.0)
light_sensors.lightMode = 'ambient'
robot.addDevice(light_sensors)
sim = jy.VSimulator(robot, worldf)
camera = robot.device["camera"]
image = camera.getImage()
image
data = camera.getData()
data.shape
(40, 60, 3)
def random_action():
    """Generate a random action from a limited set of possible settings."""
    possible = [-1.0, -0.5, 0.0, 0.5, 1.0]
    return [random.choice(possible), random.choice(possible)]
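# Hedged usage sketch: sample a few random actions to see the kind of
# (translate, rotate) pairs the brain below will draw from.
for _ in range(3):
    print(random_action())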
def get_senses(robot):
    light = robot["light"].getData()
    sonar = [v / 3.0 for v in robot["sonar"].getData()]
    camera = robot["camera"].getData()
    return [light, sonar, camera]
senses = get_senses(robot)
list(map(len, senses))
[2, 16, 40]
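# Hedged sketch: len() only reports the first dimension of the camera array,
# so print the full shape of each sense modality instead (np was imported
# above; the names just label the three entries returned by get_senses).
for name, part in zip(["light", "sonar", "camera"], senses):
    print(name, np.asarray(part).shape)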
robot.history = []
def brain(robot):
    senses = get_senses(robot)
    net.propagate(senses)
    translate, rotate = random_action()
    # robot.move(translate, rotate)
    robot.history.append(robot.getPose())
    robot.move(0.50, 0.35)
robot.brain = brain
import conx as cx
Using TensorFlow backend.
ConX, version 3.7.4
net = cx.Network("Robot Prediction Network")
net.add(cx.Layer("light", 2),
        cx.Layer("sonar", 16),
        cx.ImageLayer("camera", (40, 60), 3),
        cx.FlattenLayer("flatten"),
        cx.Conv2DLayer("conv", 16, (3, 3), keep_aspect_ratio=True),
        cx.Layer("hidden", 50, activation="relu"),
        cx.Layer("output1", 2, activation="sigmoid"),
        cx.Layer("hidden2", 5, activation="sigmoid"),
        cx.Layer("hidden3", 10, activation="sigmoid", dropout=0.25),
        cx.Layer("hidden4", 10, activation="sigmoid"),
        cx.Layer("output2", 5, activation="sigmoid"))
'output2'
net.connect("sonar", "hidden2")
net.connect("light", "hidden")
net.connect("camera", "conv")
net.connect("conv", "flatten")
net.connect("flatten", "hidden2")
net.connect("hidden", "hidden2")
net.connect("hidden2", "hidden3")
##net.connect("hidden2", "output2")
net.connect("hidden3", "output2")
net.connect("hidden3", "hidden4")
net.connect("hidden4", "output1")
net.compile(error="mean_squared_error", optimizer="adam")
cx.maximum(get_senses(robot))
1.2321053632934627
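# Hedged note as code: the maximum above exceeds 1.0 because the ambient
# light readings can go above 1, while the sonar values were already scaled
# in get_senses. One option (an assumption, not part of jyro/conx) is to
# clip the light values into [0, 1] before propagating:
def clip_senses(senses):
    light, sonar, camera = senses
    return [[min(v, 1.0) for v in light], sonar, camera]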
net.picture(get_senses(robot), hspace=200, scale=1)
net.propagate_to_features("conv", get_senses(robot), scale=3)
net.dataset.append([[1] * 2, [1] * 16, data], [[1] * 2, [1] + ([1] * 4)])
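# Hedged sketch: beyond this single hand-made pattern, the dataset could be
# grown from live simulator steps by stepping and appending whatever the
# robot currently senses. The targets here are placeholders shaped for the
# two output layers above (2 and 5 units), not a trained labeling scheme.
for _ in range(5):
    sim.step()
    light, sonar, cam = get_senses(robot)
    net.dataset.append([light, sonar, cam], [[0.5, 0.5], [1, 0, 0, 0, 0]])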
net.dashboard()
net.picture()
net.evaluate()
========================================================
Testing validation dataset with tolerance 0.1...
Total count: 1
      correct: 0
      incorrect: 1
Total percentage correct: 0.0
#net.delete()
#net.reset()
if net.saved():
    net.load()
    net.plot_results()
else:
    net.train(epochs=200)
    net.save()
net.plot("all")
net.evaluate(show=True)
========================================================
Testing validation dataset with tolerance 0.1...
# | inputs | targets | outputs | result
---------------------------------------
0 | [[0.00, 0.00], [0.00, 0.00, ..., 0.00], [[...40x60x3 camera image...]]] | [[0.00, 0.00], [1.00, 0.00, 0.00, 0.00, 0.00]] | [[0.24, 0.36], [0.79, 0.32, 0.25, 0.39, 0.26]] | X
Total count: 1
      correct: 0
      incorrect: 1
Total percentage correct: 0.0
for i in range(100):
    sim.step()
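# Hedged check: each sim.step() above invokes the brain once, so the pose
# history should now hold roughly one entry per step.
print(len(robot.history))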
def function(simulator, index):
    cam_image = simulator.get_image()
    return (simulator.canvas.render("pil"),
            cam_image.resize((cam_image.size[0] * 4,
                              cam_image.size[1] * 4)))
sim.playback(robot.history, function)
def function(simulator, index):
    cam_image = simulator.get_image()
    return simulator.canvas.render("pil")
sim.movie(robot.history, function, movie_name="sim-robot.gif")