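"""Helpers for running a single DesktopEnv example end to end.

Each ``run_single_example*`` variant resets the environment (and agent, where
applicable), steps through the task while logging actions, screenshots, and a
``traj.jsonl`` trajectory under the example's result directory, and finally
writes the evaluation score to ``result.txt``.
"""
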
import datetime
import json
import logging
import os
import time

from wrapt_timeout_decorator import *

logger = logging.getLogger("desktopenv.experiment")


def run_single_example(agent, env, example, max_steps, instruction, args, example_result_dir, scores):
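    """Run one example with the given agent, logging every executed action.

    Per-step screenshots and a ``traj.jsonl`` trajectory are written to
    ``example_result_dir``, a screen recording is saved as ``recording.mp4``,
    and the final ``env.evaluate()`` score is appended to ``scores`` and
    written to ``result.txt``.
    """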
    runtime_logger = setup_logger(example, example_result_dir)
    agent.reset(runtime_logger)
    env.reset(task_config=example)
    time.sleep(60)  # Wait for the environment to be ready
    obs = env._get_obs()  # Get the initial observation
    done = False
    step_idx = 0
    env.controller.start_recording()
    while not done and step_idx < max_steps:
        response, actions = agent.predict(
            instruction,
            obs
        )
        for action in actions:
            # Capture the timestamp before executing the action
            action_timestamp = datetime.datetime.now().strftime("%Y%m%d@%H%M%S")
            logger.info("Step %d: %s", step_idx + 1, action)
            obs, reward, done, info = env.step(action, args.sleep_after_execution)

            logger.info("Reward: %.2f", reward)
            logger.info("Done: %s", done)
            # Save screenshot and trajectory information
            with open(os.path.join(example_result_dir, f"step_{step_idx + 1}_{action_timestamp}.png"),
                      "wb") as _f:
                _f.write(obs['screenshot'])
            with open(os.path.join(example_result_dir, "traj.jsonl"), "a") as f:
                f.write(json.dumps({
                    "step_num": step_idx + 1,
                    "action_timestamp": action_timestamp,
                    "action": action,
                    "reward": reward,
                    "done": done,
                    "info": info,
                    "screenshot_file": f"step_{step_idx + 1}_{action_timestamp}.png"
                }))
                f.write("\n")
            if done:
                logger.info("The episode is done.")
                break
        step_idx += 1
    result = env.evaluate()
    logger.info("Result: %.2f", result)
    scores.append(result)
    with open(os.path.join(example_result_dir, "result.txt"), "w", encoding="utf-8") as f:
        f.write(f"{result}\n")
    env.controller.end_recording(os.path.join(example_result_dir, "recording.mp4"))


def setup_logger(example, example_result_dir):
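    """Return a per-example logger that writes DEBUG output to ``runtime.log`` in the result dir."""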
    runtime_logger = logging.getLogger(f"desktopenv.example.{example['id']}")
    runtime_logger.setLevel(logging.DEBUG)
    runtime_logger.addHandler(logging.FileHandler(os.path.join(example_result_dir, "runtime.log")))
    return runtime_logger


def run_single_example_human(env, example, max_steps, instruction, args, example_result_dir, scores):
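    """Variant without an agent loop: record the initial state, then evaluate.

    Only the instruction and the initial screenshot are logged to
    ``traj.jsonl``; the environment is evaluated as-is and the score is
    appended to ``scores`` and written to ``result.txt``.
    """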
    runtime_logger = setup_logger(example, example_result_dir)
    env.reset(task_config=example)
    time.sleep(60)  # Wait for the environment to be ready
    obs = env._get_obs()  # Get the initial observation

    # Save initial screenshot
    with open(os.path.join(example_result_dir, "initial_state.png"), "wb") as _f:
        _f.write(obs['screenshot'])

    # Save trajectory information
    with open(os.path.join(example_result_dir, "traj.jsonl"), "a") as f:
        f.write(json.dumps({
            "instruction": instruction,
            "initial_state": "initial_state.png"
        }))
        f.write("\n")

    # Evaluate the result
    result = env.evaluate()
    logger.info("Result: %.2f", result)
    scores.append(result)
    with open(os.path.join(example_result_dir, "result.txt"), "w", encoding="utf-8") as f:
        f.write(f"{result}\n")


def run_single_example_openaicua(agent, env, example, max_steps, instruction, args, example_result_dir, scores):
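    """Run one example with an agent driven through ``agent.step`` (OpenAI CUA-style).

    In addition to the usual step limit, the episode ends early when the
    model's response does not report ``state_correct``; ``pending_checks``
    entries are stripped from actions before JSON serialization.
    """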
    runtime_logger = setup_logger(example, example_result_dir)
    agent.reset(runtime_logger)
    env.reset(task_config=example)
    time.sleep(60)  # Wait for the environment to be ready
    obs = env._get_obs()  # Get the initial observation
    done = False
    step_idx = 0
    env.controller.start_recording()
    while not done and step_idx < max_steps:
        response, actions = agent.predict(
            instruction,
            obs
        )

        done = not response.get('state_correct', False)

        for action in actions:
            # Capture the timestamp before executing the action
            action_timestamp = datetime.datetime.now().strftime("%Y%m%d@%H%M%S")
            logger.info("Step %d: %s", step_idx + 1, action)
            obs, reward, done, info, step_info = agent.step(action)

            if not done:
                if not response.get('state_correct', False):
                    done = True

            logger.info("Reward: %.2f", reward)
            logger.info("Done: %s", done)
            # Save screenshot and trajectory information
            with open(os.path.join(example_result_dir, f"step_{step_idx + 1}_{action_timestamp}.png"),
                      "wb") as _f:
                _f.write(obs['screenshot'])

            # Remove pending checks if they exist, since they would break JSON serialization
            if action.get('pending_checks', None):
                del action['pending_checks']

            with open(os.path.join(example_result_dir, "traj.jsonl"), "a") as f:
                f.write(json.dumps({
                    "step_num": step_idx + 1,
                    "action_timestamp": action_timestamp,
                    "action": action,
                    "reward": reward,
                    "done": done,
                    "info": info,
                    "screenshot_file": f"step_{step_idx + 1}_{action_timestamp}.png"
                }))
                f.write("\n")
            if done:
                logger.info("The episode is done.")
                break
        step_idx += 1
    result = env.evaluate()
    logger.info("Result: %.2f", result)
    scores.append(result)
    with open(os.path.join(example_result_dir, "result.txt"), "w", encoding="utf-8") as f:
        f.write(f"{result}\n")
    env.controller.end_recording(os.path.join(example_result_dir, "recording.mp4"))
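

# Usage sketch (illustrative, not part of this module): callers are assumed to
# provide a DesktopEnv-style `env` exposing reset/step/evaluate/controller and
# an `agent` exposing reset/predict, as used above.
#
#     scores = []
#     run_single_example(agent, env, example, max_steps, instruction,
#                        args, example_result_dir, scores)
#     print(scores[-1])  # evaluation score for this example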