
First diffusion #102


Open
wants to merge 5 commits into main
220 changes: 220 additions & 0 deletions 1.csv
@@ -0,0 +1,220 @@
- - '4cac9f6cd85a5b47'
- '2382542e05f253df'
- - - '5b733b329372ed8c8'
- 'cc716ed59dd6f0250'
- - '10933ac8239bd6fbb'
- 'dedf2ffe66804a16a'
- - 'ab74a4ee392eb4ffc'
- '543679cd5973594a2'
- - '0cd9ea86052a0b8e1'
- '0d70fefa06711301c'
- - '5f775bac151622950'
- 'c8a0832c8f737ce92'
- - '2a46d49b6aa55f1ca'
- 'fc8bd325b3da03b85'
- - '4a421aa61a3bbad1d'
- 'a0dee764f3cd352e8'
- - '4a5ea5d6a2735531c'
- '49d2336208d2cac37'
- - '8e653e8d9b170a7dd'
- '00caf7c273c7d5065'
- - 'ecefe6eb9d594a685'
- 'eb16cfaf356e80f1f'
- - 'c054f473288d5515'
- '68097b10029d5fe1'
- - - '4d115c250e5dc02b1'
- '45d88e7f92a4a9e74'
- - '7b5998d0c96e5433a'
- 'b77f2183b554e6641'
- - 'cd9aed75f054bccb5'
- '6808c5efa6976fe37'
- - '07d57ac58a266170f'
- 'd9bbca7a0907304f1'
- - '50442ffec20a73824'
- 'e0e4ca847f84568a3'
- - '142f665559387e2c2'
- 'f0ec163fc911ff8bb'
- - 'ba9155b84dc322eac'
- '765fab253a51a77e4'
- - '51bb4445380dac75e'
- '9d5c879308ff1f5b5'
- - '645751bf568658675'
- 'fad1ad3bce41df4d9'
- - 'bef503ff5929be3a8'
- '91e2e5e740db004ce'
- - '44a5937b14cec415b'
- 'fa8d78f84cd8b7f8c'
- - '51d3d62e78d712283'
- '62631b017ceec5a18'
- - '7caf8d7d76cd0a120'
- '2bb929aa7298012be'
- - '7c6eb3ebf2ec0a507'
- 'a31ca4af2bc4ff90a'
- - '5fcf415bcf1a37722'
- '6cca874fda7a3507a'
- - 'f14e7ed78b5d55b4'
- 'ae472d675a965aca'
- - - '78e346fdbeb014770'
- '82e6ed49ff7af43fe'
- - 'c481b1ded8232fa61'
- '2251eed913b2fbe46'
- - '43eb9502a0d70fcea'
- 'bc9a77699f862350d'
- - 'd5cbf126369d32fdf'
- '57e7f219c88ab8fe0'
- - '7516958eab299d94d'
- '754d33f5c0419048c'
- - '6c5bcf6f99698fac4'
- 'ac7a491ea6609fc90'
- - '18087593f1147e61f'
- '32b8d38d96342e28b'
- - '6c04df27e5264b20b'
- '232ee9d7342aed6a0'
- - '2a8ffeaa7391e73c7'
- '646aa5cbfd5f84a9e'
- - 'c9c198442a1efdc10'
- 'ceeb361872a012bd7'
- - '6c0405cd5c40c15ab'
- '3d0fbd6a6fbde5f86'
- - '13310ca9a7715154'
- 'd01fec804cd45644'
- - - 'bc686838d510dad87'
- 'bc6888f8da8ee3d48'
- - '90dfc4788a349e60b'
- '5fb2ee221c72e08b2'
- - '8d59e8ecb60e11b5b'
- '1effe02b2af9680a9'
- - 'fc3f449099625878e'
- 'fedfbdfcf20092620'
- - 'eaa70df977f0ed637'
- '32a33652619708c65'
- - 'dc92f82e6715ac493'
- 'fd0fe3e97024a208f'
- - '6d8d91ad290f124b2'
- '362376a65fe8ca576'
- - 'c58481114df532967'
- '863415a0e3cc1897d'
- - 'af1a4d903d6c645ce'
- 'a946e96acbe5186d9'
- - '4855fc07a04961bfd'
- '73bd1fde4cf4bbd5a'
- - 'e147b31afd8a5c390'
- '32888a8d5ffa4cfa6'
- - 'beee26fb5719cdfcb'
- '27d64cbd86e07452b'
- - 'c1839a3333695317'
- 'f3ca2707d87a586b'
- - - '774a05df3c1512cac'
- '431df46346088a1de'
- - 'e9f1d3b368a8f7603'
- '026597b466e481724'
- - '956f2e4043c1b8f4d'
- 'b5d11a125c8146f58'
- - '8c19ef349d4fc37cb'
- '80fbecd9f16630510'
- - 'dd01f78238cd88b26'
- '8413ea29ea72fdfde'
- - 'c807c2486a7897e81'
- '5688641dd10efb02d'
- - '6d342222e4d9d2b2a'
- '8aea1806b6e406159'
- - 'ea96a137518d58ac9'
- '6f8794f5f301202d0'
- - 'e774d1b87b07638c4'
- '8d71beefa48cb91fb'
- - 'ba3f20b19ddb93cb1'
- '8c7844d1d621c907b'
- - '5913dfbc435fb100c'
- '6333f19e87ad08122'
- - 'eedb6f9d589e745c8'
- '9f903173a7777cf35'
- - '188fef8a871d3bb74'
- '7098c40dee5929017'
- - '5c8283705c1e44a16'
- '78fdd992fa8acb334'
- - '0225e9a16d781eb96'
- '8f3bf4b1cb4be05f8'
- - '4c03648ccf3301fe7'
- '241e6bedc6fcf2de4'
- - 'ef08e59a9c882aea6'
- '801d1b42c0409656b'
- - 'c6f215cb7b0d88628'
- '95bfecce656f72d69'
- - 'de1d1d3fbf4860a8c'
- '474779897c967667c'
- - 'f6b9867069845fd2'
- '19e90f2757b25f38'
- - - 'c91d6633fc0516a81'
- '3042cb349e2487756'
- - '042bf7d6a0f98a104'
- 'f67fdc6f83d2e8656'
- - '38d50c91468fbe6e5'
- 'e03e2999009dbd343'
- - '07a36d3f2100bc352'
- 'ffe0db4c4b5ed141a'
- - 'a52a1bf87700c06b5'
- '32f2437326c9e81b2'
- - 'beae653519f0232e7'
- 'e64bbff1f3e0c30be'
- - '9e0949ca46f2a4a49'
- '2e822d476fd14739c'
- - '913e360242a39e667'
- 'b545597d491f4a96e'
- - '23b5ec16bb6ad0b92'
- '34e7a39f33ba4f108'
- - 'f2b84517dc455c03f'
- 'ebe3f434bfee258dc'
- - '875f6c89666fde397'
- 'e8a9486b49b69adbf'
- - '3c542be991515ccd'
- '5f53001e7e2d5347'
- - - '5a6f69ea54c00b6d2'
- '9cb5545908502a26c'
- - 'c47263e93d93b6838'
- 'e92394471671ed533'
- - 'dd091b9e715ca6370'
- 'c64e0d6194293f51e'
- - '01898e4c2520a9400'
- 'bc730296c22859127'
- - 'be4d95422b6b6e212'
- 'bffe666f9175e93c1'
- - 'c909e005442d28a9c'
- '9600f9f6992618229'
- - '2048a65fec13c23d5'
- '71afcb9b1ac436e14'
- - '1a6ba40b74f9b51e5'
- '13f7d51df20a8dc66'
- - '321422cad3f85a6a3'
- '808e6b707f608e2a9'
- - 'e7ae2cd68d33a847e'
- '7a999e76f404c4956'
- - 'f9533c6195790b299'
- 'aaaeb97c15db0e2f6'
- - 'b3c7672d677c644c7'
- '960a0cd8fd6e4be22'
- - '5922fad17f2f5e59f'
- 'e1f0dcd38a9ec2523'
- - '15755cd0249f8223a'
- '190768b2e92e01b36'
- - 'b795e9c4c8fde41bb'
- '3c8afefbaf08142c1'
- - '0dc54a8c8203567b'
- 'ed3db88f99a05a6c'
- - - '4876fb179a00309b5'
- '35fc09471c163fd84'
- - 'e593c645d1fd3fdb9'
- '5065468cd60f1bc00'
- - 'ae4f3518384257a07'
- 'ad537f45665843f16'
- - '12499ddd90200ea9f'
- 'e6aaf829acef158f6'
- - 'cea842f2d5b0ad72d'
- '02644631a56fa1750'
- - 'c2e48998f58fcf229'
- '5ae5fc96ae87f7041'
- - '583a61a4880467bf7'
- '5f69ad192f42e1e35'
- - 'ad027cf65e5484130'
- '2df81c34dac0b3f5c'
- - 'f5d454a22e5731c5a'
- '4533792bbf9b2478b'
8 changes: 8 additions & 0 deletions 1.py
@@ -0,0 +1,8 @@
# /data/hdd01/dingzx/dataset/private_test_hard_two_stage/openscene_meta_datas/0a79e71ca7aa8e249.pkl
# 1d70d69255aef8709
import pickle

with open('/data/hdd01/dingzx/dataset/private_test_hard_two_stage/openscene_meta_datas/0a79e71ca7aa8e249.pkl', 'rb') as f:
data = pickle.load(f)

print(data)  # print the file contents
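A minimal sketch of a gentler way to inspect this pickle without dumping the whole object; the dict check and summary logic below are illustrative assumptions, not part of this PR:

import pickle

PKL_PATH = '/data/hdd01/dingzx/dataset/private_test_hard_two_stage/openscene_meta_datas/0a79e71ca7aa8e249.pkl'

with open(PKL_PATH, 'rb') as f:
    data = pickle.load(f)

# Summarize instead of printing everything.
if isinstance(data, dict):
    print(sorted(data.keys()))
else:
    print(type(data))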
4 changes: 4 additions & 0 deletions 1/1.py
@@ -0,0 +1,4 @@
print("<<")
print("<<")
print("<<")
print("<<")
9 changes: 7 additions & 2 deletions navsim/agents/abstract_agent.py
@@ -68,18 +68,23 @@ def compute_trajectory(self, agent_input: AgentInput) -> Trajectory:
"""
self.eval()
features: Dict[str, torch.Tensor] = {}
# targets: Dict[str, torch.Tensor] = {}
# build features
for builder in self.get_feature_builders():
features.update(builder.compute_features(agent_input))

# build targets
# for builder in self.get_target_builders():
# targets.update(builder.compute_targets(agent_input))

# add batch dimension
features = {k: v.unsqueeze(0) for k, v in features.items()}

# targets = {k: v.unsqueeze(0) for k, v in targets.items()}
# forward pass
with torch.no_grad():
predictions = self.forward(features)
poses = predictions["trajectory"].squeeze(0).numpy()

# print("huatu")
# extract trajectory
return Trajectory(poses, self._trajectory_sampling)

127 changes: 127 additions & 0 deletions navsim/agents/abstract_agent_diffusiondrive.py
@@ -0,0 +1,127 @@
from abc import abstractmethod, ABC
from typing import Dict, Union, List
import torch
import pytorch_lightning as pl
from typing import Tuple
import numpy as np
from navsim.common.dataclasses import AgentInput, Trajectory, SensorConfig, Scene
from navsim.planning.training.abstract_feature_target_builder import AbstractFeatureBuilder, AbstractTargetBuilder


class AbstractAgent(torch.nn.Module, ABC):
"""Interface for an agent in NAVSIM."""

def __init__(
self,
requires_scene: bool = False,
):
super().__init__()
self.requires_scene = requires_scene

@abstractmethod
def name(self) -> str:
"""
:return: string describing name of this agent.
"""
pass

@abstractmethod
def get_sensor_config(self) -> SensorConfig:
"""
:return: Dataclass defining the sensor configuration for lidar and cameras.
"""
pass

@abstractmethod
def initialize(self) -> None:
"""
Initialize the agent.
"""
pass

def forward(self, features: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Forward pass of the agent.
:param features: Dictionary of features.
:return: Dictionary of predictions.
"""
raise NotImplementedError

def get_feature_builders(self) -> List[AbstractFeatureBuilder]:
"""
:return: List of feature builders.
"""
raise NotImplementedError("No feature builders. Agent does not support training.")

def get_target_builders(self) -> List[AbstractTargetBuilder]:
"""
:return: List of target builders.
"""
raise NotImplementedError("No target builders. Agent does not support training.")

def compute_trajectory(self, agent_input: AgentInput) -> Tuple[np.ndarray, np.ndarray]:
"""
Computes the ego vehicle trajectory.
:param agent_input: Dataclass with agent inputs.
:return: Tuple of predicted ego poses and anchor trajectory poses as numpy arrays.
"""
self.eval()
features: Dict[str, torch.Tensor] = {}
# targets: Dict[str, torch.Tensor] = {}
# build features
for builder in self.get_feature_builders():
features.update(builder.compute_features(agent_input))

# for builder in self.get_target_builders():
# targets.update(builder.compute_targets(scene))


# "trajectory": trajectory,
# "agent_states": agent_states,
# "agent_labels": agent_labels,
# "bev_semantic_map": bev_semantic_map,`

# add batch dimension
features = {k: v.unsqueeze(0) for k, v in features.items()}
# targets = {k: v.unsqueeze(0) for k, v in targets.items()}
# print(targets)
# print('<<<<<<<<<<<<<<<')
# poses1 = targets["trajectory"].squeeze(0).numpy()
# forward pass
with torch.no_grad():
predictions = self.forward(features)
poses = predictions['trajectory'].squeeze(0).numpy()  # 20 8 3 20 64 8 3
anchor_poses = predictions['anchor_trajectories'].squeeze(0).numpy()  # ensure conversion to numpy
# extract trajectory
# print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
# print(anchor_trajectories.shape)
return poses, anchor_poses
# return Trajectory(poses)

def compute_loss(
self,
features: Dict[str, torch.Tensor],
targets: Dict[str, torch.Tensor],
predictions: Dict[str, torch.Tensor],
) -> torch.Tensor:
"""
Computes the loss used for backpropagation based on the features, targets and model predictions.
"""
raise NotImplementedError("No loss. Agent does not support training.")

def get_optimizers(
self,
) -> Union[torch.optim.Optimizer, Dict[str, Union[torch.optim.Optimizer, torch.optim.lr_scheduler.LRScheduler]]]:
"""
Returns the optimizers that are used by the pytorch-lightning trainer.
Has to be either a single optimizer or a dict of optimizer and lr scheduler.
"""
raise NotImplementedError("No optimizers. Agent does not support training.")

def get_training_callbacks(self) -> List[pl.Callback]:
"""
Returns a list of pytorch-lightning callbacks that are used during training.
See navsim.planning.training.callbacks for examples.
"""
return []
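For reference, compute_trajectory in this variant returns a (poses, anchor_poses) tuple of numpy arrays instead of a Trajectory. A minimal caller-side sketch, assuming the import path below matches this new module and that Trajectory's default trajectory_sampling fits the model output (both are assumptions, not confirmed by this PR):

from typing import Tuple

import numpy as np

from navsim.agents.abstract_agent_diffusiondrive import AbstractAgent
from navsim.common.dataclasses import AgentInput, Trajectory


def run_agent(agent: AbstractAgent, agent_input: AgentInput) -> Tuple[Trajectory, np.ndarray]:
    """Call the tuple-returning API and rebuild a Trajectory for downstream code."""
    poses, anchor_poses = agent.compute_trajectory(agent_input)
    # Trajectory(poses) relies on the dataclass's default trajectory_sampling,
    # which is assumed here to match the model's output horizon.
    trajectory = Trajectory(poses)
    # anchor_poses keeps the per-anchor candidate trajectories, e.g. for visualization.
    return trajectory, anchor_poses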