This commit is contained in:
TerenceLiu98 2024-05-01 16:45:31 +00:00
parent 4af428c63e
commit 18293b45b1
6 changed files with 122 additions and 16 deletions

View File

@ -24,4 +24,37 @@ An ADAS (Advanced Driver Assistance System) for Euro Truck Simulator 2 (or America
- This dataset contains screen recordings of Euro Truck Simulator 2 paired with input from a steering wheel controller (Thrustmaster Ff430).
- The dataset contains 323,894 frames captured at 25 fps.
- Each frame is paired with the steering wheel controller input values at that moment.
- Using [Europilot](https://github.com/marsauto/europilot) for the recording setup (a rough loading sketch follows below)
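
The exact export format is not spelled out here, so the following is only a rough loading sketch: the CSV file name and the column names `img` and `wheel-axis` are assumptions about the Europilot output, not something confirmed by this repository.

```python
# Rough sketch only: iterate over recorded frames with their paired steering values.
# "session.csv", "img" and "wheel-axis" are placeholder names.
import os

import pandas as pd
from skimage import io

data_root = os.path.expanduser("~/data/ETS2/recordings")      # hypothetical location
frames = pd.read_csv(os.path.join(data_root, "session.csv"))  # hypothetical file name

for _, row in frames.iterrows():
    image = io.imread(os.path.join(data_root, "img", row["img"]))
    steering = row["wheel-axis"]  # controller value captured with this frame
    # ... hand (image, steering) to the behaviour-cloning / ADAS pipeline
```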
Data Directory Structure:
```
(BDD100k dataset)
├── image
│   └── 100k
│       ├── test
│       ├── train
│       └── val
└── label
    ├── det_20
    ├── drivable
    │   ├── colormaps
    │   │   ├── train
    │   │   └── val
    │   ├── masks
    │   │   ├── train
    │   │   └── val
    │   ├── polygons
    │   └── rles
    └── lane
        ├── colormaps
        │   ├── train
        │   └── val
        ├── masks
        │   ├── train
        │   └── val
        └── polygons
```
![example.png](src/perception/sample.png)
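
The layout above is what the dataset class further down relies on: an image under `image/100k/train/` has a drivable-area colormap and a lane colormap under the matching `label/.../colormaps/train/` folders with the same file stem. A minimal path-derivation sketch (the root path is a placeholder; the file name is the example used in the notebook below):

```python
import os

root = os.path.expanduser("~/data/ETS2/bdd100k")  # placeholder root
name = "0000f77c-6257be58.jpg"

image_path    = os.path.join(root, "image/100k/train", name)
drivable_path = os.path.join(root, "label/drivable/colormaps/train", name.replace(".jpg", ".png"))
lane_path     = os.path.join(root, "label/lane/colormaps/train", name.replace(".jpg", ".png"))
```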

View File

@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 24,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@ -92,10 +92,10 @@
"#lane detection\n",
"obj_df = pd.read_json(\"~/data/ETS2/bdd100k/label/det_20/det_train.json\")\n",
"\n",
"selected_image = obj_df[obj_df[\"name\"] == \"024ac038-8a8b481c.jpg\"].reset_index(drop=True)\n",
"selected_image = obj_df[obj_df[\"name\"] == \"0000f77c-6257be58.jpg\"].reset_index(drop=True)\n",
"print(selected_image[\"attributes\"][0])\n",
"\n",
"image = io.imread(\"~/data/ETS2/bdd100k/image/100k/train/024ac038-8a8b481c.jpg\")\n",
"image = io.imread(\"~/data/ETS2/bdd100k/image/100k/train/0000f77c-6257be58.jpg\")\n",
"fig, ax = plt.subplots(1) \n",
"ax.imshow(image)\n",
"for i in range(0, len(selected_image[\"labels\"][0])):\n",

View File

@ -45,19 +45,11 @@ def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
def tensor2im(tensor=None):
    output = tensor.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
    output = tensor.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
    return output
OBJ_LABELS = {
    'unlabeled': 0, 'dynamic': 1, 'ego vehicle': 2, 'ground': 3,
    'static': 4, 'parking': 5, 'rail track': 6, 'road': 7,
    'sidewalk': 8, 'bridge': 9, 'building': 10, 'fence': 11,
    'garage': 12, 'guard rail': 13, 'tunnel': 14, 'wall': 15,
    'banner': 16, 'billboard': 17, 'lane divider': 18, 'parking sign': 19,
    'pole': 20, 'polegroup': 21, 'street light': 22, 'traffic cone': 23,
    'traffic device': 24, 'traffic light': 25, 'traffic sign': 26, 'traffic sign frame': 27,
    'terrain': 28, 'vegetation': 29, 'sky': 30, 'person': 31,
    'rider': 32, 'bicycle': 33, 'bus': 34, 'car': 35,
    'caravan': 36, 'motorcycle': 37, 'trailer': 38, 'train': 39,
    'truck': 40
    "truck": 0, "bicycle": 1, "car": 2, "motorcycle": 3,
    "train": 4, "bus": 5, "traffic sign": 6, "rider": 7, "person": 8,
    "traffic light NA": 9, "traffic light R": 10, "traffic light G": 11, "traffic light B": 12
}
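
When decoding model outputs back into readable class names, an inverse lookup over this new 13-class mapping is convenient. A tiny sketch; `ID2LABEL` is a name introduced here for illustration only:

```python
# Sketch: invert OBJ_LABELS to turn predicted class ids back into names.
ID2LABEL = {v: k for k, v in OBJ_LABELS.items()}

assert ID2LABEL[2] == "car"
assert ID2LABEL[10] == "traffic light R"
```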

View File

@ -0,0 +1,81 @@
from base import *


class AutoDriveDataset(Dataset):
    def __init__(self, csv_file, image_dir, lane_dir, da_dir, transform=None):
        self.data_frame = pd.read_json(csv_file)
        self.image_dir, self.lane_dir, self.da_dir = image_dir, lane_dir, da_dir
        self.transform = transform

    def __len__(self):
        return len(self.data_frame)

    def __readdata__(self, idx):
        # image and its drivable-area / lane colormaps share the same file stem;
        # the lane path is derived from the drivable path by swapping the directory name
        image_name = os.path.join(self.image_dir, self.data_frame.iloc[idx, 0])
        label_name = os.path.join(self.da_dir, self.data_frame.iloc[idx, 0]).replace("jpg", "png")
        image = cv2.cvtColor(cv2.imread(f"{image_name}"), cv2.COLOR_BGR2RGB)
        da = cv2.cvtColor(cv2.imread("{}".format(label_name)), cv2.COLOR_BGR2RGB)
        lane = cv2.cvtColor(cv2.imread("{}".format(label_name.replace("drivable", "lane"))), cv2.COLOR_BGR2RGB)
        label_data = self.data_frame.iloc[idx, 3]  # "labels" column of the det_20 json
        boxes = []
        labels = []
        for item in label_data:
            xmin, ymin, xmax, ymax = item["box2d"]["x1"], item["box2d"]["y1"], item["box2d"]["x2"], item["box2d"]["y2"]
            boxes.append([xmin, ymin, xmax, ymax])
            # fold the traffic-light colour into the category name and collapse
            # the remaining rare categories onto their closest class
            if item["category"] == "traffic light":
                item["category"] = item["category"] + " " + item["attributes"]["trafficLightColor"]
            elif item["category"] in ("other vehicle", "trailer"):
                item["category"] = "car"
            elif item["category"] in ("other person", "pedestrian"):
                item["category"] = "person"
            labels.append(OBJ_LABELS[item["category"]])
        boxes = torch.as_tensor(boxes, dtype=torch.float32)
        labels = torch.as_tensor(labels, dtype=torch.int64)
        target = {"lane": lane, "drivable": da, "boxes": boxes, "labels": labels}
        return image, target

    def __getitem__(self, idx):
        image, target = self.__readdata__(idx=idx)
        if self.transform:
            image, target = self.__augmentation__(image, target)
        return image, target

    def __augmentation__(self, image, target):
        transform = Aug.Compose([
            Aug.Resize(720, 640, p=1), Aug.HorizontalFlip(p=0.5), Aug.RandomBrightnessContrast(p=0.5)],
            bbox_params=Aug.BboxParams(format="pascal_voc", label_fields=["labels"]))
        transformed = transform(image=image, masks=[target["lane"], target["drivable"]],
                                bboxes=target["boxes"], labels=target["labels"])
        image = transformed["image"].transpose(2, 0, 1)  # HWC -> CHW
        target = {"lane": transformed["masks"][0], "drivable": transformed["masks"][1],
                  "boxes": transformed["bboxes"], "labels": transformed["labels"]}
        return image, target

    def collate_fn(self, batch):
        # boxes/labels vary in length per frame, so targets stay a list of dicts
        images, targets = zip(*batch)
        targets = [{k: v for k, v in t.items()} for t in targets]
        return images, targets


if __name__ == "__main__":
    dataset = AutoDriveDataset(csv_file="/home/bayes/data/ETS2/bdd100k/label/det_20/det_train.json",
                               image_dir="/home/bayes/data/ETS2/bdd100k/image/100k/train/",
                               lane_dir="/home/bayes/data/ETS2/bdd100k/label/lane/colormaps/train/",
                               da_dir="/home/bayes/data/ETS2/bdd100k/label/drivable/colormaps/train/", transform=True)
    A, B = dataset.__getitem__(idx=10)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=dataset.collate_fn)
    images, targets = next(iter(dataloader))
    fig, ax = plt.subplots(1)
    ax.imshow(A.transpose(1, 2, 0))
    ax.imshow(B["lane"], alpha=0.5)
    ax.imshow(B["drivable"], alpha=0.2)
    for i in range(0, len(B["boxes"])):
        rect = patches.Rectangle((B["boxes"][i][0], B["boxes"][i][1]),
                                 B["boxes"][i][2] - B["boxes"][i][0], B["boxes"][i][3] - B["boxes"][i][1],
                                 linewidth=1, edgecolor="r", facecolor="none")
        ax.add_patch(rect)
    plt.savefig("sample.png", dpi=500)
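
The custom `collate_fn` exists because the default PyTorch collate would try to stack the per-image `boxes`/`labels` tensors, which differ in length from frame to frame. A sketch of batched iteration with it, reusing the `dataset` instance from the demo above (`batch_size` and `num_workers` are arbitrary choices here):

```python
# Sketch: batched loading; targets stay a list of per-frame dicts.
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2,
                    collate_fn=dataset.collate_fn)

for images, targets in loader:
    # images: tuple of CHW arrays; targets: list of dicts with
    # "lane", "drivable", "boxes" and "labels" per frame
    break
```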

src/perception/models.py Normal file
View File

BIN
src/perception/sample.png Normal file

Binary file not shown.
