Commit 1b0fb560 authored by Max Maton
Browse files

Initial commit

parents
*.png
*.jpg
*.mp4
#!/bin/bash
# Compute a stable, camera-unique filename for a photo.
#
# Usage: makerhash <image>
# Output: <sha1>-<epoch>.jpg, where <sha1> hashes 19 bytes at offset 64 of
# the EXIF MakerNotes blob (the same bytes the other scripts in this repo
# use to identify a camera) and <epoch> is the EXIF CreateDate in seconds.
#
# Fix: the result is built in quoted variables instead of an unquoted
# `echo $(...)`, which was subject to word splitting and globbing.
hash=$(exiftool -MakerNotes -b "$1" 2>/dev/null | dd bs=1 skip=64 count=19 2>/dev/null | shasum | cut -d' ' -f1)
stamp=$(exiftool -CreateDate -T -d %s "$1")
echo "${hash}-${stamp}.jpg"
#!/usr/bin/env python3
"""Fit a lens-correction model mapping pixel offsets to meters.

Reads training/validation CSVs of "timestamp; left; right" rows (line
distances in scanline pixels, -1 meaning "no line detected") and fits

    meters = (pixels / 128) * linear + constant

so that left + right + car width (2 m) matches the known lane width
(3.5 m).  Uses TensorFlow 1.x graph mode; the fitted coefficients are
copied into ./extractdistances by hand.
"""
import tensorflow as tf
import io
import math


def load_points(path):
    """Parse a semicolon-separated CSV into [left, right] pixel pairs.

    Rows where either distance is -1 (no line detected) are skipped,
    since they carry no lane-width information.
    """
    points = []
    with open(path) as f:
        for line in f:
            parts = list(map(float, line.split(";")))
            if parts[1] == -1 or parts[2] == -1:
                continue
            points.append([parts[1], parts[2]])
    return points


points = load_points("/root/highway.csv")
validationpoints = load_points("/root/highway.validation.csv")

data = tf.constant(points)
validationdata = tf.constant(validationpoints)

# Model parameters, randomly initialised in [0, 1).  `quad` is kept for a
# quadratic term that is currently unused by convert() but still regularised.
constant = tf.Variable(tf.random_uniform([1], 0.0, 1.0))
linear = tf.Variable(tf.random_uniform([1], 0.0, 1.0))
quad = tf.Variable(tf.random_uniform([1], 0.0, 1.0))

lanewidth = 3.5  # meters; the ground-truth label for every sample


def convert(data):
    """Convert pixel distances to meters with the current model parameters."""
    norm = data / 128.0  # normalise scanline index to [0, 1]
    return norm * linear + constant


distanceEstimate = convert(data)
validationdistanceEstimate = convert(validationdata)

# Estimated lane width = left distance + right distance + car width (~2 m).
widthEstimate = distanceEstimate[:, 0] + distanceEstimate[:, 1] + tf.constant(2.0)
validationwidthEstimate = (validationdistanceEstimate[:, 0]
                           + validationdistanceEstimate[:, 1]
                           + tf.constant(2.0))

labels = tf.constant([lanewidth for x in range(len(points))])
validationlabels = tf.constant([lanewidth for x in range(len(validationpoints))])

# Regularisation: keep the constant term small, weakly penalise the other
# coefficients, and forbid a negative linear coefficient.
varlossWeight = 0.001
varloss = tf.square(constant) + varlossWeight * (tf.square(linear) + tf.square(quad))
varloss += tf.maximum(0.0, -linear)

errloss = tf.losses.mean_squared_error(labels, widthEstimate)
validationloss = tf.losses.mean_squared_error(validationlabels, validationwidthEstimate)
loss = errloss + varloss

train_step = tf.train.AdamOptimizer().minimize(loss)
init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)
print(sess.run((varloss, errloss)))
for i in range(200000):
    sess.run(train_step)
    if i % 1000 == 0:
        # Log parameters and losses so convergence can be watched.
        print(sess.run([constant, linear, quad, varloss, errloss, validationloss]))
#!/usr/bin/env python3
"""Index annotated keyframes by camera, then emit crop/annotation commands."""
import glob
import json
import os
import hashlib

# cameras: camera-hash -> {timestamp: keyframe dict}, built from every
# annotation JSON that has a valid wheel position.
cameras = {}
for path in sorted(glob.glob("*.json")):
    with open(path) as f:
        keyframe = json.load(f)
    if not keyframe['validWheel']:
        continue
    # Resolve the symlinked original photo to locate its .makerinfo sidecar.
    origlink = '.'.join(path.split('.')[:2]) + '.orig.jpg'
    origfile = os.readlink(origlink)
    makerinfo = '.'.join(origfile.split('.')[:-1]) + '.makerinfo'
    timestamp = int(path.split('.')[0])
    # Camera identity: SHA-1 over 19 bytes at offset 64 of the maker notes.
    with open(makerinfo, 'rb') as makerfile:
        makerfile.read(64)
        interesting = makerfile.read(19)
    camera = hashlib.sha1(interesting).hexdigest()
    cameras.setdefault(camera, {})[timestamp] = keyframe

images = glob.glob("*.rotated.jpg")

# Sorted keyframe timestamps per camera, used for interpolation lookups.
times = {}
for camera in cameras:
    times[camera] = sorted(cameras[camera].keys())
for path in images:
    direction = path.split('.')[1]
    # Identify this frame's camera via the makerinfo sidecar of the
    # symlinked original (same scheme as the keyframe indexing above).
    origlink = '.'.join(path.split('.')[:2]) + '.orig.jpg'
    origfile = os.readlink(origlink)
    makerinfo = '.'.join(origfile.split('.')[:-1]) + '.makerinfo'
    timestamp = int(path.split('.')[0])
    with open(makerinfo, 'rb') as makerfile:
        makerfile.read(64)
        interesting = makerfile.read(19)
    camera = hashlib.sha1(interesting).hexdigest()

    # Locate the keyframes bracketing this frame's timestamp.
    prevT = -1
    nextT = -1
    for t in times[camera]:
        if t < timestamp:
            prevT = t
        else:
            nextT = t
            break

    prevKey = False
    if prevT in cameras[camera]:
        prevKey = cameras[camera][prevT]
    nextKey = cameras[camera][nextT] if nextT in cameras[camera] else prevKey
    if not prevKey:
        prevKey = nextKey
    if not nextKey:
        # This camera has no keyframes at all; stop processing images.
        print("no keyframe for " + path)
        break

    # Linear interpolation weights between the bracketing keyframes.  When
    # one side is missing, prevKey == nextKey and the weights still sum to
    # 1, so the result degenerates to the single keyframe's values.
    distPrev = timestamp - prevT
    distNext = nextT - timestamp
    fracPrev = distNext / (distPrev + distNext)
    fracNext = distPrev / (distPrev + distNext)
    horizon = prevKey['horizonY'] * fracPrev + nextKey['horizonY'] * fracNext
    wheel = prevKey['wheelX'] * fracPrev + nextKey['wheelX'] * fracNext

    # Fixed-size crop region anchored at the wheel/horizon position
    # (images are 3264 px tall, so horizon is measured from the bottom).
    imHorizon = int(3264 - horizon)
    imWheel = int(wheel)
    blockTop = int(imHorizon - 575)
    blockLeft = int(imWheel - 1150)
    region = "1150x1150+" + str(blockLeft) + "+" + str(blockTop)

    # Mirror right-hand frames so all crops face the same way.
    flip = "-flop " if direction == "right" else ""

    prefix = '.'.join(path.split('.')[:3])
    with open(prefix + ".region", "w+") as f:
        print("cat " + prefix + ".region")
        f.write(region)

    neuralimg = prefix + ".neural.png"   # generation command currently disabled
    previewimg = prefix + ".preview.png"
    print("convert " + path + " " + flip + " -crop " + region + " +repage -resize 128x128 -normalize " + previewimg)
    houghfile = prefix + ".hough.mvg"    # generation command currently disabled
    lineimg = prefix + ".lines.png"
    print("convert " + path + " " + flip + " -crop " + region + " +repage -resize 128x128 -normalize -canny 0x1+10%+30% -hough-lines 7x7+20 " + lineimg)
    annotationimg = prefix + ".positions.jpg"
    print("convert " + path + " " + flip + " -fill '#ff000088' -draw \"rectangle " + str(imWheel) + "," + str(imHorizon) + " " + str(imWheel + 64) + "," + str(imHorizon + 64) + "\" " + annotationimg)
#!/bin/bash
# Assemble per-frame artefacts into a video plus a CSV of extracted distances.
#
# Usage: makevideo [outfile.mp4] [filter] [starttime] [endtime]
#   filter     which per-frame artefact to use (default: positions)
#   start/end  unix-timestamp range of frames to include
set -euo pipefail

counter=0
prefix=$(pwd)
filename=${1:-out.mp4}
filter=${2:-positions}
starttime=${3-1}
endtime=${4-1927696120}

# Stage the selected frames as a gap-free, zero-padded sequence for ffmpeg.
outdir=$(mktemp -d)
trap 'rm -r "$outdir"' EXIT

# A glob loop (globs expand sorted, so frames stay chronological) instead of
# `ls | while read`: no parsing of ls output, and $counter is updated in this
# shell rather than in a pipeline subshell.
for file in *."$filter"*; do
    time=${file%%.*}                 # leading unix timestamp of the filename
    [[ "$time" -lt "$starttime" ]] && continue
    [[ "$time" -gt "$endtime" ]] && continue
    printf -v padded "%05d" "$counter"
    ln -s "$prefix/$file" "$outdir/$padded.jpg"
    counter=$(( counter + 1 ))
done

# -framerate is an input option of the image2 demuxer, so it must come
# before -i (after -i it does not set the sequence's frame rate).
ffmpeg -y -framerate 10 -i "$outdir/%05d.jpg" "$filename" </dev/null
extractdistances "$starttime" "$endtime" > "$filename.csv"
#!/usr/bin/env python3
"""Collapse per-frame line predictions into one CSV row per timestamp.

Reads *.rotated.predicted.npy files (one per frame side), pairs the
"left" and "right" line positions sharing a timestamp, converts scanline
indices to meters with the lens model fitted by ./correctlens, and prints
semicolon-separated rows to stdout.
"""
import sys
import glob
import numpy

fromtimestamp = int(sys.argv[1])
totimestamp = int(sys.argv[2])

car_width = 2.0  # meters between the left and right line positions

# Lens-correction coefficients; see ./correctlens
constant = 0.0248796
linear = 1.1626804
quad = 0


def indexToMeters(index):
    """Convert a 0-127 scanline index to meters using the fitted model."""
    norm = index / 128.0
    return (norm * norm) * quad + norm * linear + constant


def _emit(timestamp, distances):
    """Print one CSV row for a timestamp's accumulated left/right indices."""
    left_meters = indexToMeters(distances["left"])
    if distances["left"] == -1:
        left_meters = -1  # -1 means "no line detected"; don't convert it
    right_meters = indexToMeters(distances["right"])
    if distances["right"] == -1:
        right_meters = -1
    lanewidth = -1
    if distances["left"] != -1 and distances["right"] != -1:
        lanewidth = left_meters + right_meters + car_width
    print("; ".join(map(str, [timestamp, distances["left"],
                              distances["right"], left_meters,
                              right_meters, lanewidth])))


predictions = glob.glob("*.rotated.predicted.npy")
predictions.sort()

currenttime = fromtimestamp
distances = {"left": -1, "right": -1}
have_data = False
print("timestamp; left; right; left_meters; right_meters; lane_width")
for path in predictions:
    timestamp = int(path.split('.')[0])
    if timestamp < fromtimestamp or timestamp >= totimestamp:
        continue
    if timestamp > currenttime:
        # Flush the previous timestamp's pair before starting a new one.
        # BUG FIX: label the row with the timestamp it belongs to
        # (currenttime), not the timestamp of the frame triggering the
        # flush, and skip the bogus leading flush before any data arrived.
        if have_data:
            _emit(currenttime, distances)
        currenttime = timestamp
        distances = {"left": -1, "right": -1}
    data = numpy.load(path).item()
    distances[path.split('.')[1]] = data["lineIndex"]
    have_data = True

# BUG FIX: flush the final timestamp, which the loop never printed.
if have_data:
    _emit(currenttime, distances)
#!/bin/bash
# Map a corrected export filename back to its original day/direction path.
#
# Input example:
#   D-shubham-RHDHV-Lane-position-tracking-22nd-May-front-left-G0059400.JPG.corrected.jpg
# Output example:
#   22-left/G0059400.JPG
#
# Fix: all expansions are quoted so paths containing spaces or glob
# characters survive intact.
filename=$(basename "$1")
basepart=$(cut -d'.' -f1 <<< "$filename")
id=$(cut -d'-' -f11 <<< "$basepart")
# Day: field 7 ("22nd") with the ordinal letters stripped.
day=$(cut -d'-' -f7 <<< "$basepart" | tr -d '[:lower:]')
# Direction: field 10 ("front-LEFT"/"RIGHT" part), lower-cased.
direction=$(cut -d'-' -f10 <<< "$basepart" | tr '[:upper:]' '[:lower:]')
echo "${day}-${direction}/${id}.JPG"
#!/usr/bin/env python3
"""Print the contents of the NumPy .npy file named on the command line."""
import sys

import numpy

print(numpy.load(sys.argv[1]))
#!/bin/bash
# Print the clock offset (in seconds) for a known camera hash; unknown
# cameras produce no output.
case "$1" in
    c2137ba48e15d7699c1cc16a78f0e281d19e3d39) echo -5270400 ;;
    1758bf02078496bcf7635014c3d7eadf824153c2) echo 37 ;;
    050646899dae61bacd0941e1c0235bcee263db09) echo 54 ;;
esac
#!/bin/bash
# Print extra rotation arguments for a known camera hash; cameras not
# listed need no extra rotation and produce no output.
case "$1" in
    c2137ba48e15d7699c1cc16a78f0e281d19e3d39) echo -rotate 90 ;;
esac
#!/usr/bin/env python3
"""Stage photos from a camera directory into the shared timeline directory.

For every *.makerinfo sidecar in the current directory this script:
  * identifies the camera (SHA-1 of 19 bytes at offset 64 of the maker
    notes),
  * corrects the photo's timestamp with the per-camera offset from
    offsets.json,
  * records any annotated wheel/horizon position in positions.tsv,
  * symlinks the original photo (and its annotation JSON) into the
    destination as "<correctedtime>.<postfix>.*", and
  * prints the jpegtran command producing the rotated copy (direction
    depends on whether the camera is listed in the optional "flip" file).

Usage: import_photos <destpath> <postfix>
"""
import os
import os.path
from subprocess import Popen, DEVNULL
import sys
import json
import hashlib

destpath = sys.argv[1]
postfix = sys.argv[2]

with open('offsets.json') as offsetconf:
    offsets = json.load(offsetconf)

# Cameras listed in the optional "flip" file are mounted upside down.
fliplist = []
try:
    with open('flip') as flipconf:
        fliplist = flipconf.read().splitlines()
except OSError:
    pass  # no flip file: no camera is flipped

with open(destpath + "/positions.tsv", 'w+') as posfile:
    for filename in os.listdir(os.getcwd()):
        if not filename.endswith(".makerinfo"):
            continue
        prefix = '.'.join(filename.split('.')[:-1])
        # Camera identity: SHA-1 over 19 bytes at offset 64 of the maker notes.
        with open(filename, 'rb') as makerfile:
            makerfile.read(64)
            interesting = makerfile.read(19)
        camera = hashlib.sha1(interesting).hexdigest()
        # Unknown cameras raise KeyError on purpose; a null offset means 0.
        offset = offsets[camera] or 0
        with open(prefix + ".timestamp") as timefile:
            time = int(timefile.readline())
        correctedtime = time + offset
        data = False
        try:
            with open(prefix + ".JPG.json") as f:
                data = json.load(f)
            # BUG FIX: json.load returns a dict, so the fields must be read
            # with [] indexing; the original attribute access always raised
            # AttributeError and the bare `except` silently dropped every
            # positions.tsv row.
            if data['validWheel']:
                posfile.write(camera + " " + str(correctedtime) + " "
                              + str(data['wheelX']) + " "
                              + str(data['horizonY']) + "\n")
        except (OSError, ValueError, KeyError):
            pass  # no (or malformed) annotation for this photo
        destprefix = destpath + "/" + str(correctedtime) + "." + postfix
        # NOTE(review): indentation reconstructed — staging is assumed to be
        # skipped entirely when the destination already exists; confirm.
        if not os.path.isfile(destprefix + ".jpg"):
            try:
                os.symlink(os.getcwd() + "/" + prefix + ".JPG",
                           destprefix + ".orig.jpg")
            except OSError:
                pass  # already staged by an earlier run
            if data:
                try:
                    os.symlink(os.getcwd() + "/" + prefix + ".JPG.json",
                               destprefix + ".json")
                except OSError:
                    pass
            if camera in fliplist:
                print("jpegtran -rotate 270 -outfile \"" + destprefix + ".rotated.jpg" + "\" \"" + prefix + ".JPG\"")
            else:
                print("jpegtran -rotate 90 -outfile \"" + destprefix + ".rotated.jpg" + "\" \"" + prefix + ".JPG\"")
#!/usr/bin/env python3
"""Run the trained line-position network over all *.preview.png crops.

Each 128x128 grayscale preview is normalised to [-1, 1], batched through
the Keras model saved at /root/neural.save, and the per-image prediction
vector is written next to the input as <prefix>.neural.npy.
"""
import sys
import skimage
import glob
import numpy
import keras
from skimage import io
from skimage.color import rgb2gray

files = glob.glob("*.preview.png")
files.sort()

model = keras.models.load_model("/root/neural.save")

filenames = []
images = []
for filename in files:
    image = io.imread(filename)
    # Map grayscale [0, 1] to [-1, 1], the range the network expects.
    image = (rgb2gray(image) - 0.5) * 2
    if image.shape != (128, 128):
        print("bad: " + filename)
        continue
    filenames.append(filename)
    images.append(image)

# BUG FIX: numpy.stack raises ValueError on an empty sequence; bail out
# cleanly when no usable preview image was found.
if not images:
    print("no usable preview images found")
    sys.exit(0)

numpydata = numpy.stack(images)
outputs = model.predict(numpydata, len(numpydata))
for (filename, output) in zip(filenames, outputs):
    resultfile = '.'.join(filename.split('.')[:3]) + ".neural.npy"
    numpy.save(resultfile, output)
#!/usr/bin/env python3
"""Fuse the Hough-line scanline and the neural prediction for one frame.

Combines two independent estimates of the lane-line position along a
horizontal scanline of the 128x128 crop — detected line pixels from
<prefix>.lines.png and the network output from the .npy file given on the
command line — into one distribution over 129 bins (128 positions plus a
"no line" bin), picks a position, renders a debug overlay to
<prefix>.predicted.png and saves the distributions to <prefix>.predicted.npy.

NOTE(review): the original file's indentation was lost; block structure
below is reconstructed from the guards that use each variable — confirm
against the original source.
"""
import skimage
import numpy
import random
import sys
from skimage import io
from skimage.color import gray2rgb
from scipy.ndimage.filters import gaussian_filter

filename = sys.argv[1]
prediction = numpy.load(filename)
prefix = '.'.join(filename.split('.')[:3])

lines = io.imread(prefix + ".lines.png")
# Some writers emit RGBA; keep a single channel.
if lines.shape == (128, 128, 4):
    lines = lines[:,:,0]
if lines.shape != (128, 128):
    print("bad: " + filename)
    print(lines.shape)
    sys.exit(1)

# Evidence from the Hough-line image: sample row 65 and invert so that
# dark (line) pixels become high values.
scanline = lines[65,:].astype(numpy.float32)
scanline = 1.0 - (scanline / 255.0)
# Linearly fade out the 16 rightmost pixels (indices 112-127).
for i in range(0, 16):
    ratio = 1 - (i / 15)
    scanline[112 + i] *= ratio
scanline = numpy.flipud(scanline)
# Append a 129th "no line" bin with constant weight 1.
scanline = list(scanline)
scanline.append(1)
scanline = numpy.array(scanline)
# Flattened variant: 70% evidence plus a uniform floor, and a copy
# normalised to max 1 for display.
scanline_flat = 0.7 * scanline + 0.3 / 128
scanline_disp = scanline_flat / numpy.max(scanline_flat)

noline = False
lineindex = -1
scanline = scanline / numpy.sum(scanline)

# Evidence from the network: up-sample its 16 position outputs
# (prediction[1:17]) to 128 bins and smooth; prediction[0] is the
# "no line" score and becomes the 129th bin.
neuralchance = []
for i in range(0,128):
    neuralchance.append(prediction[int(i / 128.0 * 16) + 1])
neuralchance = gaussian_filter(neuralchance, sigma = 2)
neuralchance = list(neuralchance)
neuralchance.append(prediction[0])
neuralchance = numpy.array(neuralchance)
neuralchance = neuralchance / numpy.sum(neuralchance)
neuralchance_flat = 0.7 * neuralchance + 0.3 / 128
neuralchance_disp = neuralchance_flat / numpy.max(neuralchance_flat)

# Fuse the two sources by multiplying the flattened distributions; if
# essentially no mass remains, force everything into the "no line" bin.
combined = scanline_flat * neuralchance_flat
if numpy.sum(combined) < 0.001:
    combined[128] = 1
combined = combined / numpy.sum(combined)
# If "no line" outweighs every position bin, report no line.
if numpy.max(combined[:-1]) < combined[128]:
    noline = True

softmax = [0] * 128
if not noline:
    softmax = combined[:-1] / numpy.sum(combined[:-1])
    # Display copy; only meaningful (and only used) when a line exists.
    softmax_disp = softmax / numpy.max(softmax)

# Walk the distribution until 0.3 of the probability mass is passed; with
# all-zero softmax (no line) lineindex stays -1.
chosen = 0.3
for i in range(len(softmax)):
    prob = softmax[i]
    chosen -= prob
    if chosen <= 0:
        lineindex = i
        break

# Debug overlay on the preview: darken by the detected-lines image, then
# paint indicator bands.
previewimg = io.imread(prefix + ".preview.png")
colorlines = gray2rgb(lines, alpha=False)
previewimg = numpy.minimum(previewimg, colorlines)
# Top-left 8x8 square: brightness encodes the "no line" probability.
for x in range(8):
    for y in range(8):
        previewimg[y, x, :] = 255 * combined[128]
# Red band (rows 65+): fused softmax; blue band (rows 112+): neural
# evidence.  Index 127 - i undoes the earlier flipud.
for i in range(128):
    for y in range(16):
        if not noline:
            previewimg[65 + y, i, 0] = previewimg[65 + y, i, 0] * (1 - softmax_disp[127 - i]) + 255 * softmax_disp[127 - i]
        previewimg[112 + y, i, 2] = previewimg[112 + y, i, 2] * (1 - neuralchance_disp[127 - i]) + 255 * neuralchance_disp[127 - i]
# Green marker wedge above the chosen line position.
if not noline:
    for y in range(17):
        for x in range(int(-y / 3), int(y / 3) + 1):
            relx = 127 - (lineindex - x)
            if relx < 0 or relx >= 128:
                continue
            previewimg[64 - y, relx, 1] = (previewimg[64 - y, relx, 1] + 255) / 2
io.imsave(prefix + ".predicted.png", previewimg)

# Persist the decision and both distributions for downstream scripts
# (extractdistances reads "lineIndex").
savedata = {
    "lineIndex": lineindex,
    "lineValid": not noline,
    "scanline": scanline,
    "neuralchance": neuralchance,
    "softmax": softmax,
}
numpy.save(prefix + ".predicted.npy", savedata)
print(prefix + ".predicted.png")
#!/usr/bin/env python3
# Start of a temporal-smoothing pass over per-frame neural predictions.
# NOTE(review): this script continues beyond the end of this excerpt; only
# the setup below is visible.
import skimage
import numpy
import random
import sys
import os
import glob
from skimage import io
from skimage.color import gray2rgb
from scipy.ndimage.filters import gaussian_filter

images = glob.glob("*.neural.npy")
images.sort()
# Timestamp of the earliest prediction file (filenames start with it).
curtime = int(images[0].split('.')[0])
# Per-side prior over 129 bins (128 positions + "no line"); note the
# initial uniform weight uses 1/128 for all 129 entries.
state = {
"right": [1 / 128] * 129,
"left": [1 / 128] * 129,
}
state["right"] = numpy.array(state["right"])
state["left"] = numpy.array(state["left"])