Project: 2020-077
Commit d15b632b, authored Nov 05, 2020 by Manoj Kumar
Message: code cleanup
Parent: a9558498
Showing 4 changed files with 39 additions and 463 deletions (+39 / -463)
dataq/detect.py      +23  -16
dataq/prepare.py     +0   -287
dataq/test.py        +0   -158
dataq/xml_to_csv.py  +16  -2
dataq/detect.py  (view file @ d15b632b)
# * The detect.py file is created to implement the front-end of
# * English to SSL Translator
# *
# * @author Manoj Kumar | IT17050272
# * @version 1.0
# * @since 2020-09-12
#imports
import numpy as np
import os
import sys
...
...
@@ -8,26 +16,23 @@ from PIL import Image
from tensorflow import keras
sys.path.append('.')
from translation.modelUp import sign_predict
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

#Set Base Path
PATH = os.path.dirname(os.path.abspath(__file__))

#Check TensorFlow version
if StrictVersion(tf.__version__) < StrictVersion('1.15.3'):
    raise ImportError('Please upgrade your TensorFlow installation to v1.15.3 or later!')
print(tf.__version__)

from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

# Set Model Paths
MODEL_NAME = PATH + '\\inference_graph'
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '\\frozen_inference_graph.pb'
PATH_TO_LABELS = os.path.join(PATH + '\\training', 'labelmap.pbtxt')

#load tensorflow model to memory
detection_graph = tf.Graph()
with detection_graph.as_default():
...
...
@@ -41,6 +46,7 @@ with detection_graph.as_default():
#map labels
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

#run inference for a single frame
def run_inference_for_single_image(image, graph):
    if 'detection_masks' in tensor_dict:
        # The following processing is only for single image
...
...
@@ -55,8 +61,7 @@ def run_inference_for_single_image(image, graph):
        detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        # Follow the convention by adding back the batch dimension
        tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
        tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
    image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0')
    # Run inference
...
...
@@ -65,16 +70,16 @@ def run_inference_for_single_image(image, graph):
    # outputs are float32 numpy arrays
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict

#initialize camera
cap = cv2.VideoCapture(0)
try:
    with detection_graph.as_default():
        with tf.compat.v1.Session() as sess:
...
...
@@ -112,14 +117,16 @@ try:
                line_thickness=8)
            score = round(100 * output_dict['detection_scores'][0])
            #send the request to bavan here
            #send the request to translation component here
            ###
            # I will be sending a POST request to u. a hand picture
            if score > 80:
                print(image_np_expanded.shape)
                # sign_predict(image_np)
                # sign_predict(image_np)   <-- still under development
                #waiting for the API on that component to be built
            # end send request
            ## Press Q to close the camera
            cv2.imshow('Hand Detector. Press Q to close', cv2.resize(image_np, (800, 600)))
            if cv2.waitKey(25) & 0xFF == ord('q'):
...
...
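The elided sections of dataq/detect.py follow the usual TF1 object-detection pattern: read a webcam frame, run run_inference_for_single_image, and draw the detections with vis_util before the score check above. The sketch below shows that per-frame flow only; the exact wiring of cap, image_np, image_np_expanded and sess inside the "..." regions is an assumption based on the names visible in the hunks, not the committed code.

# Sketch only: per-frame detection loop assumed to sit inside the Session block above.
while True:
    ret, image_np = cap.read()                                 # grab one webcam frame
    if not ret:
        break
    image_np_expanded = np.expand_dims(image_np, axis=0)       # add the batch dimension
    output_dict = run_inference_for_single_image(image_np_expanded, detection_graph)
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8)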
dataq/prepare.py  (deleted, 100644 → 0; view file @ a9558498)
import os, cv2, keras
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from skimage import io
import json

path = "dataq\\images"
annot = "Airplanes_Annotations"

#load CSV
train = pd.read_csv('./dataq/train.csv', encoding='utf-8')
print('CSV Loaded')
print(tf.__version__)
#put bounding box
for index, row in train.iterrows():
    filename = row["External ID"]
    print('File Name:' + filename)
    image = io.imread(train['Labeled Data'][index])
    labels = row['Label']
    label = json.loads(labels)
    label_obj_json = label['objects'][0]
    bbox = label_obj_json['bbox']
    height = bbox['height']
    width = bbox['width']
    xmax = bbox['left'] + width
    ymax = bbox['top'] + height
    cv2.rectangle(image, (bbox['left'], bbox['top']), (xmax, ymax), (255, 0, 0), 2)
    plt.figure()
    plt.imshow(image)
    plt.show()
    break
cv2.setUseOptimized(True);
ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
im = cv2.imread(os.path.join(path, "image_001.jpg"))
ss.setBaseImage(im)
ss.switchToSelectiveSearchFast()
rects = ss.process()
imOut = im.copy()
for i, rect in (enumerate(rects)):
    x, y, w, h = rect
    # print(x,y,w,h)
    # imOut = imOut[x:x+w,y:y+h]
    cv2.rectangle(imOut, (x, y), (x + w, y + h), (0, 255, 0), 1, cv2.LINE_AA)
# plt.figure()
plt.imshow(imOut)
plt.show()

train_images = []
train_labels = []
# get intersection over union
def get_iou(bb1, bb2):
    assert bb1['x1'] < bb1['x2']
    assert bb1['y1'] < bb1['y2']
    assert bb2['x1'] < bb2['x2']
    assert bb2['y1'] < bb2['y2']
    print('inside get IOU')
    x_left = max(bb1['x1'], bb2['x1'])
    y_top = max(bb1['y1'], bb2['y1'])
    x_right = min(bb1['x2'], bb2['x2'])
    y_bottom = min(bb1['y2'], bb2['y2'])
    if x_right < x_left or y_bottom < y_top:
        return 0.0
    intersection_area = (x_right - x_left) * (y_bottom - y_top)
    bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
    bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
    iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
    assert iou >= 0.0
    assert iou <= 1.0
    return iou
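# Worked example (illustrative values, not from the dataset):
# get_iou({'x1': 0, 'y1': 0, 'x2': 10, 'y2': 10}, {'x1': 5, 'y1': 5, 'x2': 15, 'y2': 15})
# overlaps in a 5x5 region, so it returns 25 / (100 + 100 - 25) ~ 0.14.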
#again? IDK Why
ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
listOfImages = []
for imageFiles in os.listdir(path):
    listOfImages.append(imageFiles)
print(listOfImages)
#add bounding boxes
for index, row in train.iterrows():
    image_name = row["External ID"]
    try:
        if image_name.startswith("image") and image_name in listOfImages:
            gtvalues = []
            print("Image Name:" + image_name)
            image = cv2.imread((os.path.join(path, image_name)))
            labels = row['Label']
            label = json.loads(labels)
            label_obj_json = label['objects'][0]
            bbox = label_obj_json['bbox']
            height = bbox['height']
            width = bbox['width']
            x1 = bbox['left']
            y1 = bbox['top']
            x2 = x1 + width
            y2 = x2 + height
            gtvalues.append({"x1": x1, "x2": x2, "y1": y1, "y2": y2})
            ss.setBaseImage(image)
            ss.setBaseImage(image)
            ss.switchToSelectiveSearchFast()
            ssresults = ss.process()
            imout = image.copy()
            counter = 0
            falsecounter = 0
            flag = 0
            fflag = 0
            bflag = 0
            print(ssresults)
            for e, result in enumerate(ssresults):
                if e < 2000 and flag == 0:
                    for gtval in gtvalues:
                        x, y, w, h = result
                        iou = get_iou(gtval, {"x1": x1, "x2": x2, "y1": y1, "y2": y2})
                        if counter < 30:
                            if iou < 0.70:
                                timage = imOut[y:y + h, x:x + w]
                                resized = cv2.resize(timage, (224, 224), interpolation=cv2.INTER_AREA)
                                train_images.append(1)
                                counter += 1
                        else:
                            fflag = 1
                        if falsecounter < 30:
                            if iou < 0.30:
                                timage = imout[y:y + h, x:x + w]
                                resized = cv2.resize(timage, (224, 224), interpolation=cv2.INTER_AREA)
                                train_images.append(resized)
                                train_labels.append(0)
                                falsecounter += 1
                        else:
                            bflag = 1
                    if fflag == 1 and bflag == 1:
                        print("inside")
                        flag = 1
        #print(image_name)
    except Exception as e:
        print(e)
        continue
X_new = np.array(train_images)
y_new = np.array(train_labels)
print('Shape of X_new:')
X_new.shape
from keras.layers import Dense
from keras import Model
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.vgg16 import VGG16

vggmodel = VGG16(weights='imagenet', include_top=True)
vggmodel.summary()
for layers in (vggmodel.layers)[:15]:
    print(layers)
    layers.trainable = False
X = vggmodel.layers[-2].output
predictions = Dense(2, activation="softmax")(X)
model_final = Model(inputs=vggmodel.input, outputs=predictions)
from keras.optimizers import Adam
opt = Adam(learning_rate=0.0001)
model_final.compile(loss=keras.losses.categorical_crossentropy, optimizer=opt, metrics=["accuracy"])
model_final.summary()
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

class MyLabelBinarizer(LabelBinarizer):
    def transform(self, y):
        Y = super().transform(y)
        if self.y_type_ == 'binary':
            return np.hstack((Y, 1 - Y))
        else:
            return Y
    def inverse_transform(self, Y, threshold=None):
        if self.y_type_ == 'binary':
            return super().inverse_transform(Y[:, 0], threshold)
        else:
            return super().inverse_transform(Y, threshold=threshold)

lenc = MyLabelBinarizer()
Y = lenc.fit_transform(y_new)
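# For binary labels (illustrative): y_new = [0, 1, 1] gives LabelBinarizer output [[0], [1], [1]];
# the hstack above widens it to [[0, 1], [1, 0], [1, 0]], matching the two-unit softmax head of model_final.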
X_train, X_test, y_train, y_test = train_test_split(X_new, Y, test_size=0.25)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)

trdata = ImageDataGenerator(horizontal_flip=True, vertical_flip=True, rotation_range=90)
traindata = trdata.flow(x=X_train, y=y_train)
tsdata = ImageDataGenerator(horizontal_flip=True, vertical_flip=True, rotation_range=90)
testdata = tsdata.flow(x=X_test, y=y_test)
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("ieeercnn_vgg16_1.h5", monitor='val_loss', verbose=1, save_best_only=True,
                             save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_loss', min_delta=0, patience=100, verbose=1, mode='auto')
hist = model_final.fit_generator(generator=traindata, steps_per_epoch=10, epochs=1000,
                                 validation_data=testdata, validation_steps=2, callbacks=[checkpoint, early])
import matplotlib.pyplot as plt
# plt.plot(hist.history["acc"])
# plt.plot(hist.history['val_acc'])
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title("model loss")
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.legend(["Loss", "Validation Loss"])
plt.show()
plt.savefig('chart loss.png')
im = X_test[1600]
plt.imshow(im)
img = np.expand_dims(im, axis=0)
out = model_final.predict(img)
if out[0][0] > out[0][1]:
    print("hand")
else:
    print("not a hand")
#### =====================TESTING=================#####
# z=0
# for e,i in enumerate(os.listdir(path)):
# if i.startswith("4"):
# z += 1
# img = cv2.imread(os.path.join(path,i))
# ss.setBaseImage(img)
# ss.switchToSelectiveSearchFast()
# ssresults = ss.process()
# imout = img.copy()
# for e,result in enumerate(ssresults):
# if e < 2000:
# x,y,w,h = result
# timage = imout[y:y+h,x:x+w]
# resized = cv2.resize(timage, (224,224), interpolation = cv2.INTER_AREA)
# img = np.expand_dims(resized, axis=0)
# out= model_final.predict(img)
# if out[0][0] > 0.65:
# cv2.rectangle(imout, (x, y), (x+w, y+h), (0, 255, 0), 1, cv2.LINE_AA)
# plt.figure()
# plt.imshow(imout)
\ No newline at end of file
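For reference, the deleted prepare.py expected ./dataq/train.csv to provide, per row, an 'External ID' (the image file name), a 'Labeled Data' URL, and a 'Label' column holding a JSON string. The nesting sketched below is inferred from the accesses label['objects'][0]['bbox'][...] above; the concrete values are hypothetical.

import json
# Hypothetical 'Label' cell, matching the accesses in the deleted script:
label = json.loads('{"objects": [{"bbox": {"top": 50, "left": 30, "height": 120, "width": 80}}]}')
x1 = label['objects'][0]['bbox']['left']    # -> 30
y1 = label['objects'][0]['bbox']['top']     # -> 50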
dataq/test.py  (deleted, 100644 → 0; view file @ a9558498)
import os
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import cv2
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix

print("Tensorflow Version: " + tf.__version__)
imagepaths = []
for root, dirs, files in os.walk(".", topdown=False):
    for name in files:
        path = os.path.join(root, name)
        if path.endswith("jpg"):
            imagepaths.append(path)
print(len(imagepaths))
def plot_image(path):
    img = cv2.imread(path)
    img_cvt = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    print(img_cvt.shape)
    plt.grid(False)
    plt.imshow(img_cvt)
    plt.xlabel("Width")
    plt.ylabel("Height")
    plt.title("Image" + path)
    plt.show()

plot_image(imagepaths[0])
X = []
y = []
for path in imagepaths:
    image = cv2.imread(path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.resize(image, (320, 120))
    X.append(image)
    label = int(path.split("\\")[3])
    y.append(label)

X = np.array(X, dtype="uint8")
X = X.reshape(len(imagepaths), 120, 320, 1)
y = np.array(y)
print("Images Loaded: ", len(X))
print("Labels Loaded: ", len(y))
print(y[0], imagepaths[0])
#TRAINING AND TESTING
test_score = 0.2
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_score, random_state=100)
print(y_test)
from keras.models import Sequential
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Dense, Flatten

model = Sequential()
model.add(Conv2D(32, (5, 5), activation='relu', input_shape=(120, 320, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=5, batch_size=64, verbose=2, validation_data=(X_test, y_test))
model.save('./dataq/handrecognition_model.h5')
# model = keras.models.load_model('handrecognition_model.h5')
# model.summary()
#testing
test_loss, test_acc = model.evaluate(X_test, y_test)
print('Test accuracy: {:2.2f}%'.format(test_acc * 100))

cam = cv2.VideoCapture(0)
# while True:
# ret, image = cam.read()
# img = tf.keras.preprocessing.image.img_to_array(image)
# predictions = model.predict(img) # Make predictions towards the test set
# cv2.imshow('frame',frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
predictions = model.predict(X_test)  # Make predictions towards the test set
print(predictions)
print(np.argmax(predictions[0]), y_test[0])
def validate_9_images(predictions_array, true_label_array, img_array):
    # Array for pretty printing and then figure size
    class_names = ["no-hand", "hand"]
    plt.figure(figsize=(15, 5))
    for i in range(1, 10):
        # Just assigning variables
        prediction = predictions_array[i]
        true_label = true_label_array[i]
        img = img_array[i]
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        # Plot in a good way
        plt.subplot(3, 3, i)
        plt.grid(False)
        plt.xticks([])
        plt.yticks([])
        plt.imshow(img, cmap=plt.cm.binary)
        predicted_label = np.argmax(prediction)  # Get index of the predicted label from prediction
        # Change color of title based on good prediction or not
        if predicted_label == true_label:
            color = 'blue'
        else:
            color = 'red'
        plt.xlabel("Predicted: {} {:2.0f}% (True: {})".format(class_names[predicted_label],
                                                              100 * np.max(prediction),
                                                              class_names[true_label]), color=color)
    plt.show()
validate_9_images(predictions, y_test, X_test)

y_pred = np.argmax(predictions, axis=1)  # Transform predictions into 1-D array with label number
print(y_pred)
df = pd.DataFrame(confusion_matrix(y_test, y_pred),
                  columns=["Predicted No Hand", "Predicted Hand"],
                  index=["Actual No Hand", "Actual Hand"])
print(df)
print("Done!")
\ No newline at end of file
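For reuse outside this script, the saved model can be reloaded and fed a single frame prepared the same way as the training data. The file path and preprocessing below mirror the code above; the example image name is hypothetical, and the live-camera wiring is an assumption since the original capture loop is commented out.

import cv2
import numpy as np
from tensorflow import keras

# Sketch only: reload the model saved above and classify one hypothetical frame.
model = keras.models.load_model('./dataq/handrecognition_model.h5')
frame = cv2.imread('frame.jpg')                                # hypothetical input image
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)                 # same grayscale conversion as training
gray = cv2.resize(gray, (320, 120)).reshape(1, 120, 320, 1)    # same input shape used during training
pred = model.predict(gray)
print('predicted class index:', np.argmax(pred[0]))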
dataq/xml_to_csv.py  (view file @ d15b632b)
# * The xml_to_csv.py file is created to implement the XML-to-CSV conversion for
# * the English to SSL Translator
# *
# * @author Manoj Kumar | IT17050272
# * @version 1.0
# * @since 2020-09-23
#imports
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET

#converter *.xml file to *.csv
def xml_to_csv(path):
    xml_list = []
    #Check all XML files in current directory
    for xml_file in glob.glob(path + '/*.xml'):
        tree = ET.parse(xml_file)
        root = tree.getroot()
...
...
@@ -20,17 +30,21 @@ def xml_to_csv(path):
                     int(member[4][3].text)
                     )
            xml_list.append(value)
    #predefined column name
    column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    xml_df = pd.DataFrame(xml_list, columns=column_name)
    return xml_df

def converter():
    #check directories : train and test
    for directory in ['train', 'test']:
        image_path = os.path.join(os.getcwd(), 'images/{}'.format(directory))
        xml_df = xml_to_csv(image_path)
        xml_df.to_csv('images/{}_labels.csv'.format(directory), index=None)
        print('Successfully converted xml to csv.')
    print("xmltocsv")

#main method
converter()
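The member[4][3] indexing in the hunk above matches a labelImg-style Pascal VOC annotation, where the fifth child of each <object> element is <bndbox> and its fourth child is <ymax>. The check below uses a hypothetical annotation; the field values are illustrative, not taken from the dataset.

import xml.etree.ElementTree as ET

# Minimal labelImg-style annotation of the shape xml_to_csv() expects (values hypothetical).
sample = """<annotation>
  <filename>image_001.jpg</filename>
  <size><width>800</width><height>600</height><depth>3</depth></size>
  <object>
    <name>hand</name>
    <pose>Unspecified</pose>
    <truncated>0</truncated>
    <difficult>0</difficult>
    <bndbox><xmin>120</xmin><ymin>80</ymin><xmax>360</xmax><ymax>420</ymax></bndbox>
  </object>
</annotation>"""

member = ET.fromstring(sample).findall('object')[0]
print(int(member[4][3].text))   # -> 420, the ymax value read as int(member[4][3].text) above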