Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
S
Smart E- Learn Tracer
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
1
Merge Requests
1
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
23_22 - J 01
Smart E- Learn Tracer
Commits
f99df194
Commit
f99df194
authored
Jan 25, 2023
by
Shenthuri Vimaleshwaran
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Upload New File
parent
f9fe2821
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
175 additions
and
0 deletions
+175
-0
eyemove_ment/mobiledetectionmodel.py
eyemove_ment/mobiledetectionmodel.py
+175
-0
No files found.
eyemove_ment/mobiledetectionmodel.py
0 → 100644
View file @
f99df194
import
os
import
numpy
as
np
import
torch
import
glob
import
torch.nn
as
nn
from
torchvision.transforms
import
transforms
from
torch.utils.data
import
DataLoader
from
torch.optim
import
Adam
from
torch.autograd
import
Variable
import
torchvision
import
pathlib
# Pick the compute device: GPU when CUDA is present, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Per-channel normalization stats: (x - 0.5) / 0.5 maps [0, 1] to [-1, 1].
_norm_stats = [0.5, 0.5, 0.5]

# Preprocessing pipeline applied to every image:
# resize -> random horizontal flip (augmentation) -> tensor -> normalize.
transformer = transforms.Compose([
    transforms.Resize((150, 150)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),  # 0-255 to 0-1, numpy to tensors
    transforms.Normalize(_norm_stats, _norm_stats),
])
# Dataloader
# The training / testing / prediction directories are read from a plain-text
# file named 'paths' (one path per line, in that order).  A context manager
# replaces the original bare open().read(), which leaked the file handle.
with open('paths', 'r') as path_file:
    fo = path_file.read().splitlines()
train_path = fo[0]
test_path = fo[1]
pred_path = fo[2]

train_loader = DataLoader(
    torchvision.datasets.ImageFolder(train_path, transform=transformer),
    batch_size=64,
    shuffle=True,
)
test_loader = DataLoader(
    torchvision.datasets.ImageFolder(test_path, transform=transformer),
    batch_size=64,
    shuffle=True,
)

# Class names: one per sub-directory of the training root, sorted so the
# list order matches the (sorted) label order that ImageFolder assigns.
# Only directories are taken: a stray file in the root must not be counted
# as a class (the original iterated every entry; the split('/') was a no-op
# since Path.name never contains a separator).
root = pathlib.Path(train_path)
classes = sorted(entry.name for entry in root.iterdir() if entry.is_dir())
# CNN Network
class ConvNet(nn.Module):
    """CNN classifier for 150x150 RGB images.

    Layout: conv(3->12)+BN+ReLU -> maxpool(2) -> conv(12->20)+ReLU
    -> conv(20->32)+BN+ReLU -> flatten -> linear(32*75*75 -> num_classes).
    Returns raw class scores (logits); no softmax is applied here.
    """

    def __init__(self, num_classes=8):
        super().__init__()
        # Spatial size after a convolution: ((w - f + 2P) / s) + 1.
        # With f=3, s=1, P=1 every conv preserves H and W.
        # Input batch shape: (N, 3, 150, 150).
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12,
                               kernel_size=3, stride=1, padding=1)  # (N, 12, 150, 150)
        self.bn1 = nn.BatchNorm2d(num_features=12)                  # (N, 12, 150, 150)
        self.relu1 = nn.ReLU()                                      # (N, 12, 150, 150)
        # The single pooling step halves H and W: 150 -> 75.
        self.pool = nn.MaxPool2d(kernel_size=2)                     # (N, 12, 75, 75)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=20,
                               kernel_size=3, stride=1, padding=1)  # (N, 20, 75, 75)
        self.relu2 = nn.ReLU()                                      # (N, 20, 75, 75)
        self.conv3 = nn.Conv2d(in_channels=20, out_channels=32,
                               kernel_size=3, stride=1, padding=1)  # (N, 32, 75, 75)
        self.bn3 = nn.BatchNorm2d(num_features=32)                  # (N, 32, 75, 75)
        self.relu3 = nn.ReLU()                                      # (N, 32, 75, 75)
        # Flattened feature size feeding the classifier head.
        self.fc = nn.Linear(in_features=75 * 75 * 32,
                            out_features=num_classes)

    # Feed forward function
    def forward(self, input):
        """Map a batch of images (N, 3, 150, 150) to logits (N, num_classes)."""
        x = self.relu1(self.bn1(self.conv1(input)))
        x = self.pool(x)
        x = self.relu2(self.conv2(x))
        x = self.relu3(self.bn3(self.conv3(x)))
        # Flatten (N, 32, 75, 75) into (N, 32*75*75) for the linear layer.
        x = x.view(-1, 32 * 75 * 75)
        return self.fc(x)
# Build the network and move it onto the selected device.
model = ConvNet(num_classes=8).to(device)

# Optimizer and loss function
optimizer = Adam(model.parameters(), lr=0.001, weight_decay=0.0001)
loss_function = nn.CrossEntropyLoss()

num_epochs = 15

# Dataset sizes: number of .jpeg files one directory level below each root
# (the standard ImageFolder layout root/<class>/<image>).
# NOTE(review): glob.glob without recursive=True treats '**' like '*', so
# only a single directory level is matched — confirm the datasets are not
# nested any deeper, and that all images really carry the .jpeg extension.
train_count = len(glob.glob(train_path + '/**/*.jpeg'))
test_count = len(glob.glob(test_path + '/**/*.jpeg'))
# Model training: after every epoch, evaluate on the test set and keep the
# checkpoint with the best test accuracy seen so far.
best_accuracy = 0.0

for epoch in range(num_epochs):
    # ---- Training pass ----
    model.train()
    train_accuracy = 0.0
    train_loss = 0.0

    for images, labels in train_loader:
        # Move the batch to the GPU when one is available (the model was
        # placed on the same device at construction).  The deprecated
        # torch.autograd.Variable wrapper the original used is a no-op on
        # modern PyTorch and has been dropped.
        if torch.cuda.is_available():
            images = images.cuda()
            labels = labels.cuda()

        optimizer.zero_grad()
        outputs = model(images)
        loss = loss_function(outputs, labels)
        loss.backward()
        optimizer.step()

        # .item() extracts the scalar loss without keeping the graph alive
        # (replaces the deprecated .data access); weighting by batch size
        # makes the epoch average correct even for a short final batch.
        train_loss += loss.item() * images.size(0)
        _, prediction = torch.max(outputs.data, 1)
        train_accuracy += int(torch.sum(prediction == labels.data))

    train_accuracy = train_accuracy / train_count
    train_loss = train_loss / train_count

    # ---- Evaluation on testing dataset ----
    model.eval()
    test_accuracy = 0.0

    # no_grad: gradients are never needed during evaluation, so skip
    # building the autograd graph (saves memory and time).
    with torch.no_grad():
        for images, labels in test_loader:
            if torch.cuda.is_available():
                images = images.cuda()
                labels = labels.cuda()

            outputs = model(images)
            _, prediction = torch.max(outputs.data, 1)
            test_accuracy += int(torch.sum(prediction == labels.data))

    test_accuracy = test_accuracy / test_count

    print('Epoch: ' + str(epoch) + ' Train Loss: ' + str(train_loss)
          + ' Train Accuracy: ' + str(train_accuracy)
          + ' Test Accuracy: ' + str(test_accuracy))

    # Save the best model
    if test_accuracy > best_accuracy:
        torch.save(model.state_dict(), 'best_checkpoint.model')
        best_accuracy = test_accuracy
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment