Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
2
2021-027
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
2021-27
2021-027
Commits
2f5a9ce3
Commit
2f5a9ce3
authored
Oct 22, 2021
by
Rifana F.N.F_IT16141902
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
prediction
parent
0a659d83
Changes
3
Show whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
237 additions
and
0 deletions
+237
-0
model.h5
model.h5
+0
-0
neural_network.py
neural_network.py
+115
-0
predictor.py
predictor.py
+122
-0
No files found.
model.h5
0 → 100644
View file @
2f5a9ce3
File added
neural_network.py
0 → 100644
View file @
2f5a9ce3
import
numpy
as
np
import
pandas
as
pd
import
tensorflow
as
tf
from
sklearn.model_selection
import
train_test_split
from
tensorflow.keras
import
layers
from
tensorflow.keras.layers.experimental
import
preprocessing
# Load the parking dataset and inspect the first few rows.
dataframe = pd.read_csv('Database_Final.csv')
print(dataframe.head())

# 'Remaining space' is the quantity to predict; expose it under the name
# 'target' and drop the original column plus the CSV's stray index column.
dataframe['target'] = dataframe['Remaining space']
dataframe = dataframe.drop(columns=['Unnamed: 0', 'Remaining space'])

# Hold out 20% for testing, then carve 20% of what remains for validation.
train, test = train_test_split(dataframe, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)

print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
def df_to_dataset(dataframe, label_column, shuffle=True, batch_size=32):
    """Convert a pandas DataFrame into a batched tf.data.Dataset.

    The named label column is popped off a copy of the frame; the remaining
    columns become a dict of feature lists paired with the labels as
    (features, label) examples.
    """
    frame = dataframe.copy()
    labels = frame.pop(label_column)
    ds = tf.data.Dataset.from_tensor_slices(
        (frame.to_dict(orient='list'), labels))
    if shuffle:
        # Buffer as large as the frame gives a full shuffle each epoch.
        ds = ds.shuffle(buffer_size=len(frame))
    return ds.batch(batch_size)
# Pull one small batch through the pipeline to sanity-check its output.
batch_size = 5
train_ds = df_to_dataset(train, 'target', batch_size=batch_size)

[(train_features, label_batch)] = train_ds.take(1)
print('Every feature:', list(train_features.keys()))
print('A batch of id:', train_features['ID'])
print('A batch of targets:', label_batch)
def get_normalization_layer(name, dataset):
    """Return a Normalization layer adapted to feature `name` of `dataset`."""
    normalizer = preprocessing.Normalization(axis=None)
    # Restrict the dataset to just this one feature, then learn its
    # mean/variance from the data.
    feature_ds = dataset.map(lambda x, y: x[name])
    normalizer.adapt(feature_ds)
    return normalizer
def get_category_encoding_layer(name, dataset, dtype, max_tokens=None):
    """Return a callable that one-hot encodes categorical feature `name`.

    A StringLookup (or IntegerLookup for non-string dtypes) is adapted to
    the feature's vocabulary, then chained with a CategoryEncoding layer.
    """
    # Lookup layer turns raw category values into integer indices.
    if dtype == 'string':
        index = preprocessing.StringLookup(max_tokens=max_tokens)
    else:
        index = preprocessing.IntegerLookup(max_tokens=max_tokens)

    # Learn the set of possible values from this feature alone.
    feature_ds = dataset.map(lambda x, y: x[name])
    index.adapt(feature_ds)

    # One-hot encode the looked-up indices.  The returned closure captures
    # both layers so they can be reused inside a functional model later.
    encoder = preprocessing.CategoryEncoding(num_tokens=index.vocabulary_size())
    return lambda feature: encoder(index(feature))
# Build the real training/validation/test pipelines with a larger batch.
# Only the training set is shuffled.
batch_size = 256
train_ds = df_to_dataset(train, 'target', batch_size=batch_size)
val_ds = df_to_dataset(val, 'target', shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, 'target', shuffle=False, batch_size=batch_size)
# Collect one Keras Input per column plus its preprocessed counterpart.
all_inputs = []
encoded_features = []

# Numeric features: standardize with statistics learned from the train set.
for header in ['Entry', 'Exit']:
    numeric_col = tf.keras.Input(shape=(1,), name=header)
    normalization_layer = get_normalization_layer(header, train_ds)
    all_inputs.append(numeric_col)
    encoded_features.append(normalization_layer(numeric_col))

# String categorical features: vocabulary lookup followed by one-hot.
categorical_cols = ['Date', 'Time', 'Weather', 'Parking']
for header in categorical_cols:
    categorical_col = tf.keras.Input(shape=(1,), name=header, dtype='string')
    encoding_layer = get_category_encoding_layer(header, train_ds,
                                                 dtype='string', max_tokens=5)
    all_inputs.append(categorical_col)
    encoded_features.append(encoding_layer(categorical_col))
# Assemble a small fully-connected network over the concatenated features.
all_features = tf.keras.layers.concatenate(encoded_features)
x = tf.keras.layers.Dense(64, activation="relu")(all_features)
x = tf.keras.layers.Dropout(0.5)(x)
# Single linear output unit: the model predicts a continuous slot count.
output = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(all_inputs, output)

# BUG FIX: the target ('Remaining space') is a continuous count, so this is
# a regression problem -- predictor.py fits a LinearRegression on the same
# target.  The original compiled with BinaryCrossentropy(from_logits=True)
# and an 'accuracy' metric, both of which are only meaningful for binary
# classification.  Use a squared-error loss and mean absolute error instead.
model.compile(optimizer='adam',
              loss=tf.keras.losses.MeanSquaredError(),
              metrics=[tf.keras.metrics.MeanAbsoluteError(name='mae')])

# rankdir='LR' is used to make the graph horizontal.
# tf.keras.utils.plot_model(model, show_shapes=True, rankdir="LR")

model.fit(train_ds, epochs=1, validation_data=val_ds)

loss, mae = model.evaluate(test_ds)
print("Test MSE:", loss, "- mean absolute error:", mae, "slots")
\ No newline at end of file
predictor.py
0 → 100644
View file @
2f5a9ce3
import
pandas
as
pd
import
numpy
as
np
from
sklearn
import
linear_model
from
sklearn
import
preprocessing
,
svm
from
sklearn.model_selection
import
train_test_split
from
sklearn.linear_model
import
LinearRegression
from
sklearn.compose
import
ColumnTransformer
from
sklearn.preprocessing
import
OneHotEncoder
import
warnings
warnings
.
filterwarnings
(
"ignore"
)
# Load the dataset and discard rows with missing values.
df = pd.read_csv('Database_Final.csv')
df.dropna(inplace=True)

# One-hot encode the categorical columns.  drop_first=True keeps k-1 dummy
# columns per category (the alphabetically-first level becomes the all-zero
# baseline), and the dummies are appended to the frame.
encoded = pd.get_dummies(df[['Date', 'Time', 'Weather', 'Parking']],
                         drop_first=True)
df = df.join(encoded)

# Move the regression target to the last column and drop everything the
# model must not see: the raw categoricals, the ID, the target's original
# column, and the CSV's stray index column.
df['target'] = df['Remaining space']
df = df.drop(columns=['Unnamed: 0', 'Remaining space', 'ID',
                      'Date', 'Time', 'Weather', 'Parking'])

my_list = list(df)

# Features are every column but the last; the target is the last column.
x = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
# Hold out a quarter of the rows for evaluation and fit a linear model.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
regr = LinearRegression()
regr.fit(X_train, y_train)
# print(regr.score(X_test, y_test))
# print(regr.predict([[14,7,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]))
# Option lists shown in the prompts below; the user picks a 1-based index
# and we map it back to the matching label.
date = ['Monday', 'Saturday', 'Sunday', 'Thursday', 'Tuesday', 'Wednesday']
weather = ['rainy', 'sunny', 'cloudy']
parking = ['BAMBALAPITIYA', 'BORELLA', 'KOLLUPITIYA', 'WELLAWATHTHA']
time = [
    '0700 - 0715', '0715 - 0730', '0730 - 0745', '0745 - 0800',
    '0800 - 0815', '0815 - 0830', '0830 - 0845', '0845 - 0900',
]
# Ask the user for each categorical choice (as a 1-based index into the
# lists above) plus the raw entry/exit counts.
Date_i = input('Date: [Monday, Saturday, Sunday, Thursday, Tuesday, Wednesday] : select a value 1 - 6 :')
Time_i = input('Time: [0700 - 0715, 0715 - 0730, 0730 - 0745, 0745 - 0800,0800 - 0815, 0815 - 0830, 0830 - 0845,0845 - 0900] : select a value 1 - 8 :')
Weather_i = input('Weather: [rainy, sunny, cloudy] : select a value 1 - 3 :')
Parking_i = input('Parking: [BAMBALAPITIYA, BORELLA, KOLLUPITIYA, WELLAWATHTHA] : select a value 1 - 4 :')
Entry = input('Entry :')
Exit = input('Exit :')

# Convert each 1-based selection into its label.
Date = date[int(Date_i) - 1]
Time = time[int(Time_i) - 1]
Weather = weather[int(Weather_i) - 1]
Parking = parking[int(Parking_i) - 1]
def norm(r):
    """Clamp a predicted slot count so it is never negative.

    The linear model can extrapolate below zero, and a negative number of
    free parking slots is meaningless.

    Args:
        r: the predicted slot count (int or float).

    Returns:
        ``r`` unchanged when it is non-negative, otherwise ``0``.
    """
    # BUG FIX: the original tested `int(r) < 0`, which truncates toward
    # zero, so a small negative float such as -0.5 passed the check and
    # was returned negative.  Compare the value itself instead.
    return r if r >= 0 else 0
def encode(Date, Time, Weather, Parking, Entry, Exit):
    """Build the 20-element feature vector the linear model was trained on.

    Training one-hot encoded the categoricals with
    ``pd.get_dummies(..., drop_first=True)``, which sorts each column's
    categories alphabetically and drops the first, so the dropped baseline
    category must encode as an all-zero row:

      * Date    -> 6 dummies (baseline is the dropped first day in the data)
      * Weather -> 2 dummies (baseline 'cloudy')
      * Parking -> 3 dummies (baseline 'BAMBALAPITIYA')
      * Time    -> 7 dummies (baseline '0700 - 0715')

    Layout: [Entry, Exit] + date + weather + parking + time
            = 2 + 6 + 2 + 3 + 7 = 20 features, matching the example vector
    in the commented-out regr.predict call above.

    Args:
        Date, Time, Weather, Parking: labels chosen by the user.
        Entry, Exit: numeric counts (strings from input() are accepted and
            coerced to float).

    Returns:
        A flat list of 20 numbers ready for ``regr.predict``.
    """
    date = ['Monday', 'Saturday', 'Sunday', 'Thursday', 'Tuesday', 'Wednesday']
    date_val = [
        [1, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0],
        [0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 1, 0],
        [0, 0, 0, 0, 0, 1],
    ]

    weather = ['rainy', 'sunny', 'cloudy']
    # BUG FIX: 'cloudy' is the dropped baseline and must encode as [0, 0];
    # the original mapped it to [0, 1], i.e. the same row as 'sunny'.
    weather_val = [[1, 0], [0, 1], [0, 0]]

    parking = ['BAMBALAPITIYA', 'BORELLA', 'KOLLUPITIYA', 'WELLAWATHTHA']
    # BUG FIX: 'BAMBALAPITIYA' is the dropped baseline -> all zeros; the
    # original gave it [1, 0, 0], colliding with 'BORELLA'.
    parking_val = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]]

    time = ['0700 - 0715', '0715 - 0730', '0730 - 0745', '0745 - 0800',
            '0800 - 0815', '0815 - 0830', '0830 - 0845', '0845 - 0900']
    # BUG FIX: '0700 - 0715' is the dropped baseline -> all zeros; the
    # original duplicated the '0715 - 0730' row for it.
    time_val = [
        [0, 0, 0, 0, 0, 0, 0],
        [1, 0, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 1, 0],
        [0, 0, 0, 0, 0, 0, 1],
    ]

    listofval = []
    # Coerce the numeric inputs (they arrive as strings from input()).
    listofval.append([float(Entry), float(Exit)])
    # Look each label up directly instead of the original flag-counter loops.
    listofval.append(date_val[date.index(Date)])
    listofval.append(weather_val[weather.index(Weather)])
    listofval.append(parking_val[parking.index(Parking)])
    listofval.append(time_val[time.index(Time)])

    flattened = [val for sublist in listofval for val in sublist]
    return flattened
# Encode the user's choices into the model's feature layout, predict,
# round to a whole number of slots, and clamp the result at zero.
inpt = encode(Date, Time, Weather, Parking, Entry, Exit)
s = regr.predict([inpt])
r = int(round(s[0]))
out = norm(r)
print(f'remaining parking slots are {out}.')
\ No newline at end of file
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment