Commit 0040477a
authored May 01, 2022 by chalaka78
maintenance date changes
parent f9f1b865
Showing 3 changed files with 105 additions and 26 deletions
Email_Generation.py   +99  −0
User/views.py          +6  −26
db.sqlite3             +0  −0
Email_Generation.py  (new file, 0 → 100644)
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import warnings

warnings.filterwarnings("ignore")

df = pd.read_csv('Data/MaintainData.csv')
#df = df.groupby('mailID')
#df = pd.DataFrame(df['Customer'].unique())
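# Label column 'a': the gap in days between consecutive service dates; the first
# row has no previous visit (NaT), so it falls back to a 30-day interval.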
df['a'] = pd.to_datetime(df['Date']).diff()
df['a'] = df['a'].astype(str).str.replace("days", "")
df['a'] = df['a'].astype(str).str.replace("NaT", "30")
df = df[(df["mailID"] == 'nbachalaka@gmail.com')]

X = df[['TakenTime', 'Price', 'MeterReading']]
y = df['a']
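# Fit a logistic regression classifier on the visit features; the day-gap strings
# in 'a' act as class labels, so predict() returns a day count for each row.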
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
Accuracy = logreg.score(X_test, y_test)

MeterReading = df['MeterReading'].mean()
TakenTime = df['TakenTime'].mean()
Price = df['Price'].mean()
print(Price)

new_input = [[TakenTime, Price, MeterReading]]
pred = logreg.predict(X)
# print(pred)
# Assign predicted dates to the dataframe
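# 'new_col' holds the predicted day gap and 'ComesON' the expected next service
# date (the recorded service date plus the predicted number of days).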
se = pd.Series(pred)
df['new_col'] = se.values
df['Date'] = pd.to_datetime(df['Date'])
df['ComesON'] = df['Date'] + pd.to_timedelta(df['new_col'].astype(int), unit='D')
print(df)
# Get Today Date
from datetime import date

today = date.today()
print(today)
# dd/mm/YY
d1 = today.strftime("%d/%m/%Y")
print(d1)
# 3 days before predicted date
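# (In the lines below, finalDate works out to today minus three days; the
# commented-out filter would keep rows whose predicted 'ComesON' equals it.)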
enddate = pd.to_datetime(d1) - pd.DateOffset(days=3)
print(enddate)
# print(enddate.strftime("%d/%m/%Y"))
finalDate = enddate.strftime("%d/%m/%Y")
# df = df[(df["ComesON"] == finalDate)]
print(df)

PredictedMails = df["mailID"].tolist()
#print(PredictedMails)
# mail server
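# Send a reminder over Gmail SMTP (SSL on port 465) to the addresses collected
# in PredictedMails.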
import smtplib

gmail_user = 'chalakasliit@gmail.com'
gmail_password = 'SLIIT2022%'

sent_from = gmail_user
to = PredictedMails  # customer mails
subject = 'Reminder about Your Vehicle Service'
body = 'Still 3 Days for your Vehicle service!'

email_text = """\
From: %s
To: %s
Subject: %s

%s
""" % (sent_from, ", ".join(to), subject, body)

try:
    server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    server.ehlo()
    server.login(gmail_user, gmail_password)
    server.sendmail(sent_from, to, email_text)
    server.close()
    print("Email sent")
except:
    print("No services available...")
# mail server
User/views.py
@@ -838,7 +838,7 @@ def NextMaintain(request):
     myForm = MyFormServices(request.POST)
     #myForm3 = MyForm3(request.POST or None)
-    # price prediction with logitic
+    # Next Service Date Prediction with logistic Regression
     if myForm.is_valid():
         customerid = myForm.cleaned_data['customerid']
@@ -862,8 +862,6 @@ def NextMaintain(request):
     dataset = df.head()
     # print(dataset)
     # do same but attach it to the dataframe
-    # df['Different'] = df.apply(lambda row: 45, axis=1)
     df['a'] = pd.to_datetime(df['Date']).diff()
     df['a'] = df['a'].astype(str).str.replace("days", "")
     df['a'] = df['a'].astype(str).str.replace("NaT", "30")
@@ -887,18 +885,16 @@ def NextMaintain(request):
     pred = int(pred)
     print("--------------Customer will come after days---------------------")
     # print(pred)
-    startdate = "7/7/2021"
+    startdate = "10/31/2021"
     enddate = pd.to_datetime(startdate) + pd.DateOffset(days=pred)
     print(enddate)
     df = df[:1]
-    # data = data.to_html()
     json_records = df.reset_index().to_json(orient='records')
     arr = []
     arr = json.loads(json_records)
     if (arr == []):
-        # data = data.to_html()
         json_records = df.reset_index().to_json(orient='records')
         arr = []
         arr = json.loads(json_records)
@@ -908,11 +904,8 @@ def NextMaintain(request):
     form = MyFormServices()
     enddate = enddate.strftime("%d/%m/%Y")
-    # parts = enddate.split(' ')
-    # message_count = int(parts[0])
-    # print(message_count)
-    #ARIMA SECTION
+    #Price Prediction with ARIMA
     df2 = pd.read_csv('Data/MaintainData.csv')
     df2 = df2[(df2["Customer"] == customerid)]
@@ -927,11 +920,6 @@ def NextMaintain(request):
     ## Cleaning up the data
     df.columns = ["Date", "Price"]
     rrr = df.head()
-    ## Drop last 2 rows
-    # df.drop(106, axis=0, inplace=True)
-    # df.tail()
-    # df.drop(105, axis=0, inplace=True)
-    # df.tail()
     # Convert Date into Datetime
     df['Date'] = pd.to_datetime(df['Date'])
@@ -941,7 +929,7 @@ def NextMaintain(request):
     df.describe()
     # df.plot()
     # plt.show()
-    ## Testing For Stationarity
+    ##Testing For Stationarity
     from statsmodels.tsa.stattools import adfuller
     test_result = adfuller(df['Price'])
@@ -959,31 +947,23 @@ def NextMaintain(request):
             "weak evidence against null hypothesis, time series has a unit root, indicating it is non-stationary ")
     adfuller_test(df['Price'])
-    # Differencing
+    #Differencing
     df['Price First Difference'] = df['Price'] - df['Price'].shift(1)
     df['Price'].shift(1)
     df['Seasonal First Difference'] = df['Price'] - df['Price'].shift(12)
-    # df.head(14)
-    # Again test dickey fuller test
+    #Again test dickey fuller test
     adfuller_test(df['Seasonal First Difference'].dropna())
     import statsmodels.api as sm
-    # For non-seasonal data
-    # p=1, d=1, q=0 or 1
-    # from statsmodels.tsa.arima_model import ARIMA
     from statsmodels.tsa.arima.model import ARIMA
     model = ARIMA(df['Price'], order=(1, 1, 1))
     model_fit = model.fit()
-    # model_fit.summary()
-    # df['forecast'] = model_fit.predict(start=8, end=8, dynamic=True)
-    # df[['Price','forecast']].plot(figsize=(12,8))
     model = sm.tsa.statespace.SARIMAX(df['Price'], order=(1, 1, 1), seasonal_order=(1, 1, 1, 12))
     results = model.fit()
     from pandas.tseries.offsets import DateOffset
     future_dates = [df.index[-1] + DateOffset(months=x) for x in range(0, 150)]
     future_datest_df = pd.DataFrame(index=future_dates[1:], columns=df.columns)
-    # future_datest_df.tail()
     future_df = pd.concat([df, future_datest_df])
db.sqlite3
No preview for this file type