Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
2
22_23-J 50
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
22_23-J 50
22_23-J 50
Commits
19ec8fbb
Commit
19ec8fbb
authored
May 15, 2023
by
MiyelandiMeerium
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
lib folder added
parent
072c6a40
Changes
16
Expand all
Hide whitespace changes
Inline
Side-by-side
Showing
16 changed files
with
1160 additions
and
0 deletions
+1160
-0
Frontend/LookAI/lib/main.dart
Frontend/LookAI/lib/main.dart
+41
-0
Frontend/LookAI/lib/models/face_data_model.dart
Frontend/LookAI/lib/models/face_data_model.dart
+70
-0
Frontend/LookAI/lib/models/models.dart
Frontend/LookAI/lib/models/models.dart
+1
-0
Frontend/LookAI/lib/services/custom/age_classifier.dart
Frontend/LookAI/lib/services/custom/age_classifier.dart
+97
-0
Frontend/LookAI/lib/services/custom/custom.dart
Frontend/LookAI/lib/services/custom/custom.dart
+3
-0
Frontend/LookAI/lib/services/custom/emotion_classifier.dart
Frontend/LookAI/lib/services/custom/emotion_classifier.dart
+125
-0
Frontend/LookAI/lib/services/custom/gender_classifier.dart
Frontend/LookAI/lib/services/custom/gender_classifier.dart
+102
-0
Frontend/LookAI/lib/services/face_detection_service.dart
Frontend/LookAI/lib/services/face_detection_service.dart
+108
-0
Frontend/LookAI/lib/services/image_picker_service.dart
Frontend/LookAI/lib/services/image_picker_service.dart
+46
-0
Frontend/LookAI/lib/services/services.dart
Frontend/LookAI/lib/services/services.dart
+3
-0
Frontend/LookAI/lib/views/home.dart
Frontend/LookAI/lib/views/home.dart
+74
-0
Frontend/LookAI/lib/views/image_classification_page.dart
Frontend/LookAI/lib/views/image_classification_page.dart
+166
-0
Frontend/LookAI/lib/views/video_classification_page.dart
Frontend/LookAI/lib/views/video_classification_page.dart
+293
-0
Frontend/LookAI/lib/views/views.dart
Frontend/LookAI/lib/views/views.dart
+1
-0
Frontend/LookAI/lib/widgets/face_painter.dart
Frontend/LookAI/lib/widgets/face_painter.dart
+29
-0
Frontend/LookAI/lib/widgets/widgets.dart
Frontend/LookAI/lib/widgets/widgets.dart
+1
-0
No files found.
Frontend/LookAI/lib/main.dart
0 → 100644
View file @
19ec8fbb
import
'package:emotion_detection/services/face_detection_service.dart'
;
import
'package:emotion_detection/views/views.dart'
;
import
'package:flutter/material.dart'
;
import
'package:provider/provider.dart'
;
/// Application entry point.
///
/// Ensures the Flutter binding is initialized before [runApp] so that
/// platform channels/plugins can be used during startup, then launches
/// the root [EmotionDetectionApp] widget. [args] is unused.
void main(List<String> args) {
  WidgetsFlutterBinding.ensureInitialized();
  runApp(const EmotionDetectionApp());
}
/// Root widget of the Look AI application.
///
/// NOTE(review): this widget holds no mutable state of its own — it could
/// likely be a StatelessWidget; kept as a StatefulWidget to match the
/// existing structure (its State class builds the provider/app tree).
class EmotionDetectionApp extends StatefulWidget {
  /// Creates the root app widget.
  const EmotionDetectionApp({super.key});

  @override
  State<EmotionDetectionApp> createState() => _EmotionDetectionStateApp();
}
/// State for [EmotionDetectionApp]: wires up the [FaceDetectionsService]
/// provider and builds the [MaterialApp] shell.
class _EmotionDetectionStateApp extends State<EmotionDetectionApp> {
  @override
  Widget build(BuildContext context) {
    // Same widget tree as before, assembled via named locals for clarity.
    final appTheme = ThemeData(
      colorScheme: ColorScheme.fromSeed(seedColor: Colors.blue),
      elevatedButtonTheme: ElevatedButtonThemeData(
        style: ElevatedButton.styleFrom(
          shape: const StadiumBorder(),
        ),
      ),
    );
    final materialApp = MaterialApp(
      title: "Look AI",
      theme: appTheme,
      themeMode: ThemeMode.light,
      home: const HomePage(),
    );
    // Expose a single app-wide FaceDetectionsService to the whole tree.
    return MultiProvider(
      providers: [
        ChangeNotifierProvider<FaceDetectionsService>(
          create: (_) => FaceDetectionsService(),
        ),
      ],
      child: materialApp,
    );
  }
}
\ No newline at end of file
Frontend/LookAI/lib/models/face_data_model.dart
0 → 100644
View file @
19ec8fbb
import
'package:google_mlkit_face_detection/google_mlkit_face_detection.dart'
;
import
'package:image/image.dart'
as
img
;
import
'package:tflite_flutter_helper/tflite_flutter_helper.dart'
;
/// Aggregated result for a single detected face: the ML Kit [face], the
/// cropped face [image], and the classifier outputs for [gender], [age]
/// and [emotion].
class FaceData {
  Face face;
  img.Image? image;
  // Classifier outputs. By construction elsewhere in this library:
  // gender.label is "Male"/"Female"; age.score carries the numeric
  // predicted age; emotion.label is e.g. "Happy" — TODO confirm against
  // the classifier implementations if they change.
  Category gender;
  Category age;
  Category emotion;
  double? smilingProbability;

  FaceData({
    required this.face,
    this.image,
    required this.gender,
    required this.age,
    required this.emotion,
    this.smilingProbability,
  });

  /// Builds a human-readable sentence, e.g. "25 Years Old Man looking happy".
  String getDescription() {
    return "${_agePart()} ${_genderPart()} ${_lookPart()}";
  }

  /// Age phrase; an age score below 1 is treated as "unknown".
  String _agePart() {
    if (age.score < 1) {
      return "Unknown Years Old";
    } else if (age.score < 2) {
      return "One Year Old";
    } else {
      return "${age.label} Years Old";
    }
  }

  /// Gender phrase, adjusted for age band (unknown / child / teen / adult).
  String _genderPart() {
    final bool isMale = gender.label.toLowerCase() == "male";
    if (age.score < 1) {
      return isMale ? "Male" : "Female";
    } else if (age.score < 12) {
      return isMale ? "Child Boy" : "Child Girl";
    } else if (age.score < 18) {
      return isMale ? "Teenage Boy" : "Teenage Girl";
    } else {
      // Fixed grammar: singular "Man" (was "Men") to match "Woman".
      return isMale ? "Man" : "Woman";
    }
  }

  /// Emotion phrase, e.g. "looking happy".
  String _lookPart() {
    return "looking ${emotion.label.toLowerCase()}";
  }
}
Frontend/LookAI/lib/models/models.dart
0 → 100644
View file @
19ec8fbb
export
'face_data_model.dart'
;
\ No newline at end of file
Frontend/LookAI/lib/services/custom/age_classifier.dart
0 → 100644
View file @
19ec8fbb
import
'dart:developer'
;
import
'dart:math'
hide
log
;
import
'package:image/image.dart'
;
import
'package:tflite_flutter/tflite_flutter.dart'
;
import
'package:tflite_flutter_helper/tflite_flutter_helper.dart'
;
/// TFLite-based age classifier.
///
/// Loads `models/age_detection.tflite` and predicts a numeric age for a
/// cropped face image. NOTE(review): the constructor fires [loadModel]
/// without awaiting it; calling [predict] before the model finishes
/// loading would hit uninitialized `late` fields — confirm callers wait.
class AgeClassifier {
  late Interpreter interpreter;
  late InterpreterOptions _interpreterOptions;
  late List<int> _inputShape;
  late List<int> _outputShape;
  late TensorImage _inputImage;
  late TensorBuffer _outputBuffer;
  late TfLiteType _inputType;
  late TfLiteType _outputType;
  late SequentialProcessor<TensorBuffer> _probabilityProcessor;
  late String _modelName;
  late NormalizeOp _postProcessNormalizeOp;

  /// Asset path of the TFLite model.
  String get modelName => _modelName;

  /// Post-processing normalization applied to the raw model output.
  NormalizeOp get postProcessNormalizeOp => _postProcessNormalizeOp;

  /// [numThreads], when given, sets the interpreter thread count.
  AgeClassifier({int? numThreads}) {
    _interpreterOptions = InterpreterOptions();
    if (numThreads != null) {
      _interpreterOptions.threads = numThreads;
    }
    _modelName = 'models/age_detection.tflite';
    // NormalizeOp(0, 1) is effectively an identity pass-through here.
    _postProcessNormalizeOp = NormalizeOp(0, 1);
    loadModel(); // fire-and-forget; see class note above
  }

  /// Loads the interpreter from assets and caches the input/output tensor
  /// shapes and types; errors are logged and swallowed.
  Future<void> loadModel() async {
    try {
      interpreter =
          await Interpreter.fromAsset(modelName, options: _interpreterOptions);
      _inputShape = interpreter.getInputTensor(0).shape;
      _inputType = interpreter.getInputTensor(0).type;
      _outputShape = interpreter.getOutputTensor(0).shape;
      _outputType = interpreter.getOutputTensor(0).type;
      _outputBuffer = TensorBuffer.createFixedSize(_outputShape, _outputType);
      _probabilityProcessor =
          TensorProcessorBuilder().add(postProcessNormalizeOp).build();
    } catch (e) {
      log('Unable to create interpreter, Caught Exception: ${e.toString()}');
    }
  }

  /// Center-crops [_inputImage] to a square, then resizes it to the
  /// model's expected input dimensions (nearest-neighbour).
  TensorImage _preProcess() {
    int cropSize = min(_inputImage.height, _inputImage.width);
    return ImageProcessorBuilder()
        .add(ResizeWithCropOrPadOp(cropSize, cropSize))
        .add(ResizeOp(
            _inputShape[1], _inputShape[2], ResizeMethod.NEAREST_NEIGHBOUR))
        .build()
        .process(_inputImage);
  }

  /// Runs inference on [image] and returns a [Category] whose label is the
  /// integer age (or "Unknown" when the model outputs 0) and whose score is
  /// the raw (fractional) predicted age.
  Category predict(Image image) {
    _inputImage = TensorImage(_inputType);
    _inputImage.loadImage(image);
    _inputImage = _preProcess();
    TensorBuffer inputBuffer =
        normalizeInputBuffer(_inputImage.getTensorBuffer());
    interpreter.run(inputBuffer.buffer, _outputBuffer.getBuffer());
    double result =
        _probabilityProcessor.process(_outputBuffer).getDoubleList().first;
    int age = result.toInt();
    return Category("${age == 0.0 ? "Unknown" : age}", result);
  }

  /// Releases the underlying interpreter.
  void close() {
    interpreter.close();
  }

  /// Returns a copy of [input] with every element scaled from [0, 255]
  /// into [0, 1]; preserves the input's shape.
  TensorBuffer normalizeInputBuffer(TensorBuffer input) {
    TensorBuffer output;
    List<double> values =
        List.filled(input.getFlatSize(), 0.0, growable: false);
    for (int i = 0; i < input.getFlatSize(); i++) {
      values[i] = input.getDoubleValue(i) / 255.0;
    }
    if (input.isDynamic) {
      output = TensorBuffer.createDynamic(_inputType);
    } else {
      output = TensorBuffer.createFixedSize(_inputShape, _inputType);
    }
    output.loadList(values, shape: input.getShape());
    return output;
  }
}
\ No newline at end of file
Frontend/LookAI/lib/services/custom/custom.dart
0 → 100644
View file @
19ec8fbb
export
'gender_classifier.dart'
;
export
'age_classifier.dart'
;
export
'emotion_classifier.dart'
;
\ No newline at end of file
Frontend/LookAI/lib/services/custom/emotion_classifier.dart
0 → 100644
View file @
19ec8fbb
import
'dart:developer'
;
import
'dart:math'
hide
log
;
import
'package:image/image.dart'
;
import
'package:tflite_flutter/tflite_flutter.dart'
;
import
'package:tflite_flutter_helper/tflite_flutter_helper.dart'
;
/// TFLite-based emotion classifier over 7 classes (Angry, Disgust, Fear,
/// Happy, Neutral, Sad, Surprise).
///
/// Loads `models/emotion_detection.tflite`. NOTE(review): the constructor
/// fires [loadModel] without awaiting it; calling [predict] before the
/// model finishes loading would hit uninitialized `late` fields.
class EmotionClassifier {
  late Interpreter interpreter;
  late InterpreterOptions _interpreterOptions;
  late List<int> _inputShape;
  late List<int> _outputShape;
  late TensorImage _inputImage;
  late TensorBuffer _outputBuffer;
  late TfLiteType _inputType;
  late TfLiteType _outputType;
  late SequentialProcessor<TensorBuffer> _probabilityProcessor;
  late String _modelName;
  late NormalizeOp _postProcessNormalizeOp;

  /// Asset path of the TFLite model.
  String get modelName => _modelName;

  /// Post-processing normalization applied to the raw model output.
  NormalizeOp get postProcessNormalizeOp => _postProcessNormalizeOp;

  /// [numThreads], when given, sets the interpreter thread count.
  EmotionClassifier({int? numThreads}) {
    _interpreterOptions = InterpreterOptions();
    if (numThreads != null) {
      _interpreterOptions.threads = numThreads;
    }
    _modelName = 'models/emotion_detection.tflite';
    // NormalizeOp(0, 1) is effectively an identity pass-through here.
    _postProcessNormalizeOp = NormalizeOp(0, 1);
    loadModel(); // fire-and-forget; see class note above
  }

  /// Loads the interpreter from assets and caches the input/output tensor
  /// shapes and types; errors are logged and swallowed.
  Future<void> loadModel() async {
    try {
      interpreter =
          await Interpreter.fromAsset(modelName, options: _interpreterOptions);
      _inputShape = interpreter.getInputTensor(0).shape;
      _inputType = interpreter.getInputTensor(0).type;
      _outputShape = interpreter.getOutputTensor(0).shape;
      _outputType = interpreter.getOutputTensor(0).type;
      _outputBuffer = TensorBuffer.createFixedSize(_outputShape, _outputType);
      _probabilityProcessor =
          TensorProcessorBuilder().add(postProcessNormalizeOp).build();
    } catch (e) {
      log('Unable to create interpreter, Caught Exception: ${e.toString()}');
    }
  }

  /// Center-crops [_inputImage] to a square, then resizes it to the
  /// model's expected input dimensions (nearest-neighbour).
  TensorImage _preProcess() {
    int cropSize = min(_inputImage.height, _inputImage.width);
    return ImageProcessorBuilder()
        .add(ResizeWithCropOrPadOp(cropSize, cropSize))
        .add(ResizeOp(
            _inputShape[1], _inputShape[2], ResizeMethod.NEAREST_NEIGHBOUR))
        .build()
        .process(_inputImage);
  }

  /// Runs inference on [image] and returns the highest-probability emotion
  /// as a [Category] (label = class name, score = its probability).
  Category predict(Image image) {
    _inputImage = TensorImage(_inputType);
    _inputImage.loadImage(image);
    _inputImage = _preProcess();
    // Convert to single-channel (grayscale) then scale into [0, 1].
    TensorBuffer inputBuffer =
        reshapeImageTensorBuffer(_inputImage.getTensorBuffer());
    inputBuffer = normalizeInputBuffer(inputBuffer);
    interpreter.run(inputBuffer.buffer, _outputBuffer.getBuffer());
    // Label order must match the model's output vector ordering.
    Map<String, double> labeledProb = TensorLabel.fromList([
      'Angry',
      'Disgust',
      'Fear',
      'Happy',
      'Neutral',
      'Sad',
      'Surprise'
    ], _probabilityProcessor.process(_outputBuffer))
        .getMapWithFloatValue();
    // Sort descending by probability and take the top entry.
    final sortedProb = labeledProb.entries.toList()
      ..sort((e1, e2) => e2.value.compareTo(e1.value));
    return Category(sortedProb[0].key, sortedProb[0].value);
  }

  /// Releases the underlying interpreter.
  void close() {
    interpreter.close();
  }

  // Reshape Image Tensor Buffer to Gray Scale
  /// Converts a 3-channel buffer to single-channel using the standard
  /// luma weights (0.299 R + 0.587 G + 0.114 B). A last-dimension of 1 is
  /// passed through; anything other than 1 or 3 channels throws.
  ///
  /// NOTE(review): the output size 1*shape[0]*shape[1] only matches
  /// flatSize/3 when the input is rank-3 [h, w, 3] — confirm the shape
  /// produced by TensorImage.getTensorBuffer().
  TensorBuffer reshapeImageTensorBuffer(TensorBuffer input) {
    List<int> shape = input.getShape();
    if (shape.last == 1) {
      // Already grayscale: round-trip through TensorImage unchanged.
      TensorImage image = TensorImage(TfLiteType.float32);
      image.loadTensorBuffer(input);
      return image.getTensorBuffer();
    } else if (shape.last != 3) {
      throw Exception('Input TensorBuffer shape is not supported.');
    }
    int flatSize = input.getFlatSize();
    List<double> values = List.filled(
        1 * shape[0] * shape[1] * 1, 0.0,
        growable: false);
    // Collapse each RGB triple into one luma value.
    for (int i = 0; i < flatSize; i += 3) {
      values[i ~/ 3] = (input.getDoubleValue(i) * 0.299 +
          input.getDoubleValue(i + 1) * 0.587 +
          input.getDoubleValue(i + 2) * 0.114);
    }
    TensorBuffer output;
    if (input.isDynamic) {
      output = TensorBuffer.createDynamic(_inputType);
    } else {
      output = TensorBuffer.createFixedSize(_inputShape, _inputType);
    }
    // Output shape: batch of 1, single channel.
    output.loadList(values, shape: [1, shape[0], shape[1], 1]);
    return output;
  }

  /// Returns a copy of [input] with every element scaled from [0, 255]
  /// into [0, 1]; preserves the input's shape.
  TensorBuffer normalizeInputBuffer(TensorBuffer input) {
    TensorBuffer output;
    List<double> values =
        List.filled(input.getFlatSize(), 0.0, growable: false);
    for (int i = 0; i < input.getFlatSize(); i++) {
      values[i] = input.getDoubleValue(i) / 255.0;
    }
    if (input.isDynamic) {
      output = TensorBuffer.createDynamic(_inputType);
    } else {
      output = TensorBuffer.createFixedSize(_inputShape, _inputType);
    }
    output.loadList(values, shape: input.getShape());
    return output;
  }
}
\ No newline at end of file
Frontend/LookAI/lib/services/custom/gender_classifier.dart
0 → 100644
View file @
19ec8fbb
import
'dart:developer'
;
import
'dart:math'
hide
log
;
import
'package:image/image.dart'
;
import
'package:tflite_flutter/tflite_flutter.dart'
;
import
'package:tflite_flutter_helper/tflite_flutter_helper.dart'
;
/// TFLite-based binary gender classifier.
///
/// Loads `models/gender_detection.tflite`. NOTE(review): the constructor
/// fires [loadModel] without awaiting it; calling [predict] before the
/// model finishes loading would hit uninitialized `late` fields.
class GenderClassifier {
  late Interpreter interpreter;
  late InterpreterOptions _interpreterOptions;
  late List<int> _inputShape;
  late List<int> _outputShape;
  late TensorImage _inputImage;
  late TensorBuffer _outputBuffer;
  late TfLiteType _inputType;
  late TfLiteType _outputType;
  late SequentialProcessor<TensorBuffer> _probabilityProcessor;
  late String _modelName;
  late NormalizeOp _postProcessNormalizeOp;

  /// Asset path of the TFLite model.
  String get modelName => _modelName;

  /// Post-processing normalization applied to the raw model output.
  NormalizeOp get postProcessNormalizeOp => _postProcessNormalizeOp;

  /// [numThreads], when given, sets the interpreter thread count.
  GenderClassifier({int? numThreads}) {
    _interpreterOptions = InterpreterOptions();
    if (numThreads != null) {
      _interpreterOptions.threads = numThreads;
    }
    _modelName = 'models/gender_detection.tflite';
    // NormalizeOp(0, 1) is effectively an identity pass-through here.
    _postProcessNormalizeOp = NormalizeOp(0, 1);
    loadModel(); // fire-and-forget; see class note above
  }

  /// Loads the interpreter from assets and caches the input/output tensor
  /// shapes and types; errors are logged and swallowed.
  Future<void> loadModel() async {
    try {
      interpreter =
          await Interpreter.fromAsset(modelName, options: _interpreterOptions);
      _inputShape = interpreter.getInputTensor(0).shape;
      _inputType = interpreter.getInputTensor(0).type;
      _outputShape = interpreter.getOutputTensor(0).shape;
      _outputType = interpreter.getOutputTensor(0).type;
      _outputBuffer = TensorBuffer.createFixedSize(_outputShape, _outputType);
      _probabilityProcessor =
          TensorProcessorBuilder().add(postProcessNormalizeOp).build();
    } catch (e) {
      log('Unable to create interpreter, Caught Exception: ${e.toString()}');
    }
  }

  /// Center-crops [_inputImage] to a square, then resizes it to the
  /// model's expected input dimensions (nearest-neighbour).
  TensorImage _preProcess() {
    int cropSize = min(_inputImage.height, _inputImage.width);
    return ImageProcessorBuilder()
        .add(ResizeWithCropOrPadOp(cropSize, cropSize))
        .add(ResizeOp(
            _inputShape[1], _inputShape[2], ResizeMethod.NEAREST_NEIGHBOUR))
        .build()
        .process(_inputImage);
  }

  /// Runs inference on [image] and returns "Female" when the model's
  /// first output value (rounded to 2 decimals) is >= 0.5, else "Male";
  /// the [Category] score is that rounded value.
  Category predict(Image image) {
    _inputImage = TensorImage(_inputType);
    _inputImage.loadImage(image);
    _inputImage = _preProcess();
    TensorBuffer inputBuffer =
        normalizeInputBuffer(_inputImage.getTensorBuffer());
    interpreter.run(inputBuffer.buffer, _outputBuffer.getBuffer());
    List<double> result =
        _probabilityProcessor.process(_outputBuffer).getDoubleList();
    log('Gender Result: $result');
    // Round to 2 decimal places before thresholding.
    double prediction = double.parse(result.first.toStringAsFixed(2));
    if (prediction >= 0.5) {
      return Category("Female", prediction);
    } else {
      return Category("Male", prediction);
    }
  }

  /// Releases the underlying interpreter.
  void close() {
    interpreter.close();
  }

  /// Returns a copy of [input] with every element scaled from [0, 255]
  /// into [0, 1]; preserves the input's shape.
  TensorBuffer normalizeInputBuffer(TensorBuffer input) {
    TensorBuffer output;
    List<double> values =
        List.filled(input.getFlatSize(), 0.0, growable: false);
    for (int i = 0; i < input.getFlatSize(); i++) {
      values[i] = input.getDoubleValue(i) / 255.0;
    }
    if (input.isDynamic) {
      output = TensorBuffer.createDynamic(_inputType);
    } else {
      output = TensorBuffer.createFixedSize(_inputShape, _inputType);
    }
    output.loadList(values, shape: input.getShape());
    return output;
  }
}
\ No newline at end of file
Frontend/LookAI/lib/services/face_detection_service.dart
0 → 100644
View file @
19ec8fbb
import
'dart:async'
;
import
'dart:developer'
;
import
'dart:io'
;
import
'dart:ui'
as
ui
;
import
'package:emotion_detection/models/models.dart'
;
import
'package:flutter/material.dart'
;
import
'package:google_mlkit_face_detection/google_mlkit_face_detection.dart'
;
import
'custom/custom.dart'
;
import
'image_picker_service.dart'
;
/// ChangeNotifier service that runs the full face-analysis pipeline:
/// ML Kit face detection on a selected image, then per-face age, gender
/// and emotion classification. Results are exposed both through
/// [detectedFaces] (with [notifyListeners]) and the [onFaceDetect] stream.
class FaceDetectionsService with ChangeNotifier {
  // ML Kit options: classification enabled (gives smilingProbability);
  // faces smaller than half the image dimension are ignored.
  final FaceDetectorOptions _options = FaceDetectorOptions(
    enableClassification: true,
    minFaceSize: 0.5,
  );
  File? _imageFile;
  Size _imageSize = Size.zero;
  final List<FaceData> _detectedFaces = List.empty(growable: true);
  // Broadcast so multiple pages can listen concurrently.
  final StreamController<List<FaceData>> _onFaceDetect =
      StreamController<List<FaceData>>.broadcast();
  late AgeClassifier _ageClassifier;
  late GenderClassifier _genderClassifier;
  late EmotionClassifier _emotionClassifier;

  // NOTE(review): each classifier constructor kicks off an async model
  // load that is not awaited; processImage() called immediately after
  // construction may race that load — confirm.
  FaceDetectionsService() {
    _ageClassifier = AgeClassifier();
    _genderClassifier = GenderClassifier();
    _emotionClassifier = EmotionClassifier();
  }

  /// The currently loaded image file, if any.
  File? get selectedImage => _imageFile;

  /// Resolution of the loaded image ([Size.zero] when none).
  Size get imageSize => _imageSize;

  /// Faces found by the last [processImage] run (live list, not a copy).
  List<FaceData> get detectedFaces => _detectedFaces;

  /// Emits the face list after each processing run (empty list on
  /// clear/error).
  Stream<List<FaceData>> get onFaceDetect => _onFaceDetect.stream;

  /// Loads [imageFile], clears previous results, records its resolution,
  /// notifies listeners, then runs the detection pipeline.
  Future loadImage(File imageFile) async {
    _imageFile = imageFile;
    _detectedFaces.clear();
    _imageSize = await _getImageResolution();
    notifyListeners();
    await processImage();
  }

  /// Resets all state; notifies listeners only when [notify] is true.
  void clearImage({bool notify = false}) {
    _imageFile = null;
    _detectedFaces.clear();
    _onFaceDetect.add(List.empty());
    _imageSize = Size.zero;
    if (notify) notifyListeners();
  }

  /// Wraps the selected file for ML Kit; throws if no image is loaded.
  InputImage _getInputImage() {
    if (_imageFile != null) {
      return InputImage.fromFile(_imageFile!);
    }
    throw Exception("Load Image File before calling this method!");
  }

  /// Decodes the selected file to read its pixel dimensions.
  /// Returns [Size.zero] when no file is loaded.
  Future<Size> _getImageResolution() async {
    if (_imageFile == null) {
      return Size.zero;
    }
    // Bridge the callback-based decodeImageFromList API to a Future.
    final Completer<ui.Image> completer = Completer();
    ui.decodeImageFromList(_imageFile!.readAsBytesSync(), (ui.Image img) {
      return completer.complete(img);
    });
    return completer.future.then(
        (value) => Size(value.width.toDouble(), value.height.toDouble()));
  }

  /// Detects faces in the selected image, crops each face, runs the three
  /// classifiers on the crop, and publishes the results. On any error the
  /// results are cleared and an empty list is emitted; the detector is
  /// closed on both paths.
  Future processImage() async {
    FaceDetector faceDetector = FaceDetector(options: _options);
    try {
      InputImage inputImage = _getInputImage();
      final List<Face> faces = await faceDetector.processImage(inputImage);
      log("Total Faces Detected: ${faces.length}");
      for (var element in faces) {
        log("Detected FaceData: ${element.boundingBox.toString()}");
        log("Detected smilingProbability: ${element.smilingProbability}");
        // Crop the face region out of the full image for classification.
        final image = await ImagePickerService.getCroppedImageData(
            _imageFile!, element.boundingBox);
        if (image != null) {
          final ageCategory = _ageClassifier.predict(image);
          final genderCategory = _genderClassifier.predict(image);
          final emotionCategory = _emotionClassifier.predict(image);
          log("Predicted Label: ${ageCategory.label} (${ageCategory.score})");
          log("Predicted Label: ${genderCategory.label} (${genderCategory.score})");
          log("Predicted Label: ${emotionCategory.label} (${emotionCategory.score})");
          _detectedFaces.add(
            FaceData(
                face: element,
                image: image,
                gender: genderCategory,
                age: ageCategory,
                emotion: emotionCategory,
                smilingProbability: element.smilingProbability),
          );
        } else {
          log("Image Error");
        }
      }
      notifyListeners();
      _onFaceDetect.add(_detectedFaces);
      faceDetector.close();
    } catch (e) {
      // Best-effort: log, reset results, and still close the detector.
      log(e.toString());
      _detectedFaces.clear();
      _onFaceDetect.add(List.empty());
      notifyListeners();
      faceDetector.close();
    }
  }
}
\ No newline at end of file
Frontend/LookAI/lib/services/image_picker_service.dart
0 → 100644
View file @
19ec8fbb
import
'dart:developer'
;
import
'dart:io'
;
import
'dart:ui'
;
import
'package:image_picker/image_picker.dart'
;
import
'package:image/image.dart'
as
img
;
/// Static helpers around [ImagePicker] plus face-crop support via the
/// `image` package. Not instantiable.
class ImagePickerService {
  ImagePickerService._();

  static final ImagePicker _imagePicker = ImagePicker();

  /// Shared pick routine: awaits [pick], converts its result to a [File],
  /// and maps both user cancellation and any thrown error (logged) to
  /// `null`.
  static Future<File?> _pickToFile(Future<XFile?> Function() pick) async {
    try {
      final XFile? picked = await pick();
      return picked == null ? null : File(picked.path);
    } catch (e) {
      log(e.toString());
      return null;
    }
  }

  /// Picks a still image from [imageSource] (gallery by default);
  /// `null` when cancelled or on error.
  static Future<File?> pickImage(
          {ImageSource imageSource = ImageSource.gallery}) =>
      _pickToFile(() => _imagePicker.pickImage(source: imageSource));

  /// Picks a video from [imageSource] (gallery by default);
  /// `null` when cancelled or on error.
  static Future<File?> pickVideo(
          {ImageSource imageSource = ImageSource.gallery}) =>
      _pickToFile(() => _imagePicker.pickVideo(source: imageSource));

  /// Decodes [imageFile] and returns the region described by [boundBox]
  /// (coordinates truncated to ints); `null` if decoding fails.
  static Future<img.Image?> getCroppedImageData(
      File imageFile, Rect boundBox) async {
    final decoded = img.decodeImage(imageFile.readAsBytesSync());
    if (decoded == null) {
      return null;
    }
    return img.copyCrop(
      decoded,
      boundBox.left.toInt(),
      boundBox.top.toInt(),
      boundBox.width.toInt(),
      boundBox.height.toInt(),
    );
  }
}
\ No newline at end of file
Frontend/LookAI/lib/services/services.dart
0 → 100644
View file @
19ec8fbb
export
'face_detection_service.dart'
;
export
'image_picker_service.dart'
;
export
'custom/custom.dart'
;
\ No newline at end of file
Frontend/LookAI/lib/views/home.dart
0 → 100644
View file @
19ec8fbb
import
'package:emotion_detection/services/services.dart'
;
import
'package:emotion_detection/views/image_classification_page.dart'
;
import
'package:emotion_detection/views/video_classification_page.dart'
;
import
'package:flutter/cupertino.dart'
;
import
'package:flutter/material.dart'
;
import
'package:provider/provider.dart'
;
/// Home screen: hosts the Image/Video segmented control and the page view
/// that switches between the two classification pages.
class HomePage extends StatefulWidget {
  /// Creates the home page.
  const HomePage({super.key});

  @override
  State<HomePage> createState() => _HomePageState();
}
/// State for [HomePage]: keeps the selected tab index and drives the
/// [PageView] from the Cupertino segmented control.
class _HomePageState extends State<HomePage> {
  // Currently selected tab: 0 = Image, 1 = Video.
  int currentIndex = 0;
  late PageController _pageController;

  @override
  void initState() {
    _pageController = PageController(initialPage: currentIndex);
    super.initState();
  }

  @override
  void dispose() {
    _pageController.dispose();
    super.dispose();
  }

  @override
  Widget build(BuildContext context) {
    // Rebuilds when the service notifies (listen defaults to true).
    final faceDetectionsService = Provider.of<FaceDetectionsService>(context);
    return Scaffold(
      appBar: AppBar(
        title: const Text("Look AI"),
      ),
      body: Column(
        children: [
          Container(
            padding:
                const EdgeInsets.symmetric(vertical: 16, horizontal: 16),
            width: double.infinity,
            child: CupertinoSlidingSegmentedControl(
              groupValue: currentIndex,
              onValueChanged: (value) {
                // Reset any loaded image before switching modes, then
                // animate to the chosen page (currentIndex itself is
                // updated by onPageChanged below).
                faceDetectionsService.clearImage();
                _pageController.animateToPage(value ?? 0,
                    duration: const Duration(milliseconds: 300),
                    curve: Curves.easeIn);
              },
              children: const {
                0: Text("Image"),
                1: Text("Video"),
              },
            ),
          ),
          Expanded(
            child: PageView(
              controller: _pageController,
              // Swiping disabled: navigation only via the control above.
              physics: const NeverScrollableScrollPhysics(),
              onPageChanged: (value) {
                setState(() {
                  currentIndex = value;
                });
              },
              children: const [
                ImageClassificationPage(),
                VideoClassificationPage(),
              ],
            ),
          ),
        ],
      ),
    );
  }
}
\ No newline at end of file
Frontend/LookAI/lib/views/image_classification_page.dart
0 → 100644
View file @
19ec8fbb
import
'dart:async'
;
import
'dart:io'
;
import
'package:adaptive_dialog/adaptive_dialog.dart'
;
import
'package:emotion_detection/models/face_data_model.dart'
;
import
'package:flutter/material.dart'
;
import
'package:flutter_tts/flutter_tts.dart'
;
import
'package:image_picker/image_picker.dart'
;
import
'package:provider/provider.dart'
;