Commit 19ec8fbb authored by MiyelandiMeerium's avatar MiyelandiMeerium

lib folder added

parent 072c6a40
import 'package:emotion_detection/services/face_detection_service.dart';
import 'package:emotion_detection/views/views.dart';
import 'package:flutter/material.dart';
import 'package:provider/provider.dart';
/// App entry point. [args] are unused but kept for tooling compatibility.
void main(List<String> args) {
// Bindings must be ready before runApp in case plugin channels are touched
// during the first frame.
WidgetsFlutterBinding.ensureInitialized();
runApp(const EmotionDetectionApp());
}
/// Root widget of the Look AI application.
class EmotionDetectionApp extends StatefulWidget {
const EmotionDetectionApp({super.key});
@override
State<EmotionDetectionApp> createState() => _EmotionDetectionStateApp();
}
class _EmotionDetectionStateApp extends State<EmotionDetectionApp> {
  /// App-wide theme: a blue seeded color scheme with pill-shaped
  /// elevated buttons.
  ThemeData get _appTheme => ThemeData(
        colorScheme: ColorScheme.fromSeed(seedColor: Colors.blue),
        elevatedButtonTheme: ElevatedButtonThemeData(
          style: ElevatedButton.styleFrom(
            shape: const StadiumBorder(),
          ),
        ),
      );

  @override
  Widget build(BuildContext context) {
    // The face-detection service is created once here and exposed to the
    // whole widget tree through Provider.
    return MultiProvider(
      providers: [
        ChangeNotifierProvider<FaceDetectionsService>(
          create: (_) => FaceDetectionsService(),
        ),
      ],
      child: MaterialApp(
        title: "Look AI",
        themeMode: ThemeMode.light,
        theme: _appTheme,
        home: const HomePage(),
      ),
    );
  }
}
\ No newline at end of file
import 'package:google_mlkit_face_detection/google_mlkit_face_detection.dart';
import 'package:image/image.dart' as img;
import 'package:tflite_flutter_helper/tflite_flutter_helper.dart';
/// Aggregates one detected [Face] with its cropped image and the outputs
/// of the age, gender and emotion classifiers.
class FaceData {
  Face face;
  img.Image? image;
  Category gender;
  Category age;
  Category emotion;
  double? smilingProbability;

  FaceData({
    required this.face,
    this.image,
    required this.gender,
    required this.age,
    required this.emotion,
    this.smilingProbability,
  });

  /// Builds a human-readable sentence such as
  /// "25 Years Old Man looking happy".
  String getDescription() {
    String sentence = "";
    sentence += _agePart();
    sentence += " ${_genderPart()}";
    sentence += " ${_lookPart()}";
    return sentence;
  }

  /// Age phrase. `age.score` carries the predicted age in years
  /// (0 means the model could not estimate an age).
  String _agePart() {
    if (age.score < 1) {
      return "Unknown Years Old";
    } else if (age.score < 2) {
      return "One Year Old";
    } else {
      return "${age.label} Years Old";
    }
  }

  /// Gender phrase, adjusted by age bracket (child / teenage / adult).
  String _genderPart() {
    if (age.score < 1) {
      // Age unknown: fall back to the plain gender label.
      if (gender.label.toLowerCase() == "male") {
        return "Male";
      } else {
        return "Female";
      }
    } else if (age.score < 12) {
      if (gender.label.toLowerCase() == "male") {
        return "Child Boy";
      } else {
        return "Child Girl";
      }
    } else if (age.score < 18) {
      if (gender.label.toLowerCase() == "male") {
        return "Teenage Boy";
      } else {
        return "Teenage Girl";
      }
    } else {
      if (gender.label.toLowerCase() == "male") {
        // Fix: was the plural "Men" for a single adult male (the female
        // branch correctly used the singular "Woman").
        return "Man";
      } else {
        return "Woman";
      }
    }
  }

  /// Emotion phrase, e.g. "looking happy".
  String _lookPart() {
    return "looking ${emotion.label.toLowerCase()}";
  }
}
export 'face_data_model.dart';
\ No newline at end of file
import 'dart:developer';
import 'dart:math' hide log;
import 'package:image/image.dart';
import 'package:tflite_flutter/tflite_flutter.dart';
import 'package:tflite_flutter_helper/tflite_flutter_helper.dart';
/// Runs the bundled TFLite age-estimation model on a face crop.
///
/// NOTE(review): [loadModel] is fired from the constructor without being
/// awaited, so [predict] can throw a LateInitializationError when called
/// before the interpreter finishes loading — confirm callers tolerate this.
class AgeClassifier {
  late Interpreter interpreter;
  late InterpreterOptions _interpreterOptions;

  late List<int> _inputShape;
  late List<int> _outputShape;
  late TensorImage _inputImage;
  late TensorBuffer _outputBuffer;
  late TfLiteType _inputType;
  late TfLiteType _outputType;

  late SequentialProcessor<TensorBuffer> _probabilityProcessor;
  late String _modelName;
  late NormalizeOp _postProcessNormalizeOp;

  String get modelName => _modelName;
  NormalizeOp get postProcessNormalizeOp => _postProcessNormalizeOp;

  /// Creates the classifier and starts loading the model asset.
  AgeClassifier({int? numThreads}) {
    _interpreterOptions = InterpreterOptions();
    if (numThreads != null) {
      _interpreterOptions.threads = numThreads;
    }
    _modelName = 'models/age_detection.tflite';
    // Identity normalization: the raw model output is used unchanged.
    _postProcessNormalizeOp = NormalizeOp(0, 1);
    loadModel();
  }

  /// Loads the TFLite model and caches its tensor shapes/types.
  ///
  /// Errors are logged and swallowed (best-effort, matching the original
  /// behavior); the `late` fields stay uninitialized on failure.
  Future<void> loadModel() async {
    try {
      interpreter = await Interpreter.fromAsset(modelName, options: _interpreterOptions);
      _inputShape = interpreter.getInputTensor(0).shape;
      _inputType = interpreter.getInputTensor(0).type;
      _outputShape = interpreter.getOutputTensor(0).shape;
      _outputType = interpreter.getOutputTensor(0).type;
      _outputBuffer = TensorBuffer.createFixedSize(_outputShape, _outputType);
      _probabilityProcessor = TensorProcessorBuilder().add(postProcessNormalizeOp).build();
    } catch (e) {
      log('Unable to create interpreter, Caught Exception: ${e.toString()}');
    }
  }

  /// Center-crops the input to a square and resizes it to the model's
  /// expected input size.
  TensorImage _preProcess() {
    int cropSize = min(_inputImage.height, _inputImage.width);
    return ImageProcessorBuilder()
        .add(ResizeWithCropOrPadOp(cropSize, cropSize))
        .add(ResizeOp(_inputShape[1], _inputShape[2], ResizeMethod.NEAREST_NEIGHBOUR))
        .build()
        .process(_inputImage);
  }

  /// Predicts the age for [image].
  ///
  /// Returns a [Category] whose label is the integer age ("Unknown" when
  /// the model outputs 0) and whose score is the raw model output.
  Category predict(Image image) {
    _inputImage = TensorImage(_inputType);
    _inputImage.loadImage(image);
    _inputImage = _preProcess();
    TensorBuffer inputBuffer = normalizeInputBuffer(_inputImage.getTensorBuffer());
    interpreter.run(inputBuffer.buffer, _outputBuffer.getBuffer());
    double result = _probabilityProcessor.process(_outputBuffer).getDoubleList().first;
    int age = result.toInt();
    return Category("${age == 0 ? "Unknown" : age}", result);
  }

  /// Releases the native interpreter resources.
  void close() {
    interpreter.close();
  }

  /// Scales every pixel value from [0, 255] into [0, 1].
  TensorBuffer normalizeInputBuffer(TensorBuffer input) {
    // Fix: getFlatSize() was re-evaluated on every loop iteration; hoist it.
    final int flatSize = input.getFlatSize();
    List<double> values = List.filled(flatSize, 0.0, growable: false);
    for (int i = 0; i < flatSize; i++) {
      values[i] = input.getDoubleValue(i) / 255.0;
    }
    TensorBuffer output;
    if (input.isDynamic) {
      output = TensorBuffer.createDynamic(_inputType);
    } else {
      output = TensorBuffer.createFixedSize(_inputShape, _inputType);
    }
    output.loadList(values, shape: input.getShape());
    return output;
  }
}
\ No newline at end of file
export 'gender_classifier.dart';
export 'age_classifier.dart';
export 'emotion_classifier.dart';
\ No newline at end of file
import 'dart:developer';
import 'dart:math' hide log;
import 'package:image/image.dart';
import 'package:tflite_flutter/tflite_flutter.dart';
import 'package:tflite_flutter_helper/tflite_flutter_helper.dart';
/// Runs the bundled TFLite emotion-recognition model on a face crop.
///
/// NOTE(review): [loadModel] is fired from the constructor without being
/// awaited, so [predict] can throw a LateInitializationError when called
/// before the interpreter finishes loading — confirm callers tolerate this.
class EmotionClassifier {
  late Interpreter interpreter;
  late InterpreterOptions _interpreterOptions;

  late List<int> _inputShape;
  late List<int> _outputShape;
  late TensorImage _inputImage;
  late TensorBuffer _outputBuffer;
  late TfLiteType _inputType;
  late TfLiteType _outputType;

  late SequentialProcessor<TensorBuffer> _probabilityProcessor;
  late String _modelName;
  late NormalizeOp _postProcessNormalizeOp;

  String get modelName => _modelName;
  NormalizeOp get postProcessNormalizeOp => _postProcessNormalizeOp;

  /// Creates the classifier and starts loading the model asset.
  EmotionClassifier({int? numThreads}) {
    _interpreterOptions = InterpreterOptions();
    if (numThreads != null) {
      _interpreterOptions.threads = numThreads;
    }
    _modelName = 'models/emotion_detection.tflite';
    // Identity normalization: the raw model output is used unchanged.
    _postProcessNormalizeOp = NormalizeOp(0, 1);
    loadModel();
  }

  /// Loads the TFLite model and caches its tensor shapes/types.
  ///
  /// Errors are logged and swallowed (best-effort, matching the original
  /// behavior); the `late` fields stay uninitialized on failure.
  Future<void> loadModel() async {
    try {
      interpreter = await Interpreter.fromAsset(modelName, options: _interpreterOptions);
      _inputShape = interpreter.getInputTensor(0).shape;
      _inputType = interpreter.getInputTensor(0).type;
      _outputShape = interpreter.getOutputTensor(0).shape;
      _outputType = interpreter.getOutputTensor(0).type;
      _outputBuffer = TensorBuffer.createFixedSize(_outputShape, _outputType);
      _probabilityProcessor = TensorProcessorBuilder().add(postProcessNormalizeOp).build();
    } catch (e) {
      log('Unable to create interpreter, Caught Exception: ${e.toString()}');
    }
  }

  /// Center-crops the input to a square and resizes it to the model's
  /// expected input size.
  TensorImage _preProcess() {
    int cropSize = min(_inputImage.height, _inputImage.width);
    return ImageProcessorBuilder()
        .add(ResizeWithCropOrPadOp(cropSize, cropSize))
        .add(ResizeOp(_inputShape[1], _inputShape[2], ResizeMethod.NEAREST_NEIGHBOUR))
        .build()
        .process(_inputImage);
  }

  /// Predicts the dominant emotion for [image].
  ///
  /// Returns the most probable of the seven model labels, with its score.
  Category predict(Image image) {
    _inputImage = TensorImage(_inputType);
    _inputImage.loadImage(image);
    _inputImage = _preProcess();
    // Grayscale + normalize, since the emotion model expects a single channel.
    TensorBuffer inputBuffer = reshapeImageTensorBuffer(_inputImage.getTensorBuffer());
    inputBuffer = normalizeInputBuffer(inputBuffer);
    interpreter.run(inputBuffer.buffer, _outputBuffer.getBuffer());
    Map<String, double> labeledProb = TensorLabel.fromList(['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise'], _probabilityProcessor.process(_outputBuffer)).getMapWithFloatValue();
    // Sort descending by probability and take the top label.
    final sortedProb = labeledProb.entries.toList()..sort((e1, e2) => e2.value.compareTo(e1.value));
    return Category(sortedProb[0].key, sortedProb[0].value);
  }

  /// Releases the native interpreter resources.
  void close() {
    interpreter.close();
  }

  /// Converts an RGB tensor buffer to a single-channel grayscale buffer
  /// using the ITU-R BT.601 luma weights (0.299 / 0.587 / 0.114).
  ///
  /// NOTE(review): assumes the input shape is [height, width, channels]
  /// as produced by TensorImage — confirm for other callers.
  TensorBuffer reshapeImageTensorBuffer(TensorBuffer input) {
    List<int> shape = input.getShape();
    if (shape.last == 1) {
      // Already single-channel: round-trip through a TensorImage unchanged.
      TensorImage image = TensorImage(TfLiteType.float32);
      image.loadTensorBuffer(input);
      return image.getTensorBuffer();
    } else if (shape.last != 3) {
      throw Exception('Input TensorBuffer shape is not supported.');
    }
    int flatSize = input.getFlatSize();
    List<double> values = List.filled(1 * shape[0] * shape[1] * 1, 0.0, growable: false);
    for (int i = 0; i < flatSize; i += 3) {
      values[i ~/ 3] = (input.getDoubleValue(i) * 0.299 + input.getDoubleValue(i + 1) * 0.587 + input.getDoubleValue(i + 2) * 0.114);
    }
    TensorBuffer output;
    if (input.isDynamic) {
      output = TensorBuffer.createDynamic(_inputType);
    } else {
      output = TensorBuffer.createFixedSize(_inputShape, _inputType);
    }
    output.loadList(values, shape: [1, shape[0], shape[1], 1]);
    return output;
  }

  /// Scales every pixel value from [0, 255] into [0, 1].
  TensorBuffer normalizeInputBuffer(TensorBuffer input) {
    // Fix: getFlatSize() was re-evaluated on every loop iteration; hoist it.
    final int flatSize = input.getFlatSize();
    List<double> values = List.filled(flatSize, 0.0, growable: false);
    for (int i = 0; i < flatSize; i++) {
      values[i] = input.getDoubleValue(i) / 255.0;
    }
    TensorBuffer output;
    if (input.isDynamic) {
      output = TensorBuffer.createDynamic(_inputType);
    } else {
      output = TensorBuffer.createFixedSize(_inputShape, _inputType);
    }
    output.loadList(values, shape: input.getShape());
    return output;
  }
}
\ No newline at end of file
import 'dart:developer';
import 'dart:math' hide log;
import 'package:image/image.dart';
import 'package:tflite_flutter/tflite_flutter.dart';
import 'package:tflite_flutter_helper/tflite_flutter_helper.dart';
/// Runs the bundled TFLite gender-classification model on a face crop.
///
/// NOTE(review): [loadModel] is fired from the constructor without being
/// awaited, so [predict] can throw a LateInitializationError when called
/// before the interpreter finishes loading — confirm callers tolerate this.
class GenderClassifier {
  late Interpreter interpreter;
  late InterpreterOptions _interpreterOptions;

  late List<int> _inputShape;
  late List<int> _outputShape;
  late TensorImage _inputImage;
  late TensorBuffer _outputBuffer;
  late TfLiteType _inputType;
  late TfLiteType _outputType;

  late SequentialProcessor<TensorBuffer> _probabilityProcessor;
  late String _modelName;
  late NormalizeOp _postProcessNormalizeOp;

  String get modelName => _modelName;
  NormalizeOp get postProcessNormalizeOp => _postProcessNormalizeOp;

  /// Creates the classifier and starts loading the model asset.
  GenderClassifier({int? numThreads}) {
    _interpreterOptions = InterpreterOptions();
    if (numThreads != null) {
      _interpreterOptions.threads = numThreads;
    }
    _modelName = 'models/gender_detection.tflite';
    // Identity normalization: the raw model output is used unchanged.
    _postProcessNormalizeOp = NormalizeOp(0, 1);
    loadModel();
  }

  /// Loads the TFLite model and caches its tensor shapes/types.
  ///
  /// Errors are logged and swallowed (best-effort, matching the original
  /// behavior); the `late` fields stay uninitialized on failure.
  Future<void> loadModel() async {
    try {
      interpreter = await Interpreter.fromAsset(modelName, options: _interpreterOptions);
      _inputShape = interpreter.getInputTensor(0).shape;
      _inputType = interpreter.getInputTensor(0).type;
      _outputShape = interpreter.getOutputTensor(0).shape;
      _outputType = interpreter.getOutputTensor(0).type;
      _outputBuffer = TensorBuffer.createFixedSize(_outputShape, _outputType);
      _probabilityProcessor = TensorProcessorBuilder().add(postProcessNormalizeOp).build();
    } catch (e) {
      log('Unable to create interpreter, Caught Exception: ${e.toString()}');
    }
  }

  /// Center-crops the input to a square and resizes it to the model's
  /// expected input size.
  TensorImage _preProcess() {
    int cropSize = min(_inputImage.height, _inputImage.width);
    return ImageProcessorBuilder()
        .add(ResizeWithCropOrPadOp(cropSize, cropSize))
        .add(ResizeOp(_inputShape[1], _inputShape[2], ResizeMethod.NEAREST_NEIGHBOUR))
        .build()
        .process(_inputImage);
  }

  /// Predicts the gender for [image].
  ///
  /// The model outputs a single sigmoid-like value; >= 0.5 is labeled
  /// "Female", otherwise "Male". The rounded value is the score.
  Category predict(Image image) {
    _inputImage = TensorImage(_inputType);
    _inputImage.loadImage(image);
    _inputImage = _preProcess();
    TensorBuffer inputBuffer = normalizeInputBuffer(_inputImage.getTensorBuffer());
    interpreter.run(inputBuffer.buffer, _outputBuffer.getBuffer());
    List<double> result = _probabilityProcessor.process(_outputBuffer).getDoubleList();
    log('Gender Result: $result');
    double prediction = double.parse(result.first.toStringAsFixed(2));
    if (prediction >= 0.5) {
      return Category("Female", prediction);
    } else {
      return Category("Male", prediction);
    }
  }

  /// Releases the native interpreter resources.
  void close() {
    interpreter.close();
  }

  /// Scales every pixel value from [0, 255] into [0, 1].
  TensorBuffer normalizeInputBuffer(TensorBuffer input) {
    // Fix: getFlatSize() was re-evaluated on every loop iteration; hoist it.
    final int flatSize = input.getFlatSize();
    List<double> values = List.filled(flatSize, 0.0, growable: false);
    for (int i = 0; i < flatSize; i++) {
      values[i] = input.getDoubleValue(i) / 255.0;
    }
    TensorBuffer output;
    if (input.isDynamic) {
      output = TensorBuffer.createDynamic(_inputType);
    } else {
      output = TensorBuffer.createFixedSize(_inputShape, _inputType);
    }
    output.loadList(values, shape: input.getShape());
    return output;
  }
}
\ No newline at end of file
import 'dart:async';
import 'dart:developer';
import 'dart:io';
import 'dart:ui' as ui;
import 'package:emotion_detection/models/models.dart';
import 'package:flutter/material.dart';
import 'package:google_mlkit_face_detection/google_mlkit_face_detection.dart';
import 'custom/custom.dart';
import 'image_picker_service.dart';
/// Detects faces in a still image and classifies each face's age, gender
/// and emotion, publishing results both via [ChangeNotifier] and the
/// [onFaceDetect] broadcast stream.
class FaceDetectionsService with ChangeNotifier {
  final FaceDetectorOptions _options = FaceDetectorOptions(
    enableClassification: true,
    minFaceSize: 0.5,
  );

  File? _imageFile;
  Size _imageSize = Size.zero;
  final List<FaceData> _detectedFaces = List.empty(growable: true);
  final StreamController<List<FaceData>> _onFaceDetect = StreamController<List<FaceData>>.broadcast();

  late AgeClassifier _ageClassifier;
  late GenderClassifier _genderClassifier;
  late EmotionClassifier _emotionClassifier;

  FaceDetectionsService() {
    _ageClassifier = AgeClassifier();
    _genderClassifier = GenderClassifier();
    _emotionClassifier = EmotionClassifier();
  }

  File? get selectedImage => _imageFile;
  Size get imageSize => _imageSize;
  List<FaceData> get detectedFaces => _detectedFaces;
  Stream<List<FaceData>> get onFaceDetect => _onFaceDetect.stream;

  /// Loads [imageFile], clears previous results, and runs detection.
  Future loadImage(File imageFile) async {
    _imageFile = imageFile;
    _detectedFaces.clear();
    _imageSize = await _getImageResolution();
    notifyListeners();
    await processImage();
  }

  /// Clears the current image and results. [notify] controls whether
  /// ChangeNotifier listeners are informed; the stream is always updated.
  void clearImage({bool notify = false}) {
    _imageFile = null;
    _detectedFaces.clear();
    _onFaceDetect.add(List.empty());
    _imageSize = Size.zero;
    if (notify) notifyListeners();
  }

  /// Fix: release the stream controller and the native interpreters —
  /// previously these were never closed when the service was disposed.
  @override
  void dispose() {
    _onFaceDetect.close();
    _ageClassifier.close();
    _genderClassifier.close();
    _emotionClassifier.close();
    super.dispose();
  }

  /// Wraps the currently loaded file for ML Kit; throws if none is loaded.
  InputImage _getInputImage() {
    if (_imageFile != null) {
      return InputImage.fromFile(_imageFile!);
    }
    throw Exception("Load Image File before calling this method!");
  }

  /// Decodes the loaded file just enough to learn its pixel resolution.
  Future<Size> _getImageResolution() async {
    if (_imageFile == null) {
      return Size.zero;
    }
    // Completer adapts the callback-based decodeImageFromList to a Future.
    final Completer<ui.Image> completer = Completer();
    ui.decodeImageFromList(_imageFile!.readAsBytesSync(), (ui.Image img) {
      return completer.complete(img);
    });
    return completer.future.then((value) => Size(value.width.toDouble(), value.height.toDouble()));
  }

  /// Runs ML Kit face detection and then the three classifiers on each
  /// detected face crop, accumulating [FaceData] results.
  Future processImage() async {
    FaceDetector faceDetector = FaceDetector(options: _options);
    try {
      InputImage inputImage = _getInputImage();
      final List<Face> faces = await faceDetector.processImage(inputImage);
      log("Total Faces Detected: ${faces.length}");
      for (var element in faces) {
        log("Detected FaceData: ${element.boundingBox.toString()}");
        log("Detected smilingProbability: ${element.smilingProbability}");
        final image = await ImagePickerService.getCroppedImageData(_imageFile!, element.boundingBox);
        if (image != null) {
          final ageCategory = _ageClassifier.predict(image);
          final genderCategory = _genderClassifier.predict(image);
          final emotionCategory = _emotionClassifier.predict(image);
          log("Predicted Label: ${ageCategory.label} (${ageCategory.score})");
          log("Predicted Label: ${genderCategory.label} (${genderCategory.score})");
          log("Predicted Label: ${emotionCategory.label} (${emotionCategory.score})");
          _detectedFaces.add(
            FaceData(face: element, image: image, gender: genderCategory, age: ageCategory, emotion: emotionCategory, smilingProbability: element.smilingProbability)
          );
        } else {
          log("Image Error");
        }
      }
      notifyListeners();
      _onFaceDetect.add(_detectedFaces);
    } catch (e) {
      log(e.toString());
      _detectedFaces.clear();
      _onFaceDetect.add(List.empty());
      notifyListeners();
    } finally {
      // Fix: close the detector exactly once on every path — the close
      // call was previously duplicated in both the try and catch blocks.
      faceDetector.close();
    }
  }
}
\ No newline at end of file
import 'dart:developer';
import 'dart:io';
import 'dart:ui';
import 'package:image_picker/image_picker.dart';
import 'package:image/image.dart' as img;
/// Static helpers around [ImagePicker], plus a crop utility used on
/// ML Kit face bounding boxes.
class ImagePickerService {
  // Private constructor: this is a static-only utility class.
  ImagePickerService._();

  static final ImagePicker _imagePicker = ImagePicker();

  /// Lets the user pick a still image; returns null on cancel or error.
  static Future<File?> pickImage({ImageSource imageSource = ImageSource.gallery}) async {
    try {
      final XFile? image = await _imagePicker.pickImage(source: imageSource);
      if (image == null) {
        return null;
      }
      return File(image.path);
    } catch (e) {
      log(e.toString());
      return null;
    }
  }

  /// Lets the user pick a video; returns null on cancel or error.
  static Future<File?> pickVideo({ImageSource imageSource = ImageSource.gallery}) async {
    try {
      final XFile? image = await _imagePicker.pickVideo(source: imageSource);
      if (image == null) {
        return null;
      }
      return File(image.path);
    } catch (e) {
      log(e.toString());
      return null;
    }
  }

  /// Crops [boundBox] out of [imageFile]; returns null if decoding fails.
  ///
  /// Fix: the rectangle is clamped to the image bounds — ML Kit face
  /// bounding boxes can extend past the image edge, which previously
  /// passed out-of-range coordinates to copyCrop.
  static Future<img.Image?> getCroppedImageData(File imageFile, Rect boundBox) async {
    final image = img.decodeImage(imageFile.readAsBytesSync());
    if (image == null) {
      return null;
    }
    final int x = boundBox.left.toInt().clamp(0, image.width - 1).toInt();
    final int y = boundBox.top.toInt().clamp(0, image.height - 1).toInt();
    final int w = boundBox.width.toInt().clamp(1, image.width - x).toInt();
    final int h = boundBox.height.toInt().clamp(1, image.height - y).toInt();
    final croppedImage = img.copyCrop(image, x, y, w, h);
    return croppedImage;
  }
}
\ No newline at end of file
export 'face_detection_service.dart';
export 'image_picker_service.dart';
export 'custom/custom.dart';
\ No newline at end of file
import 'package:emotion_detection/services/services.dart';
import 'package:emotion_detection/views/image_classification_page.dart';
import 'package:emotion_detection/views/video_classification_page.dart';
import 'package:flutter/cupertino.dart';
import 'package:flutter/material.dart';
import 'package:provider/provider.dart';
/// Landing page: a segmented control that switches between the image and
/// video classification pages.
class HomePage extends StatefulWidget {
const HomePage({super.key});
@override
State<HomePage> createState() => _HomePageState();
}
class _HomePageState extends State<HomePage> {
  /// Index of the tab currently shown (0 = image, 1 = video).
  int currentIndex = 0;
  late PageController _pageController;

  @override
  void initState() {
    _pageController = PageController(initialPage: currentIndex);
    super.initState();
  }

  @override
  void dispose() {
    _pageController.dispose();
    super.dispose();
  }

  /// Segmented control driving the [PageView] below it.
  Widget _buildTabSelector(FaceDetectionsService service) {
    return Container(
      padding: const EdgeInsets.symmetric(vertical: 16, horizontal: 16),
      width: double.infinity,
      child: CupertinoSlidingSegmentedControl(
        groupValue: currentIndex,
        onValueChanged: (value) {
          // Drop any previous detection result before switching modes.
          service.clearImage();
          _pageController.animateToPage(
            value ?? 0,
            duration: const Duration(milliseconds: 300),
            curve: Curves.easeIn,
          );
        },
        children: const {
          0: Text("Image"),
          1: Text("Video"),
        },
      ),
    );
  }

  @override
  Widget build(BuildContext context) {
    final faceDetectionsService = Provider.of<FaceDetectionsService>(context);
    return Scaffold(
      appBar: AppBar(
        title: const Text("Look AI"),
      ),
      body: Column(
        children: [
          _buildTabSelector(faceDetectionsService),
          Expanded(
            child: PageView(
              controller: _pageController,
              // Swiping is disabled; navigation goes through the selector.
              physics: const NeverScrollableScrollPhysics(),
              onPageChanged: (value) => setState(() => currentIndex = value),
              children: const [
                ImageClassificationPage(),
                VideoClassificationPage(),
              ],
            ),
          ),
        ],
      ),
    );
  }
}
\ No newline at end of file
import 'dart:async';
import 'dart:io';
import 'package:adaptive_dialog/adaptive_dialog.dart';
import 'package:emotion_detection/models/face_data_model.dart';
import 'package:flutter/material.dart';
import 'package:flutter_tts/flutter_tts.dart';
import 'package:image_picker/image_picker.dart';
import 'package:provider/provider.dart';
import 'package:emotion_detection/services/services.dart';
import 'package:emotion_detection/widgets/widgets.dart';
/// Page for classifying faces in a single picked image, with spoken
/// results via text-to-speech.
class ImageClassificationPage extends StatefulWidget {
const ImageClassificationPage({super.key});
@override
State<ImageClassificationPage> createState() => _ImageClassificationPageState();
}
class _ImageClassificationPageState extends State<ImageClassificationPage> {
  late FaceDetectionsService _faceDetectionsService;
  // True while an image is being processed (set around loadImage).
  bool isProcessing = false;
  late FlutterTts _flutterTts;
  late StreamSubscription<List<FaceData>> _streamSubscription;

  @override
  void initState() {
    _flutterTts = FlutterTts();
    // listen: false — this is a lifecycle hook, not a build dependency.
    _faceDetectionsService = Provider.of<FaceDetectionsService>(context, listen: false);
    _streamSubscription = _faceDetectionsService.onFaceDetect.listen((event) => onFaceDetect(event));
    WidgetsBinding.instance.addPostFrameCallback((timeStamp) => configTts());
    super.initState();
  }

  // NOTE(review): intentionally left empty in the original — confirm
  // whether TTS language/rate configuration was planned here.
  void configTts() {
  }

  /// Speaks a summary sentence for each batch of detected faces.
  void onFaceDetect(List<FaceData> event) async {
    await stop();
    if (event.isNotEmpty) {
      // Small pause so a previous utterance is fully stopped first.
      await Future.delayed(const Duration(milliseconds: 500));
      String toSpeak = "${event.length} Face Detected.";
      for (var element in event) {
        toSpeak += " ${element.getDescription()}";
      }
      await speak(toSpeak);
    }
  }

  Future speak(String text) async {
    await _flutterTts.speak(text);
  }

  Future stop() async {
    await _flutterTts.stop();
  }

  @override
  void dispose() {
    _streamSubscription.cancel();
    _faceDetectionsService.clearImage();
    super.dispose();
  }

  @override
  Widget build(BuildContext context) {
    final faceDetectionsService = Provider.of<FaceDetectionsService>(context);
    return Scaffold(
      body: Column(
        children: [
          AspectRatio(
            aspectRatio: 1,
            child: AnimatedBuilder(
              animation: faceDetectionsService,
              builder: (context, child) {
                if (faceDetectionsService.selectedImage != null) {
                  return Center(
                    child: CustomPaint(
                      foregroundPainter: FacePainter(
                        faces: faceDetectionsService.detectedFaces.map((e) => e.face.boundingBox).toList(),
                        imageResolution: faceDetectionsService.imageSize,
                      ),
                      child: Image(
                        image: FileImage(faceDetectionsService.selectedImage!),
                        fit: BoxFit.contain,
                      ),
                    ),
                  );
                } else {
                  return Icon(Icons.image_search, size: 120, color: Theme.of(context).primaryColor);
                }
              },
            ),
          ),
          const SizedBox(height: 24),
          Expanded(
            child: ListView.builder(
              itemCount: faceDetectionsService.detectedFaces.length,
              padding: const EdgeInsets.symmetric(horizontal: 16),
              itemBuilder: (context, index) {
                return Container(
                  margin: const EdgeInsets.symmetric(vertical: 4),
                  decoration: BoxDecoration(
                    border: Border.all(
                      color: Colors.deepPurple.shade400,
                    ),
                    borderRadius: BorderRadius.circular(8),
                  ),
                  child: ListTile(
                    leading: Text((index + 1).toString().padLeft(2, "0")),
                    title: Text(faceDetectionsService.detectedFaces[index].getDescription()),
                  ),
                );
              },
            ),
          ),
        ],
      ),
      bottomNavigationBar: BottomAppBar(
        child: Padding(
          padding: const EdgeInsets.symmetric(horizontal: 20, vertical: 12),
          child: Row(
            mainAxisAlignment: MainAxisAlignment.spaceAround,
            children: [
              ElevatedButton.icon(
                onPressed: () async {
                  ImageSource? result = await showModalActionSheet(
                    context: context,
                    title: "Select Image Source",
                    actions: [
                      const SheetAction(label: "Gallery", key: ImageSource.gallery, isDefaultAction: true),
                      const SheetAction(label: "Camera", key: ImageSource.camera),
                    ]
                  );
                  final File? image = await ImagePickerService.pickImage(imageSource: result ?? ImageSource.gallery);
                  if (image != null) {
                    // Fix: guard setState after awaits — the widget may have
                    // been disposed while the picker/classifier was running.
                    if (!mounted) return;
                    setState(() {
                      isProcessing = true;
                    });
                    await faceDetectionsService.loadImage(image);
                    if (!mounted) return;
                    setState(() {
                      isProcessing = false;
                    });
                  }
                },
                icon: const Icon(Icons.image_rounded),
                label: const Text("Select Image"),
              ),
              ElevatedButton.icon(
                onPressed: () async {
                  faceDetectionsService.clearImage(notify: true);
                },
                icon: const Icon(Icons.clear_all_rounded),
                label: const Text("Clear All"),
              ),
            ],
          ),
        ),
      ),
    );
  }
}
\ No newline at end of file
This diff is collapsed.
export 'home.dart';
\ No newline at end of file
import 'package:flutter/material.dart';
/// Paints face bounding boxes, scaled from image coordinates to the
/// canvas size.
class FacePainter extends CustomPainter {
  List<Rect> faces;
  Size imageResolution;

  FacePainter({
    this.faces = const [],
    required this.imageResolution,
  });

  @override
  void paint(Canvas canvas, Size size) {
    // Fix: guard against a zero resolution (set briefly before the image
    // is decoded), which previously produced NaN/Infinity rectangles.
    if (imageResolution.width <= 0 || imageResolution.height <= 0) return;
    Paint facePainter = Paint()
      ..color = Colors.red
      ..style = PaintingStyle.stroke
      ..strokeWidth = 4;
    for (var element in faces) {
      double top = (element.top / imageResolution.height) * size.height;
      double left = (element.left / imageResolution.width) * size.width;
      double width = (element.width / imageResolution.width) * size.width;
      double height = (element.height / imageResolution.height) * size.height;
      canvas.drawRRect(RRect.fromRectAndRadius(Rect.fromLTWH(left, top, width, height), const Radius.circular(6)), facePainter);
    }
  }

  /// Fix: repaint only when the inputs actually change instead of
  /// unconditionally returning true, which forced a repaint every frame.
  @override
  bool shouldRepaint(covariant FacePainter oldDelegate) =>
      oldDelegate.imageResolution != imageResolution ||
      !listEquals(oldDelegate.faces, faces);
}
export 'face_painter.dart';
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment