Commit e03258b2 authored by Sachin Kodagoda's avatar Sachin Kodagoda

latest update

parent d87f28c1
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# TypeScript cache
*.tsbuildinfo
# dependencies
/node_modules
/.pnp
.pnp.js
# Optional npm cache directory
.npm
# testing
/coverage
# Optional eslint cache
.eslintcache
# next.js
/.next/
/out/
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# production
/build
# Optional REPL history
.node_repl_history
# misc
.DS_Store
*.pem
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variables file
.env
.env.test
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# local env files
.env.local
.env.development.local
.env.test.local
.env.production.local
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
# vercel
.vercel
......@@ -2,4 +2,4 @@
. "$(dirname "$0")/_/husky.sh"
# yarn lint
yarn da:all
# yarn da:all
......@@ -21,8 +21,19 @@
"cSpell.words": [
"Abinaya",
"Kumarasinghe",
"SAMPLINGMODE",
"Thilina",
"babylonjs",
"drei",
"fingerpose",
"gltf",
"gltfjsx",
"handpose",
"tfjs"
"hemi",
"preconnect",
"speechly",
"tfjs",
"unproject",
"webgl"
]
}
const path = require('path');
module.exports = {
  webpack: (config, { isServer }) => {
    // Client bundles cannot resolve Node's `fs` module; stub it out so npm
    // packages that reference `fs` still bundle for the browser.
    // (Original author's note: added to avoid fs errors — still to be verified.)
    if (!isServer) {
      config.resolve.fallback = { fs: false };
    }
    return config;
  },
  env: {
    APP_NAME: 'Davinci',
    // SECURITY(review): this API key is committed to source control and exposed
    // to the client via `env`. Move it to an untracked env file and rotate the key.
    API_KEY: 'AIzaSyBCC6ZG9rLWUyEWsMvKbISBJj61zKY8sS8',
......@@ -21,10 +29,4 @@ module.exports = {
images: {
domains: ['google.com'],
},
// webpack: (config, { isServer }) => {
// if (isServer) {
// require("scripts/generate-sitemap");
// }
// return config;
// },
};
......@@ -14,28 +14,38 @@
"prepare": "husky install"
},
"dependencies": {
"@google-cloud/speech": "^4.5.3",
"@react-three/drei": "^7.0.6",
"@react-three/fiber": "^7.0.2",
"@react-three/gltfjsx": "^4.2.3",
"@speechly/speech-recognition-polyfill": "^1.0.0",
"@tensorflow-models/handpose": "^0.0.7",
"@tensorflow-models/speech-commands": "^0.5.4",
"@tensorflow/tfjs": "^3.7.0",
"@tensorflow/tfjs-backend-webgl": "3.7.0",
"@tensorflow/tfjs-converter": "3.7.0",
"@tensorflow/tfjs-core": "3.7.0",
"@types/three": "^0.130.0",
"fingerpose": "^0.0.2",
"firebase": "^8.6.8",
"firebase": "^8.7.0",
"next": "^11.0.1",
"react": "^17.0.2",
"react-dom": "^17.0.2",
"react-firebase-hooks": "^3.0.4",
"react-select": "^4.3.1",
"react-speech-recognition": "^3.8.2",
"react-webcam": "^5.2.4",
"regenerator-runtime": "^0.13.7",
"sass": "^1.35.1",
"three": "^0.129.0"
"three": "^0.130.1"
},
"devDependencies": {
"@types/react": "^17.0.11",
"@babel/parser": "^7.14.7",
"@types/react": "^17.0.13",
"@types/react-select": "^4.0.16",
"@types/three": "^0.129.1",
"@typescript-eslint/eslint-plugin": "^4.28.0",
"@typescript-eslint/parser": "^4.28.0",
"@types/react-speech-recognition": "^3.6.0",
"@typescript-eslint/eslint-plugin": "^4.28.1",
"@typescript-eslint/parser": "^4.28.1",
"eslint": "^7.29.0",
"eslint-config-airbnb": "^18.2.1",
"eslint-config-next": "^11.0.1",
......@@ -48,8 +58,8 @@
"eslint-plugin-react": "^7.24.0",
"eslint-plugin-react-hooks": "^4.2.0",
"eslint-plugin-sort-destructure-keys": "^1.3.5",
"husky": "^6.0.0",
"husky": "^7.0.0",
"prettier": "^2.3.2",
"typescript": "^4.3.4"
"typescript": "^4.3.5"
}
}
Model Information:
* title: Red (Rigged)
* source: https://sketchfab.com/3d-models/red-rigged-934206963a864f7b885ed248ee009232
* author: Poyo20 (https://sketchfab.com/poyo20)
Model License:
* license type: CC-BY-4.0 (http://creativecommons.org/licenses/by/4.0/)
* requirements: Author must be credited. Commercial use is allowed.
If you use this 3D model in your project be sure to copy paste this credit wherever you share it:
This work is based on "Red (Rigged)" (https://sketchfab.com/3d-models/red-rigged-934206963a864f7b885ed248ee009232) by Poyo20 (https://sketchfab.com/poyo20) licensed under CC-BY-4.0 (http://creativecommons.org/licenses/by/4.0/)
\ No newline at end of file
This diff is collapsed.
import { leftTopToCenter } from '@util/common';
import React, { useState } from 'react';
type TProps = {
  children: JSX.Element;
};

// Hand-gesture / 3D-scene state shared through AnimationContext.
// Raw values come from the handpose loop in MainMiddleArea; derived values
// (ratios, frustum edges, hand centre) are computed in the provider below.
interface IContext {
  shouldRotate: boolean; // true while the "index finger down" gesture is held
  zoom: number; // model scale (1 normal, 3 when pinky+ring+middle are folded)
  rotationValue: number; // currently the raw indexThumbAngle passed through
  indexThumbAngle: number; // angle (radians) at the index knuckle between index tip and thumb tip
  videoWidth: number; // webcam frame size in pixels
  videoHeight: number;
  x: number; // detected palm centre in video pixel coordinates
  y: number;
  setZoom: React.Dispatch<React.SetStateAction<number>>;
  setIndexThumbAngle: React.Dispatch<React.SetStateAction<number>>;
  setVideoWidth: React.Dispatch<React.SetStateAction<number>>;
  setVideoHeight: React.Dispatch<React.SetStateAction<number>>;
  setX: React.Dispatch<React.SetStateAction<number>>;
  setY: React.Dispatch<React.SetStateAction<number>>;
  scaler: number; // pixels-per-world-unit divisor (fixed at 100)
  divider: number; // scaler * 2; also used as the camera's far plane / z position
  xRatio: number; // videoWidth / divider — half-width of the ortho frustum
  yRatio: number;
  right: number; // symmetric frustum edges: ±xRatio / ±yRatio
  top: number;
  left: number;
  bottom: number;
  handCenterX: number; // palm centre converted into centred world space
  handCenterY: number;
  containerWidth: number; // size requested for the webcam element
  containerHeight: number;
  isYRotationClock: boolean; // spin direction for the model's Y rotation
  setContainerHeight: React.Dispatch<React.SetStateAction<number>>;
  setContainerWidth: React.Dispatch<React.SetStateAction<number>>;
  setShouldRotate: React.Dispatch<React.SetStateAction<boolean>>;
  setIsYRotationClock: React.Dispatch<React.SetStateAction<boolean>>;
}

// Defaults used before the provider mounts; all setters are no-ops so
// consumers rendered outside the provider fail soft.
const initContext: IContext = {
  shouldRotate: false,
  zoom: 1,
  rotationValue: 0,
  indexThumbAngle: 0,
  videoWidth: 0,
  videoHeight: 0,
  x: 0,
  y: 0,
  setZoom: () => null,
  setIndexThumbAngle: () => null,
  setVideoWidth: () => null,
  setVideoHeight: () => null,
  setX: () => null,
  setY: () => null,
  scaler: 100,
  divider: 200,
  xRatio: 0,
  yRatio: 0,
  right: 0,
  top: 0,
  left: 0,
  bottom: 0,
  handCenterX: 0,
  handCenterY: 0,
  containerWidth: 0,
  containerHeight: 0,
  isYRotationClock: false,
  setContainerHeight: () => null,
  setContainerWidth: () => null,
  setShouldRotate: () => null,
  setIsYRotationClock: () => null,
};

export const AnimationContext = React.createContext<IContext>(initContext);
// Owns the raw gesture/video state and derives the orthographic-camera frustum
// and hand-centre values consumed by ThreeWorld / ThreeModel.
export const AnimationContextProvider = ({ children }: TProps): React.ReactElement => {
  const [zoom, setZoom] = useState(initContext.zoom);
  const [shouldRotate, setShouldRotate] = useState(initContext.shouldRotate);
  const [isYRotationClock, setIsYRotationClock] = useState(initContext.isYRotationClock);
  const [indexThumbAngle, setIndexThumbAngle] = useState(initContext.indexThumbAngle);
  const [videoWidth, setVideoWidth] = useState(initContext.videoWidth);
  const [videoHeight, setVideoHeight] = useState(initContext.videoHeight);
  const [containerHeight, setContainerHeight] = useState(initContext.containerHeight);
  const [containerWidth, setContainerWidth] = useState(initContext.containerWidth);
  const [x, setX] = useState(initContext.x);
  const [y, setY] = useState(initContext.y);
  // World units are video pixels divided by `scaler`; `divider` doubles as the
  // camera's far plane and z position in ThreeWorld.
  const scaler = 100;
  const divider = scaler * 2;
  const xRatio = videoWidth / divider;
  const yRatio = videoHeight / divider;
  // Frustum edges symmetric about the video centre.
  const right = xRatio;
  const top = yRatio;
  const left = -xRatio;
  const bottom = -yRatio;
  // Palm centre converted from left/top-origin pixels to centred world space.
  const handCenterX = leftTopToCenter(x, videoWidth, scaler, 0);
  const handCenterY = leftTopToCenter(y, videoHeight, scaler, 0);
  // degree_angle * 360 / 100 => (radian_angle * 180 * 360) / (100 * PI)
  // const rotationValue = (2 * Math.PI * indexThumbAngle) / 2;
  const rotationValue = indexThumbAngle; // currently exposes the raw radian angle
  const contextValue = {
    zoom,
    rotationValue,
    indexThumbAngle,
    videoWidth,
    videoHeight,
    containerWidth,
    containerHeight,
    x,
    y,
    scaler,
    divider,
    xRatio,
    yRatio,
    right,
    top,
    left,
    bottom,
    handCenterX,
    handCenterY,
    shouldRotate,
    isYRotationClock,
    setZoom,
    setIndexThumbAngle,
    setVideoWidth,
    setVideoHeight,
    setX,
    setY,
    setContainerHeight,
    setContainerWidth,
    setShouldRotate,
    setIsYRotationClock,
  };
  return <AnimationContext.Provider value={contextValue}>{children}</AnimationContext.Provider>;
};
import { create, SpeechCommandRecognizer } from '@tensorflow-models/speech-commands';
import React, { useEffect, useState } from 'react';
import SpeechRecognition, { useSpeechRecognition } from 'react-speech-recognition';
type TProps = {
  children: JSX.Element;
};

// Voice state shared through AudioContext: the keyword-spotting model, the
// last recognised keyword, mic status and the accumulated chat messages.
interface IContext {
  model: SpeechCommandRecognizer | null; // speech-commands recognizer, null until loaded
  action: string | null; // last keyword detected by the recognizer
  labels: string[]; // keyword vocabulary reported by the model
  microphoneOn: boolean;
  isMicDisabled: boolean; // blocks the mic toggle while a listening session runs
  setMicrophoneOn: React.Dispatch<React.SetStateAction<boolean>>;
  setModel: React.Dispatch<React.SetStateAction<SpeechCommandRecognizer | null>>;
  setAction: React.Dispatch<React.SetStateAction<string | null>>;
  setLabels: React.Dispatch<React.SetStateAction<string[]>>;
  recognizeCommands: () => Promise<void | null>;
  // type is 'key' for recognised keywords, 'other' for free-form transcript text
  messageArray: { message: string; type: string }[];
}

// No-op defaults used before the provider mounts / the model loads.
const initContext: IContext = {
  model: null,
  action: null,
  labels: [],
  microphoneOn: false,
  isMicDisabled: false,
  setMicrophoneOn: () => null,
  setModel: () => null,
  setAction: () => null,
  setLabels: () => null,
  recognizeCommands: async () => null,
  messageArray: [],
};

export const AudioContext = React.createContext<IContext>(initContext);
// Owns the speech-commands keyword model, the free-form transcript
// (react-speech-recognition) and the accumulated chat messages.
export const AudioContextProvider = ({ children }: TProps): React.ReactElement => {
  const [model, setModel] = useState(initContext.model);
  const [action, setAction] = useState(initContext.action); // detected keyword
  const [labels, setLabels] = useState(initContext.labels); // list of keywords
  const [microphoneOn, setMicrophoneOn] = useState(initContext.microphoneOn);
  const [messageArray, setMessageArray] = useState(initContext.messageArray);
  const [isMicDisabled, setIsMicDisabled] = useState(initContext.isMicDisabled);
  const { resetTranscript, transcript } = useSpeechRecognition();
  // Load the pre-trained browser-FFT speech-commands model (once, on mount).
  const loadModel = async () => {
    const recognizer = await create('BROWSER_FFT');
    await recognizer.ensureModelLoaded();
    setModel(recognizer);
    setLabels(recognizer.wordLabels());
  };
  // Stop both the keyword recognizer and browser speech recognition.
  const stopListening = async () => {
    if (model) {
      setMicrophoneOn(false);
      model.stopListening();
    }
    // resetTranscript();
    SpeechRecognition.stopListening();
  };
  // Index of the largest score. Fix: the scores delivered by recognizer.listen
  // are numeric (Float32Array values), not strings — the old `string[]`
  // annotation made the `>` comparison look lexicographic. Runtime behavior
  // is unchanged; only the type is corrected.
  const argMax = (arr: number[]) => {
    return arr.map((x, i) => [x, i]).reduce((r, a) => (a[0] > r[0] ? a : r))[1];
  };
  // Record a recognised keyword (only if it is in the vocabulary) and honour 'stop'.
  const setTheAction = async (val: string | null) => {
    setAction(val);
    setMessageArray(msgArr => {
      const temp = [...msgArr];
      if (val) {
        const isAvailable = labels.includes(val);
        if (isAvailable) {
          temp.push({ message: val, type: 'key' });
        }
      }
      return temp;
    });
    if (model && val === 'stop') {
      await stopListening();
    }
  };
  // Start streaming microphone audio into the keyword model.
  const recognizeCommands = async () => {
    if (model) {
      // NOTE: listen() returns a promise that only resolves when listening
      // stops, so it is deliberately not awaited here.
      model.listen(
        result => {
          return setTheAction(labels[argMax(Object.values(result.scores))]);
        },
        { includeSpectrogram: true, probabilityThreshold: 0.7 }
      );
      // setTimeout(() => {
      //   setMicrophoneOn(false);
      //   model.stopListening();
      // }, 10e3);
    }
  };
  // Drive both recognizers from the mic toggle; when the mic turns off, flush
  // the transcript (minus the 'stop' keyword) into the chat list.
  useEffect(() => {
    if (microphoneOn && !!model) {
      recognizeCommands();
    }
    if (microphoneOn) {
      resetTranscript();
      setIsMicDisabled(true);
      SpeechRecognition.startListening({ continuous: true });
    } else {
      setIsMicDisabled(false);
      const msg = transcript;
      const tempMsg = msg.replace('stop', '');
      if (tempMsg) {
        setMessageArray(msgArr => {
          const temp = [...msgArr];
          temp.push({ message: tempMsg, type: 'other' });
          return temp;
        });
      }
    }
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [microphoneOn]);
  useEffect(() => {
    loadModel();
  }, []);
  const contextValue = {
    model,
    action,
    labels,
    isMicDisabled,
    microphoneOn,
    messageArray,
    setMicrophoneOn,
    setModel,
    setAction,
    setLabels,
    recognizeCommands,
  };
  return <AudioContext.Provider value={contextValue}>{children}</AudioContext.Provider>;
};
......@@ -7,10 +7,15 @@ export interface IHtmlInputElement {
target: HTMLInputElement;
}
export interface IHTMLVideoElement {
export interface IHtmlVideoElement {
target: HTMLVideoElement;
}
export interface IHTMLCanvasElement {
export interface IHtmlCanvasElement {
target: HTMLCanvasElement;
}
export interface IHtmlDivElement {
target: HTMLDivElement;
current: HTMLDivElement;
}
/* eslint-disable @typescript-eslint/no-non-null-assertion */
import { useGLTF } from '@react-three/drei';
import { useFrame, Vector3 } from '@react-three/fiber';
import React, { useRef } from 'react';
import { GLTF } from 'three/examples/jsm/loaders/GLTFLoader';
// for more models
// https://sketchfab.com/3d-models?features=downloadable
// Props controlling placement and gesture-driven animation of the model.
type TProps = {
  position: Vector3; // world-space position of the root group
  zoom: number; // uniform scale applied to the root group
  shouldRotate: boolean; // spin about Y while true
  isYRotationClock: boolean; // spin direction
};
// Shape of the loaded GLTF scene graph for the rigged "Red" bird asset.
type TGLTFResult = GLTF & {
  nodes: {
    Armature_rootJoint: THREE.Mesh;
    RedGeo_0: THREE.SkinnedMesh;
  };
  materials: {
    RedMat: THREE.MeshStandardMaterial;
  };
};
// Renders the rigged "Red" bird GLTF and spins it about the Y axis while the
// rotate gesture is active; `position` / `zoom` place and scale the model.
const ThreeModel = ({ isYRotationClock, position, shouldRotate, zoom }: TProps): JSX.Element => {
  const group = useRef<THREE.Mesh>(null!);
  const { materials, nodes } = useGLTF('3d/angryBird/scene.gltf') as TGLTFResult;
  // Runs every rendered frame; ±0.05 rad per frame.
  useFrame(() => {
    if (shouldRotate) {
      if (isYRotationClock) {
        group.current.rotation.y += 0.05;
      } else {
        group.current.rotation.y -= 0.05;
      }
    }
  });
  return (
    <group ref={group} dispose={null} position={position} scale={zoom}>
      <group rotation={[-Math.PI / 2, 0, 0]}>
        <group rotation={[Math.PI / 2, 0, 0]} scale={1.47}>
          <primitive object={nodes.Armature_rootJoint} />
          <skinnedMesh
            geometry={nodes.RedGeo_0.geometry}
            material={materials.RedMat}
            skeleton={nodes.RedGeo_0.skeleton}
          />
        </group>
      </group>
    </group>
  );
};
// Fix: preload the asset this component actually loads. The previous path
// ('/scene_draco.glb') pointed at a different, unused model file, so the
// preload fetched nothing useful.
useGLTF.preload('3d/angryBird/scene.gltf');
export default ThreeModel;
import ThreeTestModel from '@components/ThreeTestModel';
import styles from '@components_style/ThreeWorld.module.sass';
import { AnimationContext } from '@ctx/AnimationContext';
import { OrthographicCamera } from '@react-three/drei';
import { Canvas } from '@react-three/fiber';
import { leftTopToCenter } from '@util/common';
import React, { Suspense, useContext } from 'react';
// examples
// https://onion2k.github.io/r3f-by-example/
// https://github.com/pmndrs/react-three-fiber/blob/master/markdown/api.md
// Orthographic three.js scene overlaid on the webcam feed. The camera frustum
// and the model's position/zoom/rotation all come from AnimationContext, which
// is driven by the hand-tracking loop in MainMiddleArea.
const ThreeWorld = (): JSX.Element => {
  const {
    bottom,
    divider,
    isYRotationClock,
    left,
    right,
    scaler,
    shouldRotate,
    top,
    videoHeight,
    videoWidth,
    x,
    y,
    zoom,
  } = useContext(AnimationContext);
  return (
    // Canvas matches the webcam's pixel size so overlay and video line up.
    <Canvas className={styles.newCanvas} style={{ width: videoWidth, height: videoHeight }}>
      <ambientLight />
      <pointLight position={[10, 10, 10]} />
      <OrthographicCamera
        makeDefault
        zoom={1}
        top={top}
        bottom={bottom}
        left={left}
        right={right}
        near={0}
        far={divider}
        position={[0, 0, divider]}
      />
      {/* Suspense: useGLTF inside the model suspends until the asset loads. */}
      <Suspense fallback={null}>
        <ThreeTestModel
          position={[leftTopToCenter(x, videoWidth, scaler, 0), leftTopToCenter(y, videoHeight, scaler, 150), 0]}
          zoom={zoom}
          shouldRotate={shouldRotate}
          isYRotationClock={isYRotationClock}
        />
      </Suspense>
    </Canvas>
  );
};
export default ThreeWorld;
import { AudioContext } from '@ctx/AudioContext';
import styles from '@layouts_style/MainBottomBar.module.sass';
import React, { useContext, useState } from 'react';
import SpeechRecognition from 'react-speech-recognition';
type TProps = {
  videoOn: boolean;
  showLeftBar: boolean;
  showRightBar: boolean;
  setVideoOn: (x: boolean) => void;
  setShowLeftBar: (x: boolean) => void;
  setShowRightBar: (x: boolean) => void;
};

// Bottom toolbar: 3D/settings hover menus on the left, video/stream/mic
// toggles in the middle, participant- and chat-panel toggles on the right.
const MainBottomBar = ({
  setShowLeftBar,
  setShowRightBar,
  setVideoOn,
  showLeftBar,
  showRightBar,
  videoOn,
}: TProps): JSX.Element => {
  const [threeMenuOn, setThreeMenuOn] = useState(false);
  const [settingsMenuOn, setSettingsMenuOn] = useState(false);
  const [streaming, setStreaming] = useState(false);
  const { isMicDisabled, microphoneOn, setMicrophoneOn } = useContext(AudioContext);
  // Each icon swaps to its "...Active" SVG variant while its feature is on.
  const videoIcon = videoOn ? 'videoCameraActive' : 'videoCamera';
  const streamingIcon = streaming ? 'streamingActive' : 'streaming';
  const microphoneIcon = microphoneOn ? 'microphoneActive' : 'microphone';
  const threeMenuIcon = threeMenuOn ? '3dActive' : '3d';
  const settingsMenuIcon = settingsMenuOn ? 'settingsActive' : 'settings';
  const usersIcon = showLeftBar ? 'groupActive' : 'group';
  const messageIcon = showRightBar ? 'messageActive' : 'message';
  return (
    <div className={styles.bottomBar}>
      <div className={styles.left}>
        <img
          src={`/images/${threeMenuIcon}.svg`}
          alt=''
          className={styles.leftIcons}
          onMouseEnter={() => {
            setThreeMenuOn(true);
          }}
          onMouseLeave={() => {
            setThreeMenuOn(false);
          }}
          // Fix: every other decorative icon in this bar is aria-hidden;
          // this one was missing it.
          aria-hidden='true'
        />
        <img
          src={`/images/${settingsMenuIcon}.svg`}
          alt=''
          className={styles.leftIcons}
          onMouseEnter={() => {
            setSettingsMenuOn(true);
          }}
          onMouseLeave={() => {
            setSettingsMenuOn(false);
          }}
          aria-hidden='true'
        />
      </div>
      <div className={styles.middle}>
        <img
          src={`/images/${videoIcon}.svg`}
          alt=''
          className={styles.middleIcons}
          onClick={() => {
            const tempState = videoOn;
            setVideoOn(!tempState);
          }}
          aria-hidden='true'
        />
        <img
          src={`/images/${streamingIcon}.svg`}
          alt=''
          className={styles.middleIcons}
          onClick={() => {
            // Toggling streaming also halts the browser speech recognition.
            setStreaming(!streaming);
            SpeechRecognition.stopListening();
          }}
          aria-hidden='true'
        />
        <img
          src={`/images/${microphoneIcon}.svg`}
          alt=''
          className={`${styles.middleIcons} ${isMicDisabled ? styles.disabled : ''}`}
          onClick={() => {
            // The mic toggle is locked while a listening session is active.
            if (!isMicDisabled) {
              setMicrophoneOn(prev => !prev);
            }
          }}
          aria-hidden='true'
        />
      </div>
      <div className={styles.right}>
        <img
          src={`/images/${usersIcon}.svg`}
          alt=''
          className={styles.rightIcons}
          onClick={() => {
            setShowLeftBar(!showLeftBar);
          }}
          aria-hidden='true'
        />
        <img
          src={`/images/${messageIcon}.svg`}
          alt=''
          className={styles.rightIcons}
          onClick={() => {
            setShowRightBar(!showRightBar);
          }}
          aria-hidden='true'
        />
      </div>
    </div>
  );
};
export default MainBottomBar;
import styles from '@layouts_style/MainLeftBar.module.sass';
import React from 'react';
// Left sidebar: static roster of meeting participants.
const MainLeftBar = (): JSX.Element => {
  const participants = [
    'D.G.Kodagoda',
    'Y.Abinaya',
    'Y.R.Kodagoda',
    'S.Kodagoda',
    'I.Kumarasinghe',
    'Thilina',
    'User1',
    'User2',
    'User3',
    'User4',
    'User5',
    'User6',
  ];
  return (
    <div className={styles.videoMenuLeft}>
      {participants.map(name => (
        <div key={name} className={styles.videoItem}>
          {name}
        </div>
      ))}
    </div>
  );
};
export default MainLeftBar;
import ThreeWorld from '@components/ThreeWorld';
import { AnimationContext } from '@ctx/AnimationContext';
import styles from '@layouts_style/MainMiddleArea.module.sass';
import * as handpose from '@tensorflow-models/handpose';
import { drawFullHand, drawZooming, fullCalculation } from '@util/handPose';
import React, { useContext, useEffect } from 'react';
import Webcam from 'react-webcam';
type TProps = {
  canvasRef: React.MutableRefObject<HTMLCanvasElement | null>;
  webcamRef: React.MutableRefObject<Webcam | null>;
};

// Webcam feed + hand-tracking overlay canvas + 3D scene. Runs the handpose
// model in a polling loop and publishes gesture state into AnimationContext.
const MainMiddleArea = ({ canvasRef, webcamRef }: TProps): JSX.Element => {
  const { setShouldRotate, setVideoHeight, setVideoWidth, setX, setY, setZoom } = useContext(AnimationContext);
  const showWebCam = true;
  const { containerHeight, containerWidth } = useContext(AnimationContext);
  // One detection pass: size the video/canvas, estimate landmarks, draw the
  // overlay and derive gesture state.
  const detect = async (net: handpose.HandPose) => {
    const videoReference = webcamRef?.current?.video as HTMLVideoElement;
    const canvasReference = canvasRef?.current as HTMLCanvasElement;
    if (
      typeof webcamRef.current !== 'undefined' &&
      webcamRef.current !== null &&
      videoReference.readyState === 4 && // HAVE_ENOUGH_DATA — stream is ready
      net
    ) {
      const { videoWidth } = videoReference;
      const { videoHeight } = videoReference;
      videoReference.width = videoWidth;
      videoReference.height = videoHeight;
      canvasReference.width = videoWidth;
      canvasReference.height = videoHeight;
      setVideoWidth(videoWidth);
      setVideoHeight(videoHeight);
      const hand = await net.estimateHands(videoReference);
      const ctx = canvasReference.getContext('2d');
      if (ctx) {
        drawZooming(ctx, hand);
        drawFullHand(ctx, hand);
        // markCanvasCorners(ctx, videoWidth, videoHeight);
        const { indexDown, middleDown, pinkyDown, ringDown, thumbIn, xVal, yVal } = fullCalculation(hand);
        if (indexDown !== null && pinkyDown !== null && ringDown !== null && middleDown !== null && thumbIn !== null) {
          // Pinky + ring + middle folded => zoom gesture.
          if (pinkyDown && ringDown && middleDown) {
            setZoom(3);
          } else {
            setZoom(1);
          }
          setShouldRotate(indexDown);
        }
        if (xVal !== null && yVal !== null) {
          setX(xVal);
          setY(yVal);
        }
      }
    }
  };
  useEffect(() => {
    let intervalId: ReturnType<typeof setInterval> | undefined;
    let cancelled = false;
    const runHandpose = async () => {
      const net = await handpose.load();
      if (cancelled) return; // unmounted while the model was still loading
      // Fix: keep the interval id so the detection loop can be stopped on
      // unmount — previously the setInterval leaked and ran forever.
      intervalId = setInterval(() => {
        detect(net);
      }, 10);
    };
    runHandpose();
    return () => {
      cancelled = true;
      if (intervalId !== undefined) clearInterval(intervalId);
    };
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);
  return (
    <>
      {showWebCam && (
        <Webcam
          ref={webcamRef}
          className={styles.videoObject}
          screenshotFormat='image/jpeg'
          mirrored
          width={containerWidth}
          height={containerHeight}
          videoConstraints={{ width: containerWidth, height: containerHeight }}
        />
      )}
      <canvas ref={canvasRef} className={styles.canvasObject} />
      <ThreeWorld />
    </>
  );
};
export default MainMiddleArea;
import styles from '@layouts_style/MainMiddlePlaceHolder.module.sass';
import React from 'react';
// Shown in place of the webcam feed whenever the camera is switched off.
const MainMiddlePlaceHolder = (): JSX.Element => (
  <div className={styles.videoPlaceholder}>
    <div className={styles.userName}>Duminda Kodagoda</div>
    <div className={styles.userMessage}>Your video is off!</div>
  </div>
);
export default MainMiddlePlaceHolder;
import EllipsisAnimator from '@components/EllipsisAnimator';
import { AudioContext } from '@ctx/AudioContext';
import styles from '@layouts_style/MainRightBar.module.sass';
import React, { useContext } from 'react';
import { useSpeechRecognition } from 'react-speech-recognition';
const MainRightBar = (): JSX.Element => {
const { action, messageArray, microphoneOn } = useContext(AudioContext);
const { transcript } = useSpeechRecognition();
let text = 'Turn your MIC on';
if (microphoneOn) {
text = '';
}
return (
<div className={styles.videoMenuRight}>
<div className={styles.chatItemCover}>
{messageArray.map((item, index) => {
if (item.type === 'key' && item.message === 'stop') {
return (
<div key={`chatItem-key-${index + 1}`} className={styles.keyItem}>
{item.message}
</div>
);
}
if (item.type === 'key' && item.message !== 'stop') {
return (
<div key={`chatItem-key-stop-${index + 1}`} className={styles.hiddenKey}>
{item.message}
</div>
);
}
return (
<div key={`chatItem-other-${index + 1}`} className={styles.chatItem}>
{item.message.replace('stop', '')}
</div>
);
})}
</div>
{microphoneOn && (
<div className={styles.waiting}>
<EllipsisAnimator text='Listening' />
<div>{action ? `${action}` : `${text}`}</div>
</div>
)}
<div className={styles.chatInput}>
<div className={styles.chatInputItem}>{transcript.replace('stop', '')}</div>
</div>
</div>
);
};
export default MainRightBar;
// Convert a coordinate measured from the video's left/top edge into the
// centred, scaled space used by the three.js scene: positive toward the
// centre-origin, shrunk by `scaler`, with an optional pixel `extra` offset.
export const leftTopToCenter = (value: number, length: number, scaler: number, extra: number): number => {
  const halfSpan = length / (2 * scaler);
  return halfSpan - (value + extra) / scaler;
};
// Radian/degree quick reference:
//   0° = 0 rad          90° = π/2 ≈ 1.5708    180° = π ≈ 3.14159
//   30° ≈ 0.523599      120° ≈ 2.0944         270° ≈ 4.71239
//   45° ≈ 0.785398      150° ≈ 2.61799        360° = 2π ≈ 6.28319
//   60° ≈ 1.0472

// Convert an angle in radians to degrees.
export const toDegree = (radian: number): number => {
  return (radian * 180) / Math.PI;
};

// Convert an angle in degrees to radians.
export const toRadian = (degree: number): number => {
  return (degree * Math.PI) / 180;
};

// Round `val` to `base` decimal places and return it as a number again.
export const toFixedVal = (val: number, base: number): number => {
  return Number(val.toFixed(base));
};
import { AnnotatedPrediction } from '@tensorflow-models/handpose';
// Result of fullCalculation; every field is null when no hand was detected.
type TReturn = {
  xVal: number | null; // palm-centre x: mean of landmarks 0, 5 and 17
  yVal: number | null; // palm-centre y
  xLen: number | null; // x distance between pinky knuckle (17) and index knuckle (5)
  indexThumbAngle: number | null; // angle (radians) at the index knuckle between index tip and thumb tip
  indexDown: boolean | null; // "down" flags: tip y greater than knuckle y (lower on screen)
  middleDown: boolean | null;
  ringDown: boolean | null;
  pinkyDown: boolean | null;
  thumbIn: boolean | null; // thumb tip x less than thumb base-joint x
};
type TFingerJoints = {
  thumb: number[];
  index: number[];
  middle: number[];
  ring: number[];
  pinky: number[];
};
// Handpose landmark indices per finger; each chain starts at the wrist
// (landmark 0) so the palm connections are drawn too.
const fingerJoints: TFingerJoints = {
  thumb: [0, 1, 2, 3, 4],
  index: [0, 5, 6, 7, 8],
  middle: [0, 9, 10, 11, 12],
  ring: [0, 13, 14, 15, 16],
  pinky: [0, 17, 18, 19, 20],
};
// Infinity Gauntlet Style
// Per-landmark dot style keyed by handpose landmark index (0–20): the wrist
// (0) and the base knuckles (2, 5, 9, 13, 17) get larger, distinct colours;
// every other joint is a small gold dot.
const style = {
  0: { color: 'yellow', size: 15 },
  1: { color: 'gold', size: 6 },
  2: { color: 'green', size: 10 }, // green
  3: { color: 'gold', size: 6 },
  4: { color: 'gold', size: 6 },
  5: { color: 'purple', size: 10 }, // purple
  6: { color: 'gold', size: 6 },
  7: { color: 'gold', size: 6 },
  8: { color: 'gold', size: 6 },
  9: { color: 'blue', size: 10 }, // blue
  10: { color: 'gold', size: 6 },
  11: { color: 'gold', size: 6 },
  12: { color: 'gold', size: 6 },
  13: { color: 'red', size: 10 }, // red
  14: { color: 'gold', size: 6 },
  15: { color: 'gold', size: 6 },
  16: { color: 'gold', size: 6 },
  17: { color: 'orange', size: 10 }, // orange
  18: { color: 'gold', size: 6 },
  19: { color: 'gold', size: 6 },
  20: { color: 'gold', size: 6 },
};
// Draw the center of the hand -->
// Paints a filled red dot of radius 10 at (x, y) on the overlay canvas.
// (The 3π arc sweep is more than a full circle; 2π would suffice.)
export const drawHandCenter = (ctx: CanvasRenderingContext2D, x: number, y: number): void => {
  ctx.beginPath();
  ctx.arc(x, y, 10, 0, 3 * Math.PI);
  ctx.fillStyle = 'red';
  ctx.fill();
};
type TNumArr = [number, number, number];

// Angle (radians) at vertex B of triangle ABC, via the law of cosines.
// Only the x/y components of each landmark are used.
const find_angle = (A: TNumArr, B: TNumArr, C: TNumArr) => {
  const sideC = Math.sqrt((B[0] - A[0]) ** 2 + (B[1] - A[1]) ** 2); // |AB|
  const sideA = Math.sqrt((B[0] - C[0]) ** 2 + (B[1] - C[1]) ** 2); // |BC|
  const sideB = Math.sqrt((C[0] - A[0]) ** 2 + (C[1] - A[1]) ** 2); // |AC|
  return Math.acos((sideA * sideA + sideC * sideC - sideB * sideB) / (2 * sideA * sideC));
};

// Fresh all-null result for frames where no hand is visible.
const emptyResult = (): TReturn => ({
  xVal: null,
  yVal: null,
  xLen: null,
  indexThumbAngle: null,
  indexDown: null,
  middleDown: null,
  ringDown: null,
  pinkyDown: null,
  thumbIn: null,
});

// Derive palm position, finger-fold flags and the index/thumb angle from the
// first detected hand. Landmark layout: predicted[markIndex][x | y | z];
// per the original author's note, the origin is the top-right corner of the
// (mirrored) video frame.
export const fullCalculation = (hand: AnnotatedPrediction[]): TReturn => {
  if (hand.length === 0) return emptyResult();
  const predicted = hand[0].landmarks;
  if (predicted.length === 0) return emptyResult();
  // Palm centre ≈ mean of wrist (0), index knuckle (5) and pinky knuckle (17).
  const palmXSum = predicted[0][0] + predicted[5][0] + predicted[17][0];
  const palmYSum = predicted[0][1] + predicted[5][1] + predicted[17][1];
  const xVal = parseFloat((palmXSum / 3).toFixed(4));
  const yVal = parseFloat((palmYSum / 3).toFixed(4));
  const xLen = predicted[17][0] - predicted[5][0];
  const indexThumbAngle = find_angle(predicted[8], predicted[5], predicted[4]);
  return {
    xVal,
    yVal,
    xLen,
    indexThumbAngle,
    indexDown: predicted[8][1] > predicted[5][1], // tip lower than knuckle on screen
    middleDown: predicted[12][1] > predicted[9][1],
    ringDown: predicted[16][1] > predicted[13][1],
    pinkyDown: predicted[20][1] > predicted[17][1],
    thumbIn: predicted[4][0] < predicted[1][0],
  };
};
// Draw the two red guide segments of the zoom gesture on the first detected
// hand: index tip (8) -> index knuckle (5) -> thumb tip (4).
export const drawZooming = (ctx: CanvasRenderingContext2D, hand: AnnotatedPrediction[]): void => {
  if (hand.length === 0) return;
  const predicted = hand[0].landmarks;
  if (predicted.length === 0) return;
  const drawSegment = (from: number[], to: number[]) => {
    ctx.beginPath();
    ctx.moveTo(from[0], from[1]);
    ctx.lineTo(to[0], to[1]);
    ctx.strokeStyle = 'red';
    ctx.lineWidth = 4;
    ctx.stroke();
  };
  drawSegment(predicted[8], predicted[5]); // index tip -> index knuckle
  drawSegment(predicted[5], predicted[4]); // index knuckle -> thumb tip
};
// Draw the full skeleton of every detected hand: plum segments between
// consecutive joints of each finger, then a styled dot on every landmark.
export const drawFullHand = (ctx: CanvasRenderingContext2D, hand: AnnotatedPrediction[]): void => {
  if (hand.length === 0) return;
  hand.forEach((prediction: AnnotatedPrediction) => {
    const { landmarks } = prediction;
    // Connect consecutive joints along each finger chain.
    Object.keys(fingerJoints).forEach(finger => {
      const joints = fingerJoints[finger];
      for (let k = 0; k < joints.length - 1; k += 1) {
        const [x1, y1] = landmarks[joints[k]];
        const [x2, y2] = landmarks[joints[k + 1]];
        ctx.beginPath();
        ctx.moveTo(x1, y1);
        ctx.lineTo(x2, y2);
        ctx.strokeStyle = 'plum';
        ctx.lineWidth = 4;
        ctx.stroke();
      }
    });
    // Dot on every landmark, colour/size keyed by joint index.
    landmarks.forEach(([x, y], i) => {
      ctx.beginPath();
      ctx.arc(x, y, style[i].size, 0, 3 * Math.PI);
      ctx.fillStyle = style[i].color;
      ctx.fill();
    });
  });
};
/**
 * Debug helper: paint reference dots at the corners, edge midpoints and centre
 * of the overlay canvas. Per the original labelling, `right` is x = 0 and
 * `left` is x = screenWidth (the video is mirrored, so the author treats the
 * top-right corner as the origin).
 */
export const markCanvasCorners = (ctx: CanvasRenderingContext2D, screenWidth: number, screenHeight: number): void => {
  // One filled dot at (x, y) — factored out of ten copy-pasted sequences.
  const dot = (x: number, y: number, radius: number, color: string): void => {
    ctx.beginPath();
    ctx.arc(x, y, radius, 0, 3 * Math.PI);
    ctx.fillStyle = color;
    ctx.fill();
  };
  const xMiddle = screenWidth / 2;
  const yMiddle = screenHeight / 2;
  const right = 0;
  const left = screenWidth;
  const top = 0;
  const bottom = screenHeight;
  dot(right, top, 20, 'white'); // origin marker (top "right")
  dot(xMiddle, yMiddle, 10, 'red'); // centre
  dot(left, top, 10, 'red');
  dot(xMiddle, top, 10, 'red');
  dot(right, top, 10, 'red');
  dot(right, yMiddle, 10, 'red');
  dot(right, bottom, 10, 'red');
  dot(xMiddle, bottom, 10, 'red');
  dot(left, bottom, 10, 'red');
  dot(left, yMiddle, 10, 'red');
};
import { AnnotatedPrediction } from '@tensorflow-models/handpose';
type TFingerJoints = {
  thumb: number[];
  indexFinger: number[];
  middleFinger: number[];
  ringFinger: number[];
  pinky: number[];
};
// Handpose landmark indices per finger; each chain starts at the wrist
// (landmark 0). Used by the full-skeleton renderer.
const fingerJoints: TFingerJoints = {
  thumb: [0, 1, 2, 3, 4],
  indexFinger: [0, 5, 6, 7, 8],
  middleFinger: [0, 9, 10, 11, 12],
  ringFinger: [0, 13, 14, 15, 16],
  pinky: [0, 17, 18, 19, 20],
};
// Infinity Gauntlet Style
// Per-landmark dot style keyed by handpose landmark index (0–20); the wrist
// (0) and base knuckles (2, 5, 9, 13, 17) get larger, distinct colours.
const style = {
  0: { color: 'yellow', size: 15 },
  1: { color: 'gold', size: 6 },
  2: { color: 'green', size: 10 }, // green
  3: { color: 'gold', size: 6 },
  4: { color: 'gold', size: 6 },
  5: { color: 'purple', size: 10 }, // purple
  6: { color: 'gold', size: 6 },
  7: { color: 'gold', size: 6 },
  8: { color: 'gold', size: 6 },
  9: { color: 'blue', size: 10 }, // blue
  10: { color: 'gold', size: 6 },
  11: { color: 'gold', size: 6 },
  12: { color: 'gold', size: 6 },
  13: { color: 'red', size: 10 }, // red
  14: { color: 'gold', size: 6 },
  15: { color: 'gold', size: 6 },
  16: { color: 'gold', size: 6 },
  17: { color: 'orange', size: 10 }, // orange
  18: { color: 'gold', size: 6 },
  19: { color: 'gold', size: 6 },
  20: { color: 'gold', size: 6 },
};
/**
 * Draw a red dot at the centre of the first detected palm.
 *
 * The centre is approximated as the mean of landmarks 0 (wrist),
 * 5 (index knuckle) and 17 (pinky knuckle), rounded to 4 decimals.
 *
 * Cleanup: removed a large block of commented-out full-skeleton drawing code
 * and collapsed the duplicated `predictions.length > 0` guard into early
 * returns; behavior for every input is unchanged.
 */
export const drawHand = (predictions: AnnotatedPrediction[], ctx: CanvasRenderingContext2D): void => {
  if (predictions.length === 0) return;
  const predicted = predictions[0].landmarks;
  if (predicted.length === 0) return;
  const xMidSum = predicted[0][0] + predicted[5][0] + predicted[17][0];
  const yMidSum = predicted[0][1] + predicted[5][1] + predicted[17][1];
  ctx.beginPath();
  ctx.arc(parseFloat((xMidSum / 3).toFixed(4)), parseFloat((yMidSum / 3).toFixed(4)), 10, 0, 3 * Math.PI);
  ctx.fillStyle = 'red';
  ctx.fill();
};
/* eslint-disable @next/next/no-page-custom-font */
import { AnimationContextProvider } from '@ctx/AnimationContext';
import { AudioContextProvider } from '@ctx/AudioContext';
import { CartContextProvider } from '@ctx/CartContext';
import { UserContextProvider } from '@ctx/UserContext';
import '@styles/globals.sass';
import type { AppProps } from 'next/app';
import Head from 'next/head';
import React from 'react';
import 'regenerator-runtime/runtime';
const MyApp = ({ Component, pageProps }: AppProps): React.ReactElement => {
return (
......@@ -20,11 +24,19 @@ const MyApp = ({ Component, pageProps }: AppProps): React.ReactElement => {
<meta name='description' content='Art for everyone' />
<meta name='theme-color' content='#000' />
<meta name='apple-mobile-web-app-status-bar-style' content='#000' />
<link rel='preconnect' href='https://fonts.googleapis.com' />
<link rel='preconnect' href='https://fonts.gstatic.com' crossOrigin='true' />
<link href='https://fonts.googleapis.com/css2?family=Quicksand:wght@400&display=swap' rel='stylesheet' />
</Head>
      <UserContextProvider>
        <CartContextProvider>
          <AudioContextProvider>
            <AnimationContextProvider>
              {/* eslint-disable-next-line react/jsx-props-no-spreading */}
              <Component {...pageProps} />
            </AnimationContextProvider>
          </AudioContextProvider>
        </CartContextProvider>
      </UserContextProvider>
</>
......
import Document, { DocumentContext, DocumentInitialProps } from 'next/document';
import 'regenerator-runtime/runtime';
/**
 * Custom Next.js Document. Currently a pure pass-through: it delegates to the
 * default `Document.getInitialProps` without modifying the initial props,
 * and exists only as an extension point for future document-level markup.
 */
class MyDocument extends Document {
  static async getInitialProps(ctx: DocumentContext): Promise<DocumentInitialProps> {
    // Nothing to customise yet — hand straight back to the base class.
    return Document.getInitialProps(ctx);
  }
}
export default MyDocument;
import { AnimationContext } from '@ctx/AnimationContext';
import MainBottomBar from '@layouts/MainBottomBar';
import MainLeftBar from '@layouts/MainLeftBar';
import MainMiddleArea from '@layouts/MainMiddleArea';
import MainMiddlePlaceHolder from '@layouts/MainMiddlePlaceHolder';
import MainRightBar from '@layouts/MainRightBar';
import styles from '@pages_style/index.module.sass';
import * as handpose from '@tensorflow-models/handpose';
import '@tensorflow/tfjs-backend-webgl';
import { drawHand } from '@util/utilities';
import React, { useContext, useEffect, useRef, useState } from 'react';
import Webcam from 'react-webcam';
import * as THREE from 'three';
// Main conference page: participant strip, webcam with a hand-tracking
// overlay canvas, chat panel, and the bottom control bar. Boots a three.js
// scene and the TensorFlow handpose detection loop on mount.
// NOTE(review): this block appears to interleave pre- and post-refactor code
// from a rendered diff — both the inline video/menu markup and the Main*
// layout components are present, and the useEffect below is followed by a
// second, orphaned dependency-list closer — so it will not compile as-is;
// verify against the final revision of this file.
const Index = (): JSX.Element => {
  // Toggle state for the bottom-bar controls and the two side panels.
  const [threeMenuOn, setThreeMenuOn] = useState(false);
  const [settingsMenuOn, setSettingsMenuOn] = useState(false);
  const webcamRef = useRef<Webcam | null>(null);
  // Overlay canvas the hand landmarks are drawn onto.
  const canvasRef = useRef<HTMLCanvasElement | null>(null);
  const [streaming, setStreaming] = useState(false);
  const containerRef = useRef<HTMLDivElement | null>(null);
  const [videoOn, setVideoOn] = useState(false);
  const [microphoneOn, setMicrophoneOn] = useState(false);
  const [showLeftBar, setShowLeftBar] = useState(true);
  const [showRightBar, setShowRightBar] = useState(true);
  // Each icon switches to its "...Active" SVG while the feature is on.
  const threeMenuIcon = threeMenuOn ? '3dActive' : '3d';
  const settingsMenuIcon = settingsMenuOn ? 'settingsActive' : 'settings';
  const videoIcon = videoOn ? 'videoCameraActive' : 'videoCamera';
  const streamingIcon = streaming ? 'streamingActive' : 'streaming';
  const microphoneIcon = microphoneOn ? 'microphoneActive' : 'microphone';
  const usersIcon = showLeftBar ? 'groupActive' : 'group';
  const messageIcon = showRightBar ? 'messageActive' : 'message';
  // Holds { scene, camera, renderer } after the mount effect runs.
  const [page, setPages] = useState({});
  const [cube, setCube] = useState([{ name: 'initCube' }]);
  const [count, setCount] = useState(1);
  // Hard-coded demo messages rendered in the chat panel.
  const messageArray = [
    {
      message: 'Hi',
      time: '',
    },
    {
      message: 'Test',
      time: '',
    },
  ];
  // Loads the handpose model once, then polls detection every 10 ms.
  // NOTE(review): the interval is never cleared on unmount, and 10 ms is much
  // faster than model inference — confirm this is intended.
  const runHandpose = async () => {
    const net = await handpose.load();
    // Loop and detect hands
    setInterval(() => {
      detect(net);
    }, 10);
  };
  // One detection pass: mirror the video's intrinsic size onto the video
  // element and overlay canvas, estimate hand landmarks, draw via drawHand.
  const detect = async (net: handpose.HandPose) => {
    const videoReference = webcamRef?.current?.video as HTMLVideoElement;
    const canvasReference = canvasRef?.current as HTMLCanvasElement;
    if (
      typeof webcamRef.current !== 'undefined' &&
      webcamRef.current !== null &&
      videoReference.readyState === 4 &&
      net
    ) {
      // Get video properties
      const { videoWidth } = videoReference;
      const { videoHeight } = videoReference;
      // Set video width
      videoReference.width = videoWidth;
      videoReference.height = videoHeight;
      // Set canvas height
      canvasReference.width = videoWidth;
      canvasReference.height = videoHeight;
      // Make Detections
      const hand = await net.estimateHands(videoReference);
      // Draw mesh
      const ctx = canvasReference.getContext('2d');
      if (ctx) {
        drawHand(hand, ctx);
      }
    }
  };
  const { setContainerHeight, setContainerWidth } = useContext(AnimationContext);
  // Mount effect: start the handpose loop and set up the three.js scene.
  useEffect(() => {
    runHandpose();
    // Scene, camera, and renderer
    const scene = new THREE.Scene();
    scene.background = new THREE.Color(0xffffff);
    const camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 1000);
    const renderer = new THREE.WebGLRenderer();
    renderer.setSize(window.innerWidth, window.innerHeight);
    document.body.appendChild(renderer.domElement);
    setPages({ scene, camera, renderer });
    camera.position.z = 60;
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);
  // NOTE(review): the lines from here to the stray
  // `}, [setContainerHeight, setContainerWidth]);` look like the tail of a
  // second version of the effect above (diff residue) — the closer has no
  // matching opener in this revision.
  const containerRefs = containerRef?.current as HTMLDivElement;
  if (containerRefs) {
    setContainerWidth(containerRefs.clientWidth);
    setContainerHeight(containerRefs.clientHeight);
  }
  }, [setContainerHeight, setContainerWidth]);
  return (
    <div className={styles.container}>
      <div className={styles.videoContainer}>
        <div className={styles.videoContainerInner}>
          {showLeftBar && (
            <div className={styles.videoMenuLeft}>
              <div className={styles.videoItem}>D.G.Kodagoda</div>
              <div className={styles.videoItem}>Y.Abinaya</div>
              <div className={styles.videoItem}>Y.R.Kodagoda</div>
              <div className={styles.videoItem}>S.Kodagoda</div>
              <div className={styles.videoItem}>I.Kumarasinghe</div>
              <div className={styles.videoItem}>Thilina</div>
              <div className={styles.videoItem}>User1</div>
              <div className={styles.videoItem}>User2</div>
              <div className={styles.videoItem}>User3</div>
              <div className={styles.videoItem}>User4</div>
              <div className={styles.videoItem}>User5</div>
              <div className={styles.videoItem}>User6</div>
            </div>
          )}
          <div className={styles.videoMenuMiddle} style={{ marginLeft: showLeftBar ? '20px' : '0px' }}>
            {videoOn ? (
              <>
                <Webcam ref={webcamRef} className={styles.videoObject} />
                <canvas ref={canvasRef} className={styles.canvasObject} />
              </>
            ) : (
              <div className={styles.videoPlaceholder}>
                <div className={styles.userName}>Duminda Kodagoda</div>
                <div className={styles.userMessage}>Your video is off!</div>
              </div>
            )}
            {showLeftBar && <MainLeftBar />}
            <div ref={containerRef} className={styles.videoMenuMiddle}>
              {videoOn ? <MainMiddleArea canvasRef={canvasRef} webcamRef={webcamRef} /> : <MainMiddlePlaceHolder />}
            </div>
            {showRightBar && (
              <div className={styles.videoMenuRight}>
                <div className={styles.chatItemCover}>
                  {messageArray.map((item, index) => (
                    <div key={`chatItem-${index + 1}`} className={styles.chatItem}>
                      {item.message}
                    </div>
                  ))}
                </div>
                <div className={styles.chatInput}>
                  <input type='text' className={styles.chatInputItem} />
                </div>
              </div>
            )}
          </div>
        </div>
      </div>
      <div className={styles.bottomBar}>
        <div className={styles.left}>
          <img
            src={`/images/${threeMenuIcon}.svg`}
            alt=''
            className={styles.leftIcons}
            onMouseEnter={() => {
              setThreeMenuOn(true);
            }}
            onMouseLeave={() => {
              setThreeMenuOn(false);
            }}
          />
          <img
            src={`/images/${settingsMenuIcon}.svg`}
            alt=''
            className={styles.leftIcons}
            onMouseEnter={() => {
              setSettingsMenuOn(true);
            }}
            onMouseLeave={() => {
              setSettingsMenuOn(false);
            }}
            aria-hidden='true'
          />
        </div>
        <div className={styles.middle}>
          <img
            src={`/images/${videoIcon}.svg`}
            alt=''
            className={styles.middleIcons}
            onClick={() => {
              const tempState = videoOn;
              setVideoOn(!tempState);
            }}
            aria-hidden='true'
          />
          <img
            src={`/images/${streamingIcon}.svg`}
            alt=''
            className={styles.middleIcons}
            onClick={() => {
              setStreaming(!streaming);
            }}
            aria-hidden='true'
          />
          <img
            src={`/images/${microphoneIcon}.svg`}
            alt=''
            className={styles.middleIcons}
            onClick={() => {
              setMicrophoneOn(!microphoneOn);
            }}
            aria-hidden='true'
          />
        </div>
        <div className={styles.right}>
          <img
            src={`/images/${usersIcon}.svg`}
            alt=''
            className={styles.rightIcons}
            onClick={() => {
              setShowLeftBar(!showLeftBar);
            }}
            aria-hidden='true'
          />
          <img
            src={`/images/${messageIcon}.svg`}
            alt=''
            className={styles.rightIcons}
            onClick={() => {
              setShowRightBar(!showRightBar);
            }}
            aria-hidden='true'
          />
          {showRightBar && <MainRightBar />}
        </div>
      </div>
      <MainBottomBar
        videoOn={videoOn}
        showLeftBar={showLeftBar}
        showRightBar={showRightBar}
        setVideoOn={val => setVideoOn(val)}
        setShowLeftBar={val => setShowLeftBar(val)}
        setShowRightBar={val => setShowRightBar(val)}
      />
    </div>
  );
};
......
......@@ -9,7 +9,7 @@
html,
body
font-family: sans-serif
font-family: 'Quicksand', sans-serif
font-size: 16px
line-height: 1.6
margin: 0
......@@ -104,3 +104,6 @@ input[type='hidden']
button
+reset_button
// video
// object-fit: cover !important
......@@ -15,164 +15,15 @@
align-items: center
justify-content: center
.videoMenuLeft
flex-grow: 0
flex-shrink: 0
width: 200px
height: 100%
border-radius: 10px
display: flex
flex-direction: column
overflow-y: scroll
&::-webkit-scrollbar
display: none
-ms-overflow-style: none
scrollbar-width: none
.videoItem
flex-shrink: 0
flex-grow: 0
width: 100%
height: 200px
color: $white
background: $black2
border-radius: 10px
margin-bottom: 10px
display: flex
align-items: center
justify-content: center
font-size: 20px
font-weight: 900
.videoMenuMiddle
flex-grow: 1
height: 100%
width: 100%
margin: 0 20px
margin: 0
padding: 0
overflow: hidden
display: flex
align-items: center
justify-content: center
border-radius: 10px
position: relative
.videoObject
width: 100% !important
height: auto !important
transform: scaleX(-1)
.videoPlaceholder
background: $black3
width: 100%
height: 100%
display: flex
align-items: center
justify-content: center
flex-direction: column
color: $white
font-size: 3rem
.canvasObject
position: absolute
width: 100%
height: 100%
transform: scaleX(-1)
.videoMenuRight
flex-grow: 0
flex-shrink: 0
background: $black2
width: 300px
height: 100%
border-radius: 10px
display: flex
justify-content: space-between
flex-direction: column
.chatItemCover
flex-grow: 1
font-size: 18px
display: flex
align-items: flex-start
justify-content: flex-start
flex-direction: column
padding: 15px 15px 0px
.chatItem
display: inline-block
margin-bottom: 15px
border-radius: 10px 20px 20px 0px
color: $white
background: $black4
padding: 0.5em 1em
font-weight: 900
.chatInput
flex-grow: 0
flex-shrink: 0
border-top: 10px solid $black !important
.chatInputItem
opacity: 1
border: none
outline: none
width: 100%
padding: 0.8em
background: $black3!important
background-image: none !important
border-radius: 0 0 10px 10px
font-size: 18px
color: $white
font-weight: 900
.bottomBar
width: 100%
height: 60px
background: $black2
display: flex
align-items: center
justify-content: center
padding: 0 20px
.iconBottomBar
cursor: pointer
height: 40px
width: auto
.left
flex-grow: 1
display: flex
align-items: center
justify-content: flex-start
column-gap: 20px
.leftIcons
cursor: pointer
height: 30px
width: auto
.middle
display: flex
align-items: center
justify-content: center
background: $black1
padding: 10px 20px
border-radius: 10px
column-gap: 20px
.middleIcons
height: 30px
width: auto
cursor: pointer
.right
flex-grow: 1
display: flex
align-items: center
justify-content: flex-end
column-gap: 20px
.rightIcons
cursor: pointer
height: 30px
width: auto
@import '../../common/_util'

// Centres the generated three.js canvas inside its positioned parent.
// NOTE(review): source indentation was lost in extraction; nesting here is
// reconstructed — verify against the original file.
.newCanvas
  position: absolute !important
  left: 50%
  top: 50%
  transform: translate(-50%,-50%)
  // canvas
  // width: auto !important
  // height: 100% !important
  // object-fit: cover !important
@import '../../common/_util'

// Bottom control strip: left (3d/settings), centre (video/stream/mic pill),
// right (participants/chat toggles) icon groups.
// NOTE(review): source indentation was lost in extraction; nesting here is
// reconstructed — verify against the original file.
.bottomBar
  width: 100%
  height: 60px
  background: $black2
  display: flex
  align-items: center
  justify-content: center
  padding: 0 20px
.iconBottomBar
  cursor: pointer
  height: 40px
  width: auto
.left
  flex-grow: 1
  display: flex
  align-items: center
  justify-content: flex-start
  column-gap: 20px
  .leftIcons
    cursor: pointer
    height: 30px
    width: auto
.middle
  display: flex
  align-items: center
  justify-content: center
  background: $black1
  padding: 10px 20px
  border-radius: 10px
  column-gap: 20px
  .middleIcons
    height: 30px
    width: auto
    cursor: pointer
.right
  flex-grow: 1
  display: flex
  align-items: center
  justify-content: flex-end
  column-gap: 20px
  .rightIcons
    cursor: pointer
    height: 30px
    width: auto
// Disabled controls keep their size but signal non-interactivity.
.disabled
  cursor: not-allowed
@import '../../common/_util'

// Left participant strip: fixed-width scrollable column of video tiles.
// NOTE(review): source indentation was lost in extraction; nesting here is
// reconstructed — verify against the original file.
.videoMenuLeft
  flex-grow: 0
  flex-shrink: 0
  margin-right: 16px
  width: 184px
  height: 100%
  border-radius: 10px
  display: flex
  flex-direction: column
  overflow-y: scroll
  // Hide the scrollbar (WebKit / IE-Edge / Firefox) while keeping scrolling.
  &::-webkit-scrollbar
    display: none
  -ms-overflow-style: none
  scrollbar-width: none
  .videoItem
    flex-shrink: 0
    flex-grow: 0
    width: 100%
    height: 200px
    color: $white
    background: $black2
    border-radius: 10px
    margin-bottom: 10px
    display: flex
    align-items: center
    justify-content: center
    font-size: 20px
    font-weight: 900
@import '../../common/_util'

// Webcam <video> element; the sizing overrides are currently disabled.
.videoObject
  // width: auto !important
  // height: 100% !important
  // object-fit: cover !important
// Landmark-overlay canvas: centred over the video and mirrored (scaleX(-1))
// to match the mirrored webcam feed.
.canvasObject
  position: absolute
  top: 50%
  left: 50%
  transform: translate(-50%,-50%) scaleX(-1)
  // width: auto !important
  // height: 100% !important
  // object-fit: cover !important
@import '../../common/_util'

// Full-size dark placeholder shown instead of the webcam feed while the
// user's video is off.
.videoPlaceholder
  background: $black3
  width: 100%
  height: 100%
  display: flex
  align-items: center
  justify-content: center
  flex-direction: column
  color: $white
  font-size: 3rem
@import '../../common/_util'

// Right-hand chat panel: message list, key-press debug items, and the
// message input / waiting area at the bottom.
// NOTE(review): source indentation was lost in extraction; nesting here is
// reconstructed — verify against the original file.
.videoMenuRight
  flex-grow: 0
  flex-shrink: 0
  background: $black2
  margin-left: 16px
  width: 284px
  height: 100%
  border-radius: 10px
  display: flex
  justify-content: space-between
  flex-direction: column
  .chatItemCover
    flex-grow: 1
    font-size: 18px
    display: flex
    align-items: flex-start
    justify-content: flex-start
    flex-direction: column
    padding: 15px 15px 0px
    overflow-y: auto
    .chatItem
      display: inline-block
      margin-bottom: 15px
      border-radius: 10px 20px 20px 0px
      color: $white
      background: $black4
      padding: 0.5em 1em
      font-weight: 900
    // Like .chatItem but green; the earlier dead `background: $black4`
    // declaration (always overridden by `background: green` below) removed.
    .keyItem
      display: inline-block
      margin-bottom: 15px
      border-radius: 10px 20px 20px 0px
      color: $white
      padding: 0.5em 1em
      font-weight: 900
      background: green
    // Same styling as .keyItem but hidden by default (dead duplicate
    // `background: $black4` likewise removed).
    .hiddenKey
      display: none
      margin-bottom: 15px
      border-radius: 10px 20px 20px 0px
      color: $white
      padding: 0.5em 1em
      font-weight: 900
      background: green
  .chatInput
    flex-grow: 0
    flex-shrink: 0
    border-top: 10px solid $black !important
    padding: 1rem
    color: $white
    .chatInputItem
      opacity: 1
      border: none
      outline: none
      width: 100%
      padding: 0.8em
      background: $black3!important
      background-image: none !important
      border-radius: 0 0 10px 10px
      font-size: 18px
      color: $white
      font-weight: 900
  .waiting
    display: flex
    align-items: center
    justify-content: space-between
    padding: .5rem 1rem
    font-size: 1.2rem
    color: #fff
    background: $black
    border-top: 10px solid $black
.submitBtn
  background: $black
  color: $white
  padding: 1rem
  border-radius: 10px
  width: 100%
  margin: 1rem 0
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment