Options
All
  • Public
  • Public/Protected
  • All
Menu

face-api.js

Index

Enumerations

Classes

Interfaces

Type aliases

Variables

Functions

Object literals

Type aliases

BatchNormParams

BatchNormParams: object

Type declaration

ConvWithBatchNormParams

ConvWithBatchNormParams: BatchNormParams & object

DenseBlock3Params

DenseBlock3Params: object

Type declaration

DenseBlock4Params

DenseBlock4Params: DenseBlock3Params & object

DrawBoxOptions

DrawBoxOptions: object

Type declaration

DrawDetectionOptions

DrawDetectionOptions: object

Type declaration

DrawFaceExpressionsInput

DrawFaceExpressionsInput: WithFaceExpressions<object>

DrawFaceExpressionsOptions

DrawFaceExpressionsOptions: object

Type declaration

DrawLandmarksOptions

DrawLandmarksOptions: object

Type declaration

DrawOptions

DrawOptions: object

Type declaration

DrawTextOptions

DrawTextOptions: object

Type declaration

Environment

Environment: FileSystem & object

FaceDetectionFunction

FaceDetectionFunction: function

Type declaration

FaceDetectionOptions

FaceDetectionOptions: TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TfjsImageRecognitionBase.TinyYolov2Options

FaceExpression

FaceExpression: "neutral" | "happy" | "sad" | "angry" | "fearful" | "disgusted" | "surprised"

FaceExpressionPrediction

FaceExpressionPrediction: object

Type declaration

FaceFeatureExtractorParams

FaceFeatureExtractorParams: object

Type declaration

FileSystem

FileSystem: object

Type declaration

NetParams

NetParams: object

Type declaration

SeparableConvWithBatchNormParams

SeparableConvWithBatchNormParams: object

Type declaration

TMediaElement

TMediaElement: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement

TNetInput

TNetInput: TNetInputArg | Array<TNetInputArg> | NetInput | tf.Tensor4D

TNetInputArg

TNetInputArg: string | TResolvedNetInput

TResolvedNetInput

TResolvedNetInput: TMediaElement | tf.Tensor3D | tf.Tensor4D

TinyFaceFeatureExtractorParams

TinyFaceFeatureExtractorParams: object

Type declaration

WithFaceDescriptor

WithFaceDescriptor: TSource & object

WithFaceDetection

WithFaceDetection: TSource & object

WithFaceExpressions

WithFaceExpressions: TSource & object

WithFaceLandmarks

WithFaceLandmarks: TSource & object

Variables

Const detectLandmarks

detectLandmarks: detectFaceLandmarks = detectFaceLandmarks

Const loadFaceDetectionModel

loadFaceDetectionModel: loadSsdMobilenetv1Model = loadSsdMobilenetv1Model

Const locateFaces

locateFaces: ssdMobilenetv1 = ssdMobilenetv1

Functions

awaitMediaLoaded

  • awaitMediaLoaded(media: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement): Promise<Object>
  • Parameters

    • media: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement

    Returns Promise<Object>

bufferToImage

  • bufferToImage(buf: Blob): Promise<HTMLImageElement>
  • Parameters

    • buf: Blob

    Returns Promise<HTMLImageElement>

Const computeFaceDescriptor

  • computeFaceDescriptor(input: TNetInput): Promise<Float32Array | Float32Array[]>
  • Computes a 128 entry vector (face descriptor / face embeddings) from the face shown in an image, which uniquely represents the features of that person's face. The computed face descriptor can be used to measure the similarity between faces, by computing the euclidean distance of two face descriptors.

    Parameters

    Returns Promise<Float32Array | Float32Array[]>

    Face descriptor with 128 entries or array thereof in case of batch input.

createBrowserEnv

  • Returns Environment

createCanvas

  • createCanvas(__namedParameters: object): HTMLCanvasElement
  • Parameters

    • __namedParameters: object

    Returns HTMLCanvasElement

createCanvasFromMedia

  • createCanvasFromMedia(media: HTMLImageElement | HTMLVideoElement | ImageData, dims?: IDimensions): HTMLCanvasElement
  • Parameters

    • media: HTMLImageElement | HTMLVideoElement | ImageData
    • Optional dims: IDimensions

    Returns HTMLCanvasElement

createFaceDetectionNet

  • Parameters

    • weights: Float32Array

    Returns SsdMobilenetv1

createFaceRecognitionNet

  • Parameters

    • weights: Float32Array

    Returns FaceRecognitionNet

createFileSystem

  • Parameters

    • Optional fs: any

    Returns FileSystem

createMtcnn

  • createMtcnn(weights: Float32Array): Mtcnn
  • Parameters

    • weights: Float32Array

    Returns Mtcnn

createNodejsEnv

  • Returns Environment

createSsdMobilenetv1

  • Parameters

    • weights: Float32Array

    Returns SsdMobilenetv1

createTinyFaceDetector

  • Parameters

    • weights: Float32Array

    Returns TinyFaceDetector

createTinyYolov2

  • createTinyYolov2(weights: Float32Array, withSeparableConvs?: boolean): TinyYolov2
  • Parameters

    • weights: Float32Array
    • Default value withSeparableConvs: boolean = true

    Returns TinyYolov2

denseBlock3

  • denseBlock3(x: tf.Tensor4D, denseBlockParams: DenseBlock3Params, isFirstLayer?: boolean): tf.Tensor4D
  • Parameters

    • x: tf.Tensor4D
    • denseBlockParams: DenseBlock3Params
    • Default value isFirstLayer: boolean = false

    Returns tf.Tensor4D

denseBlock4

  • denseBlock4(x: tf.Tensor4D, denseBlockParams: DenseBlock4Params, isFirstLayer?: boolean, isScaleDown?: boolean): tf.Tensor4D
  • Parameters

    • x: tf.Tensor4D
    • denseBlockParams: DenseBlock4Params
    • Default value isFirstLayer: boolean = false
    • Default value isScaleDown: boolean = true

    Returns tf.Tensor4D

depthwiseSeparableConv

  • depthwiseSeparableConv(x: tf.Tensor4D, params: TfjsImageRecognitionBase.SeparableConvParams, stride: [number, number]): tf.Tensor4D
  • Parameters

    • x: tf.Tensor4D
    • params: TfjsImageRecognitionBase.SeparableConvParams
    • stride: [number, number]

    Returns tf.Tensor4D

detectAllFaces

Const detectFaceLandmarks

  • Detects the 68 point face landmark positions of the face shown in an image.

    Parameters

    Returns Promise<FaceLandmarks68 | FaceLandmarks68[]>

    68 point face landmarks or array thereof in case of batch input.

Const detectFaceLandmarksTiny

  • Detects the 68 point face landmark positions of the face shown in an image using a tinier version of the 68 point face landmark model, which is slightly faster at inference, but also slightly less accurate.

    Parameters

    Returns Promise<FaceLandmarks68 | FaceLandmarks68[]>

    68 point face landmarks or array thereof in case of batch input.

detectSingleFace

drawBox

  • drawBox(ctx: CanvasRenderingContext2D, x: number, y: number, w: number, h: number, options: DrawBoxOptions): void
  • Parameters

    • ctx: CanvasRenderingContext2D
    • x: number
    • y: number
    • w: number
    • h: number
    • options: DrawBoxOptions

    Returns void

drawContour

  • drawContour(ctx: CanvasRenderingContext2D, points: Point[], isClosed?: boolean): void
  • Parameters

    • ctx: CanvasRenderingContext2D
    • points: Point[]
    • Default value isClosed: boolean = false

    Returns void

drawDetection

drawFaceExpressions

drawLandmarks

drawText

  • drawText(ctx: CanvasRenderingContext2D, x: number, y: number, text: string, options?: DrawTextOptions): void
  • Parameters

    • ctx: CanvasRenderingContext2D
    • x: number
    • y: number
    • text: string
    • Default value options: DrawTextOptions = {}

    Returns void

euclideanDistance

  • euclideanDistance(arr1: number[] | Float32Array, arr2: number[] | Float32Array): number
  • Parameters

    • arr1: number[] | Float32Array
    • arr2: number[] | Float32Array

    Returns number

extendWithFaceDescriptor

  • extendWithFaceDescriptor<TSource>(sourceObj: TSource, descriptor: Float32Array): WithFaceDescriptor<TSource>
  • Type parameters

    • TSource

    Parameters

    • sourceObj: TSource
    • descriptor: Float32Array

    Returns WithFaceDescriptor<TSource>

extendWithFaceDetection

  • Type parameters

    • TSource

    Parameters

    Returns WithFaceDetection<TSource>

extendWithFaceExpressions

extendWithFaceLandmarks

  • extendWithFaceLandmarks<TSource, TFaceLandmarks>(sourceObj: TSource, unshiftedLandmarks: TFaceLandmarks): WithFaceLandmarks<TSource, TFaceLandmarks>
  • Type parameters

    Parameters

    • sourceObj: TSource
    • unshiftedLandmarks: TFaceLandmarks

    Returns WithFaceLandmarks<TSource, TFaceLandmarks>

extractFaceTensors

  • extractFaceTensors(imageTensor: tf.Tensor3D | tf.Tensor4D, detections: Array<FaceDetection | Rect>): Promise<tf.Tensor3D[]>
  • Extracts the tensors of the image regions containing the detected faces. Useful if you want to compute the face descriptors for the face images. Using this method is faster than extracting a canvas for each face and converting them to tensors individually.

    Parameters

    • imageTensor: tf.Tensor3D | tf.Tensor4D

      The image tensor that face detection has been performed on.

    • detections: Array<FaceDetection | Rect>

      The face detection results or face bounding boxes for that image.

    Returns Promise<tf.Tensor3D[]>

    Tensors of the corresponding image region for each detected face.

extractFaces

  • Extracts the image regions containing the detected faces.

    Parameters

    • input: TNetInput

      The image that face detection has been performed on.

    • detections: Array<FaceDetection | Rect>

      The face detection results or face bounding boxes for that image.

    Returns Promise<HTMLCanvasElement[]>

    The Canvases of the corresponding image region for each detected face.

extractParams

  • extractParams(weights: Float32Array): object
  • Parameters

    • weights: Float32Array

    Returns object

extractParamsFromWeigthMap

  • extractParamsFromWeigthMap(weightMap: tf.NamedTensorMap): object
  • Parameters

    • weightMap: tf.NamedTensorMap

    Returns object

extractParamsFromWeigthMapTiny

  • extractParamsFromWeigthMapTiny(weightMap: tf.NamedTensorMap): object
  • Parameters

    • weightMap: tf.NamedTensorMap

    Returns object

extractParamsTiny

  • extractParamsTiny(weights: Float32Array): object
  • Parameters

    • weights: Float32Array

    Returns object

extractorsFactory

  • extractorsFactory(extractWeights: TfjsImageRecognitionBase.ExtractWeightsFunction, paramMappings: TfjsImageRecognitionBase.ParamMapping[]): object
  • Parameters

    • extractWeights: TfjsImageRecognitionBase.ExtractWeightsFunction
    • paramMappings: TfjsImageRecognitionBase.ParamMapping[]

    Returns object

fetchImage

  • fetchImage(uri: string): Promise<HTMLImageElement>
  • Parameters

    • uri: string

    Returns Promise<HTMLImageElement>

fetchJson

  • fetchJson<T>(uri: string): Promise<T>
  • Type parameters

    • T

    Parameters

    • uri: string

    Returns Promise<T>

fetchNetWeights

  • fetchNetWeights(uri: string): Promise<Float32Array>
  • Parameters

    • uri: string

    Returns Promise<Float32Array>

fetchOrThrow

  • fetchOrThrow(url: string, init?: RequestInit): Promise<Response>
  • Parameters

    • url: string
    • Optional init: RequestInit

    Returns Promise<Response>

fullyConnectedLayer

  • fullyConnectedLayer(x: tf.Tensor2D, params: TfjsImageRecognitionBase.FCParams): tf.Tensor2D
  • Parameters

    • x: tf.Tensor2D
    • params: TfjsImageRecognitionBase.FCParams

    Returns tf.Tensor2D

getContext2dOrThrow

  • getContext2dOrThrow(canvas: HTMLCanvasElement): CanvasRenderingContext2D
  • Parameters

    • canvas: HTMLCanvasElement

    Returns CanvasRenderingContext2D

getDefaultDrawOptions

  • Parameters

    • Default value options: any = {}

    Returns DrawOptions

getMediaDimensions

  • getMediaDimensions(input: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | IDimensions): Dimensions
  • Parameters

    • input: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | IDimensions

    Returns Dimensions

imageTensorToCanvas

  • imageTensorToCanvas(imgTensor: tf.Tensor, canvas?: HTMLCanvasElement): Promise<HTMLCanvasElement>
  • Parameters

    • imgTensor: tf.Tensor
    • Optional canvas: HTMLCanvasElement

    Returns Promise<HTMLCanvasElement>

imageToSquare

  • imageToSquare(input: HTMLImageElement | HTMLCanvasElement, inputSize: number, centerImage?: boolean): HTMLCanvasElement
  • Parameters

    • input: HTMLImageElement | HTMLCanvasElement
    • inputSize: number
    • Default value centerImage: boolean = false

    Returns HTMLCanvasElement

inverseSigmoid

  • inverseSigmoid(x: number): number
  • Parameters

    • x: number

    Returns number

iou

  • iou(box1: Box, box2: Box, isIOU?: boolean): number
  • Parameters

    • box1: Box
    • box2: Box
    • Default value isIOU: boolean = true

    Returns number

isBrowser

  • isBrowser(): boolean
  • Returns boolean

isMediaElement

  • isMediaElement(input: any): boolean
  • Parameters

    • input: any

    Returns boolean

isMediaLoaded

  • isMediaLoaded(media: HTMLImageElement | HTMLVideoElement): boolean
  • Parameters

    • media: HTMLImageElement | HTMLVideoElement

    Returns boolean

isNodejs

  • isNodejs(): boolean
  • Returns boolean

Const loadFaceExpressionModel

  • loadFaceExpressionModel(url: string): Promise<void>
  • Parameters

    • url: string

    Returns Promise<void>

Const loadFaceLandmarkModel

  • loadFaceLandmarkModel(url: string): Promise<void>
  • Parameters

    • url: string

    Returns Promise<void>

Const loadFaceLandmarkTinyModel

  • loadFaceLandmarkTinyModel(url: string): Promise<void>
  • Parameters

    • url: string

    Returns Promise<void>

Const loadFaceRecognitionModel

  • loadFaceRecognitionModel(url: string): Promise<void>
  • Parameters

    • url: string

    Returns Promise<void>

Const loadMtcnnModel

  • loadMtcnnModel(url: string): Promise<void>
  • Parameters

    • url: string

    Returns Promise<void>

loadParamsFactory

  • loadParamsFactory(weightMap: any, paramMappings: TfjsImageRecognitionBase.ParamMapping[]): object
  • Parameters

    • weightMap: any
    • paramMappings: TfjsImageRecognitionBase.ParamMapping[]

    Returns object

Const loadSsdMobilenetv1Model

  • loadSsdMobilenetv1Model(url: string): Promise<void>
  • Parameters

    • url: string

    Returns Promise<void>

Const loadTinyFaceDetectorModel

  • loadTinyFaceDetectorModel(url: string): Promise<void>
  • Parameters

    • url: string

    Returns Promise<void>

Const loadTinyYolov2Model

  • loadTinyYolov2Model(url: string): Promise<void>
  • Parameters

    • url: string

    Returns Promise<void>

loadWeightMap

  • loadWeightMap(uri: string | undefined, defaultModelName: string): Promise<tf.NamedTensorMap>
  • Parameters

    • uri: string | undefined
    • defaultModelName: string

    Returns Promise<tf.NamedTensorMap>

Const mtcnn

  • Attempts to detect all faces in an image and the 5 point face landmarks of each detected face using the MTCNN Network.

    Parameters

    • input: TNetInput

      The input image.

    • options: MtcnnOptions

      (optional, default: see MtcnnOptions constructor for default parameters).

    Returns Promise<WithFaceLandmarks<WithFaceDetection<__type>, FaceLandmarks5>[]>

    Bounding box of each face with score and 5 point face landmarks.

nonMaxSuppression

  • nonMaxSuppression(boxes: Box[], scores: number[], iouThreshold: number, isIOU?: boolean): number[]
  • Parameters

    • boxes: Box[]
    • scores: number[]
    • iouThreshold: number
    • Default value isIOU: boolean = true

    Returns number[]

normalize

  • normalize(x: tf.Tensor4D, meanRgb: number[]): tf.Tensor4D
  • Parameters

    • x: tf.Tensor4D
    • meanRgb: number[]

    Returns tf.Tensor4D

padToSquare

  • padToSquare(imgTensor: tf.Tensor4D, isCenterImage?: boolean): tf.Tensor4D
  • Pads the smaller dimension of an image tensor with zeros, such that width === height.

    Parameters

    • imgTensor: tf.Tensor4D

      The image tensor.

    • Default value isCenterImage: boolean = false

      (optional, default: false) If true, adds an equal amount of padding on both sides of the minor dimension of the image.

    Returns tf.Tensor4D

    The padded tensor with width === height.

Const recognizeFaceExpressions

  • Recognizes the facial expressions of a face and returns the likelihood of each facial expression.

    Parameters

    Returns Promise<FaceExpressionPrediction[] | FaceExpressionPrediction[][]>

    An array of facial expressions with corresponding probabilities or array thereof in case of batch input.

resizeResults

  • resizeResults<T>(results: T, __namedParameters: object): T
  • Type parameters

    • T

    Parameters

    • results: T
    • __namedParameters: object

    Returns T

resolveInput

  • resolveInput(arg: string | any): any
  • Parameters

    • arg: string | any

    Returns any

seperateWeightMaps

  • seperateWeightMaps(weightMap: tf.NamedTensorMap): object
  • Parameters

    • weightMap: tf.NamedTensorMap

    Returns object

shuffleArray

  • shuffleArray(inputArray: any[]): any[]
  • Parameters

    • inputArray: any[]

    Returns any[]

sigmoid

  • sigmoid(x: number): number
  • Parameters

    • x: number

    Returns number

Const ssdMobilenetv1

  • Attempts to detect all faces in an image using SSD Mobilenetv1 Network.

    Parameters

    Returns Promise<FaceDetection[]>

    Bounding box of each face with score.

Const tinyFaceDetector

  • Attempts to detect all faces in an image using the Tiny Face Detector.

    Parameters

    Returns Promise<FaceDetection[]>

    Bounding box of each face with score.

Const tinyYolov2

  • tinyYolov2(input: TNetInput, options: TfjsImageRecognitionBase.ITinyYolov2Options): Promise<FaceDetection[]>
  • Attempts to detect all faces in an image using the Tiny Yolov2 Network.

    Parameters

    • input: TNetInput

      The input image.

    • options: TfjsImageRecognitionBase.ITinyYolov2Options

      (optional, default: see TinyYolov2Options constructor for default parameters).

    Returns Promise<FaceDetection[]>

    Bounding box of each face with score.

toNetInput

  • Validates the input to make sure, they are valid net inputs and awaits all media elements to be finished loading.

    Parameters

    Returns Promise<NetInput>

    A NetInput instance, which can be passed into one of the neural networks.

Object literals

Const env

env: object

createBrowserEnv

createBrowserEnv: createBrowserEnv

createFileSystem

createFileSystem: createFileSystem

createNodejsEnv

createNodejsEnv: createNodejsEnv

getEnv

getEnv: getEnv

initialize

initialize: initialize

isBrowser

isBrowser: isBrowser

isNodejs

isNodejs: isNodejs

monkeyPatch

monkeyPatch: monkeyPatch

setEnv

setEnv: setEnv

Const faceExpressionLabels

faceExpressionLabels: object

angry

angry: number = 3

disgusted

disgusted: number = 5

fearful

fearful: number = 4

happy

happy: number = 1

neutral

neutral: number = 0

sad

sad: number = 2

surprised

surprised: number = 6

Const nets

nets: object

faceExpressionNet

faceExpressionNet: FaceExpressionNet = new FaceExpressionNet()

faceLandmark68Net

faceLandmark68Net: FaceLandmark68Net = new FaceLandmark68Net()

faceLandmark68TinyNet

faceLandmark68TinyNet: FaceLandmark68TinyNet = new FaceLandmark68TinyNet()

faceRecognitionNet

faceRecognitionNet: FaceRecognitionNet = new FaceRecognitionNet()

mtcnn

mtcnn: Mtcnn = new Mtcnn()

ssdMobilenetv1

ssdMobilenetv1: SsdMobilenetv1 = new SsdMobilenetv1()

tinyFaceDetector

tinyFaceDetector: TinyFaceDetector = new TinyFaceDetector()

tinyYolov2

tinyYolov2: TinyYolov2 = new TinyYolov2()

Generated using TypeDoc