Our project needs lip-synced mouth animation for dialogue, so I integrated OVRLipSync into the project.
1. Download OVRLipSync.unitypackage.
2. After importing, you can look at the included demo. I mainly needed the blend-shape (morph target) animation part. The classes to focus on are OVRLipSync.cs, OVRLipSyncContext.cs, and OVRLipSyncContextMorphTarget.cs.
3. OVRLipSync.cs mainly imports the DLL interface functions and must be initialized up front; you can create a GameObject in the scene and attach this script to it.
4. I merged OVRLipSyncContext.cs and OVRLipSyncContextMorphTarget.cs into a single .cs script, shown below.
[RequireComponent(typeof(AudioSource))]
public class OVRLipSyncContextEx : MonoBehaviour
{
public AudioSource audioSource = null;
public float gain = 1.0f;
public OVRLipSync.ovrLipSyncContextProvider provider = OVRLipSync.ovrLipSyncContextProvider.Main;
public bool delayCompensate = false;
private OVRLipSync.ovrLipSyncFrame frame = new OVRLipSync.ovrLipSyncFrame(0);
private uint context = 0; // 0 is no context
public SkinnedMeshRenderer skinnedMeshRenderer = null;
// Viseme smoothing (0-100) passed to SendSignal in Start; this field was missing from the merge
// (OVRLipSyncContextMorphTarget declares a similar smoothAmount field).
public int SmoothAmount = 70;
// Blend shape indices on the target mesh; the corresponding blend shape names are provided by the artist.
public int[] VisemeToBlendTargets = new int[(int)OVRLipSync.ovrLipSyncViseme.Count];
void Awake()
{
// Cache the audio source we are going to be using to pump data to the SR
if (!audioSource) audioSource = GetComponent<AudioSource>();
if (!audioSource) return;
}
void Start()
{
lock (this) // if there is no concurrent access, the lock can be removed
{
if (context == 0)
{
if (OVRLipSync.CreateContext(ref context, provider) != OVRLipSync.ovrLipSyncSuccess)
{
Debug.Log("OVRPhonemeContext.Start ERROR: Could not create Phoneme context.");
return;
}
}
// Get the audio data through AudioSourceListener; Unity fires OnAudioFilterRead roughly every 20 ms.
gameObject.GetComponent<AudioSourceListener>().OnEventAudioFilterRead += AudioFilterRead;
}
SendSignal(OVRLipSync.ovrLipSyncSignals.VisemeSmoothing, SmoothAmount, 0);
// Test blend shape names (provided by the artist); resolve them to blend shape
// indices on the target mesh. GetBlendShapeIndex returns -1 for a missing name,
// which SetVisemeToMorphTarget already skips.
string[] testNames =
{
"doubt", "smile", "anger", "surprise", "scare",
"nervous", "upset", "tiresome", "bashful", "greedy",
"doubt", "doubt", "doubt", "doubt", "doubt"
};
if (skinnedMeshRenderer != null)
{
for (int i = 0; i < testNames.Length && i < VisemeToBlendTargets.Length; i++)
VisemeToBlendTargets[i] = skinnedMeshRenderer.sharedMesh.GetBlendShapeIndex(testNames[i]);
}
}
void Update()
{
if(skinnedMeshRenderer != null)
{
if(GetCurrentPhonemeFrame(ref frame) == OVRLipSync.ovrLipSyncSuccess)
{
SetVisemeToMorphTarget();
}
}
}
void OnDestroy()
{
// Destroy the context we created for the audio buffer
lock (this) // if there is no concurrent access, the lock can be removed
{
if (context != 0)
{
if (OVRLipSync.DestroyContext(context) != OVRLipSync.ovrLipSyncSuccess)
{
Debug.Log("OVRPhonemeContext.OnDestroy ERROR: Could not delete Phoneme context.");
}
}
}
}
// Signature matches AudioSourceListener.DgtEventAudioFilterRead; the AudioObject argument is not used here.
void AudioFilterRead(float[] data, int channels, AudioObject obj)
{
// Do not process if we are not initialized, or if there is no
// audio source attached to the game object
if ((OVRLipSync.IsInitialized() != OVRLipSync.ovrLipSyncSuccess) || audioSource == null)
return;
// increase the gain of the input to get a better signal input
for (int i = 0; i < data.Length; ++i)
data[i] = data[i] * gain;
// Send data into Phoneme context for processing (if context is not 0)
lock (this) // if there is no concurrent access, the lock can be removed
{
if (context != 0)
{
OVRLipSync.ovrLipSyncFlag flags = 0;
// Set flags to feed into process
if (delayCompensate == true)
flags |= OVRLipSync.ovrLipSyncFlag.DelayCompensateAudio;
OVRLipSync.ProcessFrameInterleaved(context, data, flags, ref frame);
}
}
}
public int GetCurrentPhonemeFrame(ref OVRLipSync.ovrLipSyncFrame inFrame)
{
if (OVRLipSync.IsInitialized() != OVRLipSync.ovrLipSyncSuccess)
return (int)OVRLipSync.ovrLipSyncError.Unknown;
lock (this) // if there is no concurrent access, the lock can be removed
{
inFrame.frameNumber = frame.frameNumber;
inFrame.frameDelay = frame.frameDelay;
for (int i = 0; i < inFrame.Visemes.Length; i++)
{
inFrame.Visemes[i] = frame.Visemes[i];
}
}
return OVRLipSync.ovrLipSyncSuccess;
}
public int ResetContext()
{
if (OVRLipSync.IsInitialized() != OVRLipSync.ovrLipSyncSuccess)
return (int)OVRLipSync.ovrLipSyncError.Unknown;
return OVRLipSync.ResetContext(context);
}
// Not used at the moment
public int SendSignal(OVRLipSync.ovrLipSyncSignals signal, int arg1, int arg2)
{
if (OVRLipSync.IsInitialized() != OVRLipSync.ovrLipSyncSuccess)
return (int)OVRLipSync.ovrLipSyncError.Unknown;
return OVRLipSync.SendSignal(context, signal, arg1, arg2);
}
void SetVisemeToMorphTarget()
{
for (int i = 0; i < VisemeToBlendTargets.Length; i++)
{
if(VisemeToBlendTargets[i] != -1)
{
// Drive the blend shape weight on the model for this viseme.
skinnedMeshRenderer.SetBlendShapeWeight(VisemeToBlendTargets[i], frame.Visemes[i] * 100.0f);
}
}
}
}
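To tie the pieces together, here is a minimal setup sketch; it is not part of the original package. LipSyncSetupExample, faceRenderer and voiceClip are hypothetical names, and it assumes that the OVRLipSync.cs in this version of the package is a MonoBehaviour that initializes the DLL in Awake, as the demo scene does.
using UnityEngine;
// Hedged setup sketch: only OVRLipSync, AudioSourceListener and the merged
// OVRLipSyncContextEx above come from the package/article; everything else is hypothetical.
public class LipSyncSetupExample : MonoBehaviour
{
    public SkinnedMeshRenderer faceRenderer; // mesh that owns the artist's blend shapes
    public AudioClip voiceClip;              // dialogue clip that drives the mouth
    void Start()
    {
        // Step 3 above: OVRLipSync.cs must exist in the scene so the DLL is initialized
        // before any context is created.
        if (FindObjectOfType<OVRLipSync>() == null)
            gameObject.AddComponent<OVRLipSync>();
        // The merged context needs an AudioSource, the listener that forwards
        // OnAudioFilterRead, and the renderer whose blend shapes it drives.
        AudioSource source = gameObject.AddComponent<AudioSource>();
        source.clip = voiceClip;
        gameObject.AddComponent<AudioSourceListener>();
        OVRLipSyncContextEx context = gameObject.AddComponent<OVRLipSyncContextEx>();
        context.skinnedMeshRenderer = faceRenderer;
        source.Play();
    }
}
Attach this to the character, drag in the face renderer and the clip, and the blend shapes resolved in Start drive the mouth while the clip plays.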
The merged OVRLipSyncContextEx script assumes the audio is bound to and played on the model itself. Since many sounds are played independently of the model, you can implement the following Update instead:
float m_deltaTime = 0;
void Update()
{
if (m_bSpeak)
{
// trap inputs and send signals to phoneme engine for testing purposes
AudioItem item = AudioController.GetAudioItem(speakKey);
if (item != null)
{
if (m_obj) // the GameObject that plays the sound, provided by the caller
{
AudioSource ads = m_obj.GetComponent<AudioSource>();
if (ads)
{
// Unity calls OnAudioFilterRead roughly every 23 ms, so only update here once new data has arrived.
// Without this throttle the demo shows lip jitter.
if (m_deltaTime < 0.023f) // roughly one audio buffer (e.g. 1024 samples at 44.1 kHz ≈ 23 ms)
{
m_deltaTime += Time.deltaTime;
}
else
{
// get the current viseme frame
if (GetCurrentPhonemeFrame(ref frame) == OVRLipSync.ovrLipSyncSuccess)
{
SetVisemeToMorphTarget();
}
m_deltaTime = 0;
}
}
}
}
}
}
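The Update above uses m_bSpeak, speakKey and m_obj without showing their declarations. Below is a hedged sketch of how they might be declared and wired up inside the same class; BeginSpeak and EndSpeak are hypothetical helpers, and only AudioController, AudioItem, AudioSourceListener and AudioFilterRead come from the original code.
// Hedged sketch: these declarations are assumptions inferred from how the fields are used above.
bool m_bSpeak = false;     // true while the character is speaking
string speakKey = "";      // AudioController item key of the current voice line
GameObject m_obj = null;   // external GameObject that owns the playing AudioSource
// Hypothetical entry point called by whoever starts the dialogue audio.
public void BeginSpeak(GameObject audioObject, string key)
{
    m_obj = audioObject;
    speakKey = key;
    m_bSpeak = true;
    // Forward the external AudioSource's samples into the lip-sync context.
    AudioSourceListener listener = m_obj.GetComponent<AudioSourceListener>();
    if (listener != null)
        listener.OnEventAudioFilterRead += AudioFilterRead;
}
public void EndSpeak()
{
    m_bSpeak = false;
    if (m_obj != null)
    {
        AudioSourceListener listener = m_obj.GetComponent<AudioSourceListener>();
        if (listener != null)
            listener.OnEventAudioFilterRead -= AudioFilterRead;
    }
    m_obj = null;
}
Whichever path you use (audio on the model or an external AudioSource), make sure the handler is removed again when the speech ends.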
Add a delegate callback class and attach it to the same GameObject as the AudioSource:
public class AudioSourceListener : MonoBehaviour
{
public delegate void DgtEventAudioFilterRead(float[] data, int channels, AudioObject obj);
public DgtEventAudioFilterRead OnEventAudioFilterRead;
private AudioObject m_audio_obj;
// Use this for initialization
public void Start()
{
m_audio_obj = this.gameObject.GetComponent<AudioObject>();
}
void OnAudioFilterRead(float[] data, int channels)
{
if (OnEventAudioFilterRead != null)
{
OnEventAudioFilterRead(data,channels, m_audio_obj);
}
}
}
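One detail worth noting: the merged script subscribes AudioFilterRead to OnEventAudioFilterRead in Start but never unsubscribes. A small addition along these lines, assuming the listener sits on the same GameObject, keeps the delegate from holding a reference to a destroyed component; it is a suggested sketch, not something from the original package.
// Suggested helper for OVRLipSyncContextEx: call it from OnDestroy before destroying the context.
void UnsubscribeFromListener()
{
    AudioSourceListener listener = GetComponent<AudioSourceListener>();
    if (listener != null)
        listener.OnEventAudioFilterRead -= AudioFilterRead;
}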