def LoadHagglingData(fileName)

in renderer/glViewer.py [0:0]
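
Loads one haggling sequence (an .npz motion file plus sibling .pkl annotation files under the hard-coded /ssd data roots) into the viewer's global state. Depending on the bLoad* flags it loads face meshes with speech annotations, Holden-format skeletons, raw keypoints, or fitted ADAM body meshes, and hands the per-subject results to the viewer's set* functions. pickle, numpy (as np), and time are assumed to be imported at module level, along with the g_* globals and helper functions referenced below.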


def LoadHagglingData(fileName):
    global g_hagglingseq_name
    g_hagglingseq_name = fileName[:-4]  # strip the '.npz' extension

    bLoadFace = False
    bLoadAdam = True
    bLoadSkeleton = False
    bLoadKeypoints = False
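    # These flags select which modalities are loaded below: face meshes plus
    # speech annotations, ADAM body meshes, Holden-format skeletons, or raw
    # keypoints.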


    fileName_pkl = fileName.replace('npz', 'pkl')

    if bLoadFace:
        """Load Face data"""
        global g_faceModel
        if g_faceModel is None:
            import scipy.io as sio
            g_faceModel = sio.loadmat('/ssd/data/totalmodel/face_model_totalAligned.mat')

        # Read the per-subject face mesh parameters for this sequence
        seqPath = '/ssd/codes/pytorch_motionSynth/motionsynth_data/data/processed_panoptic/panopticDB_faceMesh_pkl_hagglingProcessed/' + fileName_pkl
        faceData = pickle.load(open(seqPath, 'rb'), encoding='latin1')  # latin1: pickles were written under Python 2

        FaceParam_list = GetFaceMesh(g_faceModel, faceData['subjects'])
        setMeshData(FaceParam_list)
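        # GetFaceMesh returns one dict per subject; its 'centers' field (the
        # face root trajectory, in meters) is reused below when attaching the
        # speech annotations.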

        # Read the speech annotations and attach each subject's speaking
        # status to their face root trajectory
        seqPath = '/ssd/codes/haggling_audio/panopticDB_pkl_speech_hagglingProcessed/' + fileName_pkl
        motionData = pickle.load(open(seqPath, 'rb'), encoding='latin1')
        speechData = [motionData['speechData'][0], motionData['speechData'][1], motionData['speechData'][2]]
        speech_rootData = [FaceParam_list[0]['centers'], FaceParam_list[1]['centers'], FaceParam_list[2]['centers']]
        speech_rootData = np.multiply(speech_rootData, 100.0)  # meters to cm
        setSpeech_withRoot(speechData, speech_rootData)

    if bLoadSkeleton:
        """Load Skeleton data (Holden's format)"""

        # Read body motion in Holden's 73-D format, one clip per subject
        X = np.load('/ssd/codes/pytorch_motionSynth/motionsynth_data/data/processed/panoptic_npz/' + fileName)['clips']  # e.g., (17944, 240, 73)
        X = np.swapaxes(X, 1, 2).astype(np.float32)  # e.g., (17944, 73, 240)
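        # The 73 channels per frame follow Holden et al.'s motion encoding
        # (an assumption about this dataset): 22 joints x 3 local positions,
        # root x/z and angular velocities, and 4 foot-contact labels.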
        set_Holden_Data_73([X[0, :, :], X[1, :, :], X[2, :, :]], ignore_root=True)
    if bLoadKeypoints:
        LoadHagglingDataKeypoints(fileName_pkl)

    if bLoadAdam:

        from modelViewer.batch_adam import ADAM
        global g_adamWrapper

        """Load Adam data"""
        if g_adamWrapper is None:
            g_adamWrapper = ADAM()

        # Read the fitted ADAM body-model parameters for this sequence
        seqPath = '/ssd/codes/pytorch_motionSynth/motionsynth_data/data/processed_panoptic/panopticDB_adamMesh_pkl_hagglingProcessed_stage1/' + fileName_pkl
        adamParam_all = pickle.load(open(seqPath, 'rb'), encoding='latin1')
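        # adamParam_all['subjects'] holds one dict per person with 'humanId'
        # and the per-frame 'betas', 'pose', 'trans', and 'faces' arrays
        # consumed in the loop below.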

        # Read the face mesh parameters to pick up facial expression coefficients
        seqPath = '/ssd/codes/pytorch_motionSynth/motionsynth_data/data/processed_panoptic/panopticDB_faceMesh_pkl_hagglingProcessed/' + fileName_pkl
        faceData = pickle.load(open(seqPath, 'rb'), encoding='latin1')

        meshes = []
        frameStart = 0
        frameEnd = None  # slice end: None keeps every frame (-1 here would drop the last frame)

        for adamParam in adamParam_all['subjects']:

            betas = np.swapaxes(adamParam['betas'], 0, 1)[frameStart:frameEnd]  # (frames, 30)
            faces = np.swapaxes(adamParam['faces'], 0, 1)[frameStart:frameEnd]  # (frames, 200)

            # Prefer the expression coefficients from the face-mesh file when
            # the same person appears there
            for faceParam in faceData['subjects']:
                if faceParam['humanId'] == adamParam['humanId']:
                    faces = np.swapaxes(faceParam['face_exp'], 0, 1)[frameStart:frameEnd]  # (frames, 200)
                    break

            pose = np.swapaxes(adamParam['pose'], 0, 1)[frameStart:frameEnd]  # (frames, 186)
            trans = np.swapaxes(adamParam['trans'], 0, 1)[frameStart:frameEnd]  # (frames, 3)
            startTime = time.time()

            # Align sequence lengths across all parameter streams
            frameNum = min([pose.shape[0], faces.shape[0], betas.shape[0], trans.shape[0]])
            pose = pose[:frameNum]
            faces = faces[:frameNum]
            betas = betas[:frameNum]
            trans = trans[:frameNum]

            v, j = g_adamWrapper(betas, pose, faces)  # v: (frameNum, 18540, 3), j: (frameNum, 62, 3)
            print('time: {}'.format(time.time() - startTime))
            v += np.expand_dims(trans, axis=1)  # the LBS has no global translation; trans: (frameNum, 3) -> (frameNum, 1, 3)
            v *= 0.01  # scale down for rendering (cm to m, presumably)

            normals = ComputeNormal(v, g_adamWrapper.f)
            meshes.append({'ver': v, 'normal': normals, 'f': g_adamWrapper.f})  # one entry per person so all are rendered together


        setMeshData(meshes)
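
A minimal usage sketch (assuming the module-level imports/globals and the hard-coded /ssd data paths above are in place; init_gl as the render-loop entry point is an assumption about the rest of glViewer.py, and the sequence name is illustrative):

    import renderer.glViewer as glViewer

    glViewer.LoadHagglingData('170224_haggling_a1_group0.npz')  # illustrative file name
    glViewer.init_gl()  # assumed entry point that starts the render loop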