
Script for AGORA (niki) · Closed · 10 comments

MohammadKhalid commented on June 3, 2024
Script for AGORA

Comments (10)

biansy000 commented on June 3, 2024

Here is the code. You should uncomment the relevant lines of code between L454 and L476 to generate the training and validation datasets for SMPL and SMPL-X, respectively, each time you run the code. Please let us know if you have any further problems.

In the code, SMPL_layer_kid and SMPLXLayer are adapted from HybrIK, and you can copy them from here.

import pickle as pk
import os
import random
import torch
import numpy as np
# import torch.nn as nn
import joblib

from hybrik.models.layers.smpl.SMPL import SMPL_layer, SMPL_layer_kid
from hybrik.models.layers.smplx.body_models import SMPLXLayer
from hybrik.models.layers.smplx.joint_names import JOINT_NAMES
from hybrik.models.layers.smpl.lbs import batch_rodrigues
import torch.utils.data as data
from tqdm import tqdm
import pandas
import copy

# import copy
import gc
import cv2
import io
gc.enable()
# torch.multiprocessing.set_sharing_strategy('file_system')

# ['xyz_17', 'vid_name', 'frame_id', 'joints3D', 'joints2D', 'shape', 'pose', 
# 'bbox', 'img_name', 'valid', 'xyz_29', 'twist_angle', 'uv_24', 'cam_param', 'width', 'height', 'amb_center_scale', 'amb_synth_size', 'img_paths', 'kpt_cam_origin', 'valid_camera']


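# Unpickler that remaps pickled torch CUDA storages to CPU, so annotation
# files written on a GPU machine can be loaded in CPU-only dataloader workers.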
class CPU_Unpickler(pk.Unpickler):
    def find_class(self, module, name):
        if module == 'torch.storage' and name == '_load_from_bytes':
            return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
        else:
            return super().find_class(module, name)

# contents = CPU_Unpickler(f).load()

class naive_dataset(data.Dataset):
    def __init__(self, annotation_path, parent_path='data/AGORA', use_gendered_smplx=True, use_kid=False):
        if 'smplx' in annotation_path or 'SMPLX' in annotation_path:
            model_type = 'smplx'
        else:
            model_type = 'smpl'

        if model_type == 'smpl':
            h36m_jregressor = np.load('./model_files/J_regressor_h36m.npy')
            smpl = SMPL_layer_kid(
                './model_files/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl',
                kid_template_path='model_files/smpl_v1.1.0/smil/smpl_kid_template.npy',
                h36m_jregressor=h36m_jregressor,
                dtype=torch.float32
            )
            self.smpl = smpl

        else:
            self.smplx_layer_neutral = SMPLXLayer(
                # model_path='model_files/smpl_v1.1.0/smplx/SMPLX_NEUTRAL.npz',
                model_path='/home/biansiyuan/git/agora_evaluation/demo/model/smplx/SMPLX_NEUTRAL.npz',
                num_betas=10,
                use_pca=False,
                age='kid',
                kid_template_path='/home/biansiyuan/git/agora_evaluation/utils/smplx_kid_template.npy',
            )

            self.smplx_layer_male = SMPLXLayer(
                # model_path='model_files/smpl_v1.1.0/smplx/SMPLX_MALE.npz',
                model_path='/home/biansiyuan/git/agora_evaluation/demo/model/smplx/SMPLX_MALE.npz',
                num_betas=10,
                use_pca=False,
                age='kid',
                kid_template_path='/home/biansiyuan/git/agora_evaluation/utils/smplx_kid_template.npy',
            )

            self.smplx_layer_female = SMPLXLayer(
                # model_path='model_files/smpl_v1.1.0/smplx/SMPLX_FEMALE.npz',
                model_path='/home/biansiyuan/git/agora_evaluation/demo/model/smplx/SMPLX_FEMALE.npz',
                num_betas=10,
                use_pca=False,
                age='kid',
                kid_template_path='/home/biansiyuan/git/agora_evaluation/utils/smplx_kid_template.npy',
            )

        self.model_type = model_type
        self.use_gendered_smplx = use_gendered_smplx

        df = pandas.read_pickle(annotation_path)

        self.annotation_path = annotation_path.split('/')[-1]
        self.use_kid = use_kid
        
        human_records = []
        n_kids, n_humans = 0, 0
        for i in range(df.shape[0]):

            model_paths = df.iloc[i][f'gt_path_{model_type}']
            human_record = []
            for k in range(len(model_paths)):
                if use_kid or (not df.iloc[i]['kid'][k]):
                    human_record.append({
                        'idx_win_img': k,
                        'img_idx': i
                    })

                if df.iloc[i]['kid'][k]:
                    n_kids += 1
                n_humans += 1

            human_records += human_record

        # ratio of kid instances across the whole annotation file
        print('kid ratio', n_kids * 1.0 / max(n_humans, 1))
        
        self.human_records = human_records
        self.df = df
    
    def __len__(self):
        return len(self.human_records)
    
    def __getitem__(self, i):
        # gc.collect()

        human_record = self.human_records[i]
        idx_win_img, img_idx = human_record['idx_win_img'], human_record['img_idx']

        gt_joints_2d = (self.df.iloc[img_idx]['gt_joints_2d'])[idx_win_img]
        # print('gt_joints_2d', gt_joints_2d.shape)
        gt_joints_3d = (self.df.iloc[img_idx]['gt_joints_3d'])[idx_win_img]
        gender = (self.df.iloc[img_idx]['gender'])[idx_win_img]
        img_path = copy.deepcopy(self.df.iloc[img_idx]['imgPath'])
        # print(img_path)
        isValid = self.df.iloc[img_idx]['isValid'][idx_win_img]
        occlusion = self.df.iloc[img_idx]['occlusion'][idx_win_img]
        is_kid = self.df.iloc[img_idx]['kid'][idx_win_img]

        processed_vertices = self.df.iloc[img_idx]['gt_verts'][idx_win_img]

        model_data_path_raw = self.df.iloc[img_idx][f'gt_path_{self.model_type}'][idx_win_img]
        model_data_path_raw = model_data_path_raw.split('.')[0] + '.pkl' # switch to pkl file

        model_data_path = os.path.join('data/AGORA/annotations', model_data_path_raw)
        with open(model_data_path, 'rb') as f:
            # model_data = pk.load(f)
            model_data = CPU_Unpickler(f).load()

        if self.model_type == 'smpl':
            betas = model_data['betas'].detach().cpu().reshape(1, -1).float()
            root_pose = model_data['root_pose'].detach().cpu().reshape(1, 1, 3)
            body_pose = model_data['body_pose'].detach().cpu().reshape(1, 23, 3)
            pose = torch.cat([root_pose, body_pose], dim=1).float()

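            # AGORA kid annotations carry an 11-th beta, the SMIL kid blend weight
            # (see SMPL_layer_kid); adults get an explicit zero kid coefficient.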
            if is_kid:
                shape_kid = betas[:, [-1]].clone()
            else:
                shape_kid = torch.zeros((1, 1)).float()
                betas = torch.cat([betas, shape_kid], dim=1)

            with torch.no_grad():
                smpl_out = self.smpl(
                    pose_axis_angle=pose,
                    betas=betas,
                    global_orient=None,
                    transl=None,
                    return_verts=True,
                    return_29_jts=True, 
                    is_get_twist=True
                )

                xyz_29 = smpl_out.joints.cpu().numpy().reshape(-1, 3)
                twist_angle = smpl_out.twist_angle.cpu().numpy()[0]
                xyz_17 = smpl_out.joints_from_verts.cpu().numpy().reshape(-1, 3)

            vertices = smpl_out.vertices

            rotmat = self.calculate_rot(vertices, processed_vertices)
            pose_np = self.rectify_pose(pose.numpy().reshape(-1), rotmat)

            xyz_17 = np.einsum('ij,kj->ki', rotmat, xyz_17)
            xyz_29 = np.einsum('ij,kj->ki', rotmat, xyz_29)

            xyz_29 = xyz_29 - xyz_29[0]
            xyz_17 = xyz_17 - xyz_17[0]

            gt_joints_3d = gt_joints_3d.reshape(-1, 3)
            gt_joints_3d_align = gt_joints_3d - gt_joints_3d[0]

            kpt_err = np.abs(gt_joints_3d_align[:24] - xyz_29[:24])
            assert (kpt_err < 1e-5).all(), kpt_err

            cam_param, uv_29 = self.calculate_projection(xyz_29[:24].copy(), gt_joints_2d[:24].copy(), xyz_29.copy(), num_jts=24)

            target = {
                'ann_path': self.annotation_path,
                'img_path': img_path,
                'gender': gender,
                'occlusion': occlusion,
                'is_valid': isValid,
                'is_kid': is_kid,
                'xyz_17': torch.from_numpy(xyz_17).float(),
                'xyz_29': torch.from_numpy(xyz_29).float(),
                'twist_angle': torch.from_numpy(twist_angle).float(),
                'pose': torch.from_numpy(pose_np).float(),
                'shape': betas[:, :10].reshape(10).float(),
                'shape_kid': shape_kid.reshape(1).float(),
                'uv_24': torch.from_numpy(gt_joints_2d).float(),
                'uv_29': torch.from_numpy(uv_29.reshape(29, 2)).float(),
                'gt_joints_3d': torch.from_numpy(gt_joints_3d).float(),
                'cam_param': torch.from_numpy(cam_param).float()
            }
        else:
            if is_kid and gender == 'female':
                gendered_smplx_layer = self.smplx_layer_neutral
            elif gender == 'male':
                gendered_smplx_layer = self.smplx_layer_male
            elif gender == 'female':
                gendered_smplx_layer = self.smplx_layer_female
            else:
                gendered_smplx_layer = self.smplx_layer_neutral
            
            smplx_layer = self.smplx_layer_neutral

            betas = torch.from_numpy(model_data['betas']).reshape(1, -1).float()
            global_orient = torch.from_numpy(model_data['global_orient']).reshape(1, 1, 3)
            body_pose = torch.from_numpy(model_data['body_pose']).reshape(1, 21, 3)

            if is_kid:
                shape_kid = betas[:, [-1]].clone()
                # print('is_kid', gender, i)
            else:
                shape_kid = torch.zeros((1, 1)).float()
                betas = torch.cat([betas, shape_kid], dim=1)

            left_hand_pose = torch.from_numpy(model_data['left_hand_pose']).reshape(1, 15, 3)
            right_hand_pose = torch.from_numpy(model_data['right_hand_pose']).reshape(1, 15, 3)
            jaw_pose = torch.from_numpy(model_data['jaw_pose']).reshape(1, 1, 3)
            leye_pose = torch.from_numpy(model_data['leye_pose']).reshape(1, 1, 3)
            reye_pose = torch.from_numpy(model_data['reye_pose']).reshape(1, 1, 3)
            expression = torch.from_numpy(model_data['expression']).reshape(1, 10)
            # keypoints_3d_raw = torch.from_numpy(model_data['keypoints_3d']).reshape(1, -1, 3)
            # v = torch.from_numpy(model_data['v']).reshape(1, 10475, 3)
            # assert model_data['gender'] == gender, model_data['gender'] + gender + f' {img_path}, {idx_win_img}'

            full_pose = torch.cat( [global_orient, body_pose, jaw_pose, 
                            leye_pose, reye_pose, left_hand_pose, right_hand_pose], dim=1)

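            # Assumption: the stored fits are relative to the SMPL-X mean pose,
            # so pose_mean is added back to obtain the absolute axis-angle pose.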
            full_pose = full_pose + smplx_layer.pose_mean.reshape(-1, 3)
            full_pose_rotmat = batch_rodrigues(full_pose.reshape(-1, 3)).reshape(1, -1, 3, 3)

            # smplx_layer = self.smplx_layer_neutral

            with torch.no_grad():
                smplx_out = smplx_layer.forward_simple(
                    betas=betas,
                    expression=expression,
                    full_pose=full_pose_rotmat,
                    return_verts=True,
                    # use_pose_mean=True
                )

                joints_basic = smplx_out.joints[:, :55]
                vertices = smplx_out.vertices
                joints_hybrik = smplx_layer.get_extended_joints(joints_basic, vertices)

                joints_hybrik = joints_hybrik.numpy().reshape(-1, 3)
                joints_full = smplx_out.joints.numpy().reshape(-1, 3)

                gendered_smplx_out = gendered_smplx_layer.forward_simple(
                    betas=betas,
                    expression=expression,
                    full_pose=full_pose_rotmat,
                    return_verts=True,
                    # use_pose_mean=True
                )

                if self.use_gendered_smplx:
                    # joints_full = gendered_smplx_out.joints.numpy().reshape(-1, 3)
                    # joints_hybrik_old = joints_hybrik.copy()
                    joints_hybrik = gendered_smplx_layer.get_extended_joints(gendered_smplx_out.joints[:, :55].clone(), gendered_smplx_out.vertices.clone())
                    joints_hybrik = joints_hybrik.numpy().reshape(-1, 3)

                    # print(joints_hybrik_old - joints_hybrik)

                rotmat = self.calculate_rot(gendered_smplx_out.vertices, processed_vertices)
                pose_np = self.rectify_pose(full_pose.numpy().reshape(-1), rotmat)

                gendered_joints = gendered_smplx_out.joints.cpu().numpy().reshape(-1, 3)

                if self.use_gendered_smplx:
                    twist_layer = gendered_smplx_layer
                else:
                    twist_layer = smplx_layer

                twist_angle = twist_layer.forward_get_twist(
                    betas=betas,
                    expression=expression,
                    full_pose=full_pose_rotmat,
                ).numpy().squeeze(0)

                twist_angle2 = torch.from_numpy(twist_angle).reshape(1, -1, 1).float()
                phis = torch.cat(
                    [torch.cos(twist_angle2), torch.sin(twist_angle2)], dim=2
                )
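                # HybrIK consumes each twist angle phi as the pair (cos phi, sin phi)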

            joints_full = np.einsum('ij,kj->ki', rotmat, joints_full)
            joints_hybrik = np.einsum('ij,kj->ki', rotmat, joints_hybrik)
            gendered_joints = np.einsum('ij,kj->ki', rotmat, gendered_joints)
            gendered_joints = gendered_joints - gendered_joints[0]

            joints_hybrik = joints_hybrik - joints_hybrik[0]

            gt_joints_3d_align = gt_joints_3d - gt_joints_3d[0]
            joints_full_aligned = joints_full - joints_full[0]

            kpt_err = np.abs(gt_joints_3d_align - gendered_joints)
            assert (kpt_err < 1e-5).all(), kpt_err

            cam_param, uv_hybrik = self.calculate_projection(joints_hybrik[:55].copy(), gt_joints_2d[:55].copy(), joints_hybrik.copy())

            #####################################
            # full_pose_rotmat2 = batch_rodrigues(torch.from_numpy(pose_np).reshape(-1, 3).float()).reshape(1, -1, 3, 3)
            # smplx_out = smplx_layer.forward_simple(
            #         betas=betas,
            #         expression=expression,
            #         full_pose=full_pose_rotmat2,
            #         return_verts=True,
            #     )
            # vertices = smplx_out.vertices

            # hybrik_out = smplx_layer.hybrik(
            #     betas=betas,
            #     expression=expression,
            #     pose_skeleton=torch.from_numpy(joints_hybrik).reshape(1, -1, 3).float(),
            #     phis=phis
            # )

            # hybrik_vertices = hybrik_out.vertices

            # print(torch.abs(hybrik_vertices - vertices).max())

            target = {
                'ann_path': self.annotation_path,
                'img_path': img_path,
                'gender': gender,
                'occlusion': occlusion,
                'is_valid': isValid,
                'is_kid': is_kid,
                'joints_hybrik': torch.from_numpy(joints_hybrik).float(),
                'joints_full': torch.from_numpy(joints_full).float(),
                'full_pose': torch.from_numpy(pose_np).float(),
                'shape': betas[:, :10].reshape(10).float(),
                'expression': expression.reshape(10).float(),
                'twist_angle': torch.from_numpy(twist_angle).float(),
                'gt_joints_2d': torch.from_numpy(gt_joints_2d).float(),
                'gt_joints_3d': torch.from_numpy(gt_joints_3d).float(),
                'gendered_smpl': self.use_gendered_smplx*1.0,
                'use_mean_pose': 0.0,
                'shape_kid': shape_kid.reshape(1).float(),
                'uv_hybrik': torch.from_numpy(uv_hybrik).float(),
                'cam_param': cam_param
            }

        return target

    def calculate_rot(self, v_origin, v_actual):
        # solve the orthogonal Procrustes problem: find the rotation R that best
        # maps v_origin onto v_actual (Kabsch algorithm via SVD)
        v_origin = v_origin.reshape(-1, 3)
        v_actual = v_actual.reshape(-1, 3)
        v_origin = v_origin - v_origin.mean(axis=0)
        v_actual = v_actual - v_actual.mean(axis=0)

        v_origin_s = v_origin
        v_actual_s = v_actual

        S = np.matmul(v_origin_s.T, v_actual_s)
        U, Sig, Vh = np.linalg.svd(S)

        rot_mat = np.matmul(Vh.T, U.T)

        mean_error = v_actual - np.einsum('ij,kj->ki', rot_mat, v_origin)

        mean_error = np.abs(mean_error)
        assert (mean_error < 1e-5).all(), mean_error.max()

        return rot_mat 

    def rectify_pose(self, pose, R_rot):
        pose = pose.copy()

        R_root = cv2.Rodrigues(pose[:3])[0]
        # new_root = np.linalg.inv(R_abs).dot(R_root)
        new_root = R_rot.dot(R_root)
        pose[:3] = cv2.Rodrigues(new_root)[0].reshape(3)
        return pose

    def calculate_projection(self, xyz, uv, xyz_hybrik=None, num_jts=55):
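        # DLT-style least-squares fit: find the 3x4 projection matrix P and per-joint
        # projective depths s_i such that P @ [x, y, z, 1]^T = s_i * [u, v, 1]^T for
        # every joint; uv is scaled by 1/100 below and the first two rows of P are
        # rescaled afterwards to undo it.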
        # print(uv.max(), uv.min())
        # uv = uv - np.array([640, 360])
        uv_normed = uv / 100.0
        A = np.zeros((num_jts*3+1, 12+num_jts)) # a1, b1, c1, d1, a2, b2 ... d3, s1, s2, ..., s55
        b = np.zeros(num_jts*3+1)

        # for u
        A[:num_jts, :3] = xyz
        A[:num_jts, 3] = 1

        # for v
        A[num_jts:num_jts*2, 4:7] = xyz
        A[num_jts:num_jts*2, 7] = 1

        # for homo-coord
        A[num_jts*2:num_jts*3, 8:11] = xyz
        A[num_jts*2:num_jts*3, 11] = 1
        # A[num_jts:num_jts*2, -1] = -1

        for i in range(num_jts):
            A[i, 12+i] = -uv_normed[i, 0]
            A[num_jts+i, 12+i] = -uv_normed[i, 1]
            A[num_jts*2+i, 12+i] = -1
        
        # avoid all-zero solution
        A[-1, :] = 1
        b[-1] = 1

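        # least-squares solution via the normal equations: (A^T A) x = (A^T b)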
        A_s = np.dot(A.T, A)
        b_s = np.dot(A.T, b)

        cam_para_raw = np.linalg.solve(A_s, b_s)  # shape: (12 + num_jts,)

        cam_para = cam_para_raw[:12].reshape(3, 4)
        scale = cam_para_raw[12:]

        cam_para[:2] = cam_para[:2] * 100.0

        if num_jts == 55:
            # smplx case
            xyz_hybrik_homo = np.ones((70, 4))
        else:
            # smpl case
            xyz_hybrik_homo = np.ones((29, 4))
        
        xyz_hybrik_homo[:, :3] = xyz_hybrik
        pred_uv_homo = np.einsum('ij,bj->bi', cam_para, xyz_hybrik_homo)
        # print(pred_uv_homo)
        pred_uv = pred_uv_homo[:, :2] / pred_uv_homo[:, [2]]

        assert np.absolute(pred_uv[:num_jts] - uv).max() < 1e-2, f'{pred_uv[:num_jts] - uv}, {np.absolute(pred_uv[:num_jts] - uv).max()}'
        # print(cam_para)
        return cam_para, pred_uv
        

######################### GENERATE SMPL FILES ############################

WITH_KID = True
#### train
annotation_files = [
    f'data/AGORA/annotations/Cam/train_{i}_SMPL_withjv.pkl' for i in range(10)
]
final_saved_path = 'exp/video_predict/AGORA/train_all_SMPL_withjv_withkid.pt'

#### val
# annotation_files = [
#     f'data/AGORA/annotations/validation_Cam/Cam/validation_{i}_SMPL_withjv.pkl' for i in range(10)
# ]
# final_saved_path = 'exp/video_predict/AGORA/validation_all_SMPL_withjv_withkid.pt'


######################### GENERATE SMPLX FILES ############################
#### val
# annotation_files = [
#     f'data/AGORA/annotations/validation_Cam/Cam/validation_{i}_SMPLX_withjv.pkl' for i in range(10)
# ]
# final_saved_path = 'exp/video_predict/AGORA/validation_all_SMPLX_withjv.pt'
#### train
# annotation_files = [
#     f'data/AGORA/annotations/Cam/train_{i}_SMPLX_withjv.pkl' for i in range(10)
# ]
# final_saved_path = 'exp/video_predict/AGORA/train_all_SMPLX_withjv_withkid.pt'

use_gendered_smplx = True
if use_gendered_smplx and 'SMPLX' in final_saved_path:
    final_saved_path = final_saved_path.split('.')[0] + '_gendered.pt'


if 'SMPLX' not in final_saved_path:
    pt_keys = [
        'ann_path', 'img_path', 'gender', 'occlusion', 'is_valid', 
        'xyz_17', 'xyz_29', 'twist_angle', 'pose', 'shape', 'uv_24', 'gt_joints_3d',
        'is_kid', 'shape_kid', 'uv_29', 'cam_param'
    ]
else:
    pt_keys = [
        'ann_path', 'img_path', 'gender', 'occlusion', 'is_valid', 
        'joints_hybrik', 'joints_full', 'twist_angle', 'full_pose', 'shape', 'expression', 
        'gt_joints_2d', 'gt_joints_3d', 'gendered_smpl', 'use_mean_pose',
        'is_kid', 'shape_kid', 'uv_hybrik', 'cam_param'
    ]


if __name__ == "__main__":

    for i, annotation_file in enumerate(annotation_files):
        dataset = naive_dataset(annotation_file, use_gendered_smplx=use_gendered_smplx, use_kid=WITH_KID)
        print(len(dataset))

        loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False,
                            num_workers=8, drop_last=False, pin_memory=True)


        new_db = {
            k: [] for k in pt_keys
        }
        for item in tqdm(loader, dynamic_ncols=True):
            for k in pt_keys:
                v = item[k][0] 
                if isinstance(v, torch.Tensor):
                    v = v.cpu().numpy()
                
                new_db[k].append(v)

            gc.collect()

        for k in new_db.keys():
            if isinstance(new_db[k][0], np.ndarray):
                v = np.stack(new_db[k], axis=0)

                new_db[k] = v
            
            
        file_name = annotation_file.split('/')[-1]
        if use_gendered_smplx and 'SMPLX' in final_saved_path:
            new_file_name = file_name.split('.')[0] + '_gendered.pt'
        else:
            new_file_name = file_name.split('.')[0] + '.pt'

        new_db_path = os.path.join('exp/video_predict/AGORA', new_file_name)
        joblib.dump(new_db, new_db_path)
        torch.cuda.empty_cache()

        for k, v in new_db.items():
            print(k, len(v))
    
    # ########## fuse all pt
    pt_final = {k: [] for k in pt_keys}
    new_db_paths = []
    for annotation_file in tqdm(annotation_files):
        file_name = annotation_file.split('/')[-1]
        if use_gendered_smplx and 'SMPLX' in final_saved_path:
            new_file_name = file_name.split('.')[0] + '_gendered.pt'
        # elif WITH_KID:
        #     new_file_name = file_name.split('.')[0] + '_withkid.pt'
        else:
            new_file_name = file_name.split('.')[0] + '.pt'
        
        new_db_path = os.path.join('exp/video_predict/AGORA', new_file_name)
        new_db_paths.append(new_db_path)

        now_db = joblib.load(new_db_path, 'r')
        for k in pt_keys:
            if isinstance(now_db[k], list):
                pt_final[k] = pt_final[k] + now_db[k]
            else:
                assert isinstance(now_db[k], np.ndarray), type(now_db[k])
                pt_final[k].append(now_db[k])
    
    for k in pt_keys:
        if isinstance(now_db[k], np.ndarray):
            pt_final[k] = np.concatenate(pt_final[k], axis=0)

    joblib.dump(pt_final, final_saved_path)
    print(final_saved_path)


    ###### fuse all pt, and save only valid ones
    final_db = joblib.load(final_saved_path, 'r')
    valid_final_db = {k: [] for k, _ in final_db.items()}

    db_len = len(final_db['img_path'])
    print('original db_len', db_len)
    for i in tqdm(range(db_len)):
        is_valid = final_db['is_valid'][i]
        if is_valid:
            for k, v in final_db.items():
                valid_final_db[k].append(v[i])
        
    for k in pt_keys:
        if isinstance(valid_final_db[k][0], np.ndarray):
            valid_final_db[k] = np.stack(valid_final_db[k], axis=0)
    
    print('valid db_len', len(valid_final_db['img_path']))
    valid_final_saved_path = final_saved_path.split('.')[0] + '_valid.pt'
    joblib.dump(valid_final_db, valid_final_saved_path)
    print(valid_final_saved_path)


cyk990422 commented on June 3, 2024

Sorry, SMPL_layer_kid is not included in the library you mentioned

cyk990422 commented on June 3, 2024

At the same time, I also noticed that some point coordinates of gt_joints_2d are negative numbers. Will this have any impact?

MohammadKhalid commented on June 3, 2024

At the same time, I also noticed that some point coordinates of gt_joints_2d are negative numbers. Will this have any impact?

Just apply the absolute function. 2D joints are pixel values and hence must always be positive.

cyk990422 commented on June 3, 2024

At the same time, I also noticed that some point coordinates of gt_joints_2d are negative numbers. Will this have any impact?

Just apply the absolute function. 2D joints are pixel values and hence must always be positive.

Should I perform this step directly in the code? gt_joints_2d = abs((self.df.iloc[img_idx]['gt_joints_2d'])[idx_win_img])

MohammadKhalid commented on June 3, 2024

@biansy000 I couldn't find SMPL_layer_kid in the HybrIK code.

biansy000 commented on June 3, 2024

The code of SMPL_layer_kid:

import numpy as np
import torch

from hybrik.models.layers.smpl.SMPL import SMPL_layer
# assumption: to_tensor / to_np are the usual SMPL helpers vendored with the
# HybrIK SMPL code; adjust the import path to match your copy
from hybrik.models.layers.smpl.lbs import to_tensor, to_np


class SMPL_layer_kid(SMPL_layer):
    def __init__(self,
                 model_path,
                 kid_template_path,
                 h36m_jregressor,
                 gender='neutral',
                 dtype=torch.float32,
                 num_joints=29):
        
        super(SMPL_layer_kid, self).__init__(
                model_path,
                h36m_jregressor,
                gender=gender,
                dtype=dtype,
                num_joints=num_joints
            )
        num_betas = 10
        v_template_smil = np.load(kid_template_path)
        v_template_smil -= np.mean(v_template_smil, axis=0)
        v_template_diff = np.expand_dims(v_template_smil - self.v_template.numpy(), axis=2)

        shapedirs = self.shapedirs.numpy()
        shapedirs = np.concatenate((shapedirs[:, :, :num_betas], v_template_diff), axis=2)
        num_betas = num_betas + 1

        self._num_betas = num_betas
        shapedirs = shapedirs[:, :, :num_betas]
        # The shape components
        self.register_buffer(
            'shapedirs',
            to_tensor(to_np(shapedirs), dtype=dtype))
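
With the kid blend direction appended, the layer expects 11 betas (the 10 SMPL shape coefficients plus one kid weight), which is why the dataset script above concatenates betas and shape_kid. A minimal usage sketch, reusing the same model paths the script assumes:

h36m_jregressor = np.load('./model_files/J_regressor_h36m.npy')
smpl = SMPL_layer_kid(
    './model_files/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl',
    kid_template_path='model_files/smpl_v1.1.0/smil/smpl_kid_template.npy',
    h36m_jregressor=h36m_jregressor,
    dtype=torch.float32,
)
betas = torch.zeros(1, 11)  # 10 shape betas + 1 kid blend weight (0 for adults)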

biansy000 commented on June 3, 2024

At the same time, I also noticed that some point coordinates of gt_joints_2d are negative numbers. Will this have any impact?

Just apply the absolute function. 2D joints are pixel values and hence must always be positive.

At the same time, I also noticed that some point coordinates of gt_joints_2d are negative numbers. Will this have any impact?

No, you should not apply the absolute function; just leave the values as negative numbers.
Sometimes part of the person is outside the image, in which case the corresponding gt_joints_2d entries are negative.
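
If you need to ignore out-of-image joints (e.g. when computing a 2D loss), a visibility mask is the usual approach rather than abs(). A minimal sketch; the 1280x720 size here is an assumption, and the annotations also store width and height per image:

import numpy as np

def visibility_mask(uv, width=1280, height=720):
    # keep only joints whose projection lands inside the image bounds
    return (uv[:, 0] >= 0) & (uv[:, 0] < width) & (uv[:, 1] >= 0) & (uv[:, 1] < height)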

MohammadKhalid commented on June 3, 2024

Thanks. Issue resolved. Closing.
