# load_word_embeddings() — defined in utils/utils.py


def load_word_embeddings(emb_file, vocab):
    """Load word vectors for every word in ``vocab`` from a text embedding file.

    Parameters
    ----------
    emb_file : str
        Path to a space-separated embedding file, one word per line:
        ``word v1 v2 ... vD`` (GloVe / fastText text format).
    vocab : list[str]
        Words to look up; matching is case-insensitive (lower-cased here).

    Returns
    -------
    torch.FloatTensor of shape ``(len(vocab), D)``, rows in vocab order.

    Raises
    ------
    KeyError
        If any vocab word has no embedding (after applying the custom
        alias map below).
    """
    vocab = [v.lower() for v in vocab]

    embeds = {}
    # Context manager guarantees the file handle is closed even on error
    # (the original left it open).
    with open(emb_file, 'r', encoding='utf-8') as f:
        for line in f:
            tokens = line.strip().split(' ')
            embeds[tokens[0]] = torch.FloatTensor([float(t) for t in tokens[1:]])

    # Dataset-specific aliases (UT-Zappos material/category names, plus a
    # couple of underscore-joined words) mapped to plain words that appear
    # in the embedding file. (Comment in original: "should account for
    # everything" for Zappos.)
    custom_map = {
        'Faux.Fur': 'fur',
        'Faux.Leather': 'leather',
        'Full.grain.leather': 'leather',
        'Hair.Calf': 'hair',
        'Patent.Leather': 'leather',
        'Nubuck': 'leather',
        'Boots.Ankle': 'boots',
        'Boots.Knee.High': 'knee-high',
        'Boots.Mid-Calf': 'midcalf',
        'Shoes.Boat.Shoes': 'shoes',
        'Shoes.Clogs.and.Mules': 'clogs',
        'Shoes.Flats': 'flats',
        'Shoes.Heels': 'heels',
        'Shoes.Loafers': 'loafers',
        'Shoes.Oxfords': 'oxfords',
        'Shoes.Sneakers.and.Athletic.Shoes': 'sneakers',
        'traffic_light': 'light',
        'trash_can': 'trashcan'
    }
    for alias, word in custom_map.items():
        # Skip aliases whose target word is absent from this embedding file
        # instead of crashing with an opaque KeyError (the original assumed
        # every target word was always present).
        if word in embeds:
            embeds[alias.lower()] = embeds[word]

    # Fail with a clear message listing *all* missing words, rather than a
    # bare KeyError from the stack comprehension on the first one.
    missing = [w for w in vocab if w not in embeds]
    if missing:
        raise KeyError('no embedding found for: {}'.format(missing))

    stacked = torch.stack([embeds[w] for w in vocab])
    print('loaded embeddings', stacked.size())

    return stacked