
[PyTorch] A workflow for batched training data

2018-11-11  VanJordan

Loading data and batched processing with an RNN (decoder)

Loading data with DataLoader

trainloader = torch.utils.data.DataLoader(
    dataset=Dataset(FILEPATH, config.trainDataNum, voc),  # voc: token -> index mapping used by __getitem__
    batch_size=config.batchSize,
    shuffle=False,
    collate_fn=collate_fun)

The Dataset class must inherit from torch.utils.data.Dataset and override two methods, __getitem__ and __len__: the first loads a single sample, the second returns the total number of samples in the dataset.

import linecache

import torch

class Dataset(torch.utils.data.Dataset):

    def __init__(self, filepath=None, dataLen=None, voc=None):
        self.file = filepath      # tab-separated file: A \t B \t path \t hop
        self.dataLen = dataLen    # number of lines (samples) in the file
        self.voc = voc            # token -> index mapping

    def __getitem__(self, index):
        # linecache lines are 1-indexed, so shift the dataset index by one
        A, B, path, hop = linecache.getline(self.file, index + 1).split('\t')
        return self.voc[A], self.voc[B], [self.voc[i] for i in path.split(' ')], int(hop.strip('\n'))

    def __len__(self):
        return self.dataLen

def collate_fun(data):
    # data is a list of (A, B, path, hop) tuples returned by __getitem__
    A, B, path, hop = zip(*data)
    pathLen = [len(p) for p in path]
    # sort the whole batch by path length in descending order (required by pack_padded_sequence)
    idx = sorted(enumerate(pathLen), key=lambda x: x[1], reverse=True)
    idx = [i[0] for i in idx]
    pathLen = [pathLen[i] for i in idx]
    A = [A[i] for i in idx]
    B = [B[i] for i in idx]
    path = [path[i] for i in idx]
    hop = [hop[i] for i in idx]
    # pad every path with zeros up to the longest path in the batch
    pathPad = torch.zeros(len(path), max(pathLen)).long()
    for i, s in enumerate(path):
        end = pathLen[i]
        pathPad[i, :end] = torch.LongTensor(s)
    # transpose pathPad to (max_len, batch_size), the layout the GRU expects
    return torch.LongTensor(A), torch.LongTensor(B), pathPad.t(), torch.LongTensor(pathLen), torch.LongTensor(hop)

"""
['really', 'fake', 'DistinctFrom', '1\n']
['really', 'genuine', 'DistinctFrom fake Antonym', '2\n']
['really', 'flog', 'DistinctFrom fake DerivedFrom', '2\n']
['really', 'sports', 'DistinctFrom fake HasContext', '2\n']
['really', 'nautical', 'DistinctFrom fake HasContext', '2\n']
['really', 'imitation', 'DistinctFrom fake IsA', '2\n']
['really', 'false', 'DistinctFrom fake RelatedTo', '2\n']
['really', 'fraudulent', 'DistinctFrom fake RelatedTo', '2\n']
['really', 'advantage', 'DistinctFrom fake RelatedTo', '2\n']
['really', 'deceive', 'DistinctFrom fake RelatedTo', '2\n']
A: ('really', 'really', 'really', 'really', 'really', 'really', 'really', 'really', 'really', 'really')  
B: ('fake', 'genuine', 'flog', 'sports', 'nautical', 'imitation', 'false', 'fraudulent', 'advantage', 'deceive')  
path: (['DistinctFrom'], ['DistinctFrom', 'fake', 'Antonym'], ['DistinctFrom', 'fake', 'DerivedFrom'], ['DistinctFrom', 'fake', 'HasContext'], ['DistinctFrom', 'fake', 'HasContext'], ['DistinctFrom', 'fake', 'IsA'], ['DistinctFrom', 'fake', 'RelatedTo'], ['DistinctFrom', 'fake', 'RelatedTo'], ['DistinctFrom', 'fake', 'RelatedTo'], ['DistinctFrom', 'fake', 'RelatedTo'])  
hop: (1, 2, 2, 2, 2, 2, 2, 2, 2, 2)
"""
for index, data in enumerate(trainloader):
    A, B, path, lens, hop = map(lambda x: x.to(device), data)   # move the whole batch to the device
    optimizer_f.zero_grad()
    optimizer_p2v.zero_grad()
    embeddingPath = p2v(path, lens)                              # encode each path into a single vector
    outputs = f(embedding(A), embedding(B), embeddingPath)
    loss = criterion(outputs.squeeze(1), torch.log(torch.add(hop, 1).to(dtype=torch.float)))
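The snippet stops at computing the loss; a minimal sketch of how the rest of the step inside the loop would typically look, assuming optimizer_f and optimizer_p2v hold the parameters of f and p2v:

    loss.backward()        # backpropagate through both f and p2v
    optimizer_f.step()
    optimizer_p2v.step()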
class Path2Vec(nn.Module):
    """Uses a GRU to encode a path such as [node 1, relation 1, node 2] into a single vector."""

    def __init__(self, embedding, hiddenLen):
        super(Path2Vec, self).__init__()
        self.hiddenLen = hiddenLen
        self.embedding = embedding
        self.gru = nn.GRU(hiddenLen, hiddenLen)

    def forward(self, path, lens):
        # path: (max_len, batch_size) LongTensor; lens: path lengths sorted in descending order
        # (newer PyTorch versions expect the lengths tensor to live on the CPU)
        packed = torch.nn.utils.rnn.pack_padded_sequence(self.embedding(path), lens)
        output, _ = self.gru(packed)
        output, lens = torch.nn.utils.rnn.pad_packed_sequence(output)
        # mean-pool the GRU outputs over the valid (unpadded) time steps
        return torch.div(output.sum(0), lens.to(device=device, dtype=torch.float).unsqueeze(1))
pad_packed_sequence is the inverse of what pack_padded_sequence does, and packing requires the sequences in a batch to be sorted by length in descending order!
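A minimal, self-contained sketch of the pack/pad round trip (made-up tensors and sizes, not the model above):

import torch
import torch.nn as nn

gru = nn.GRU(input_size=4, hidden_size=4)
lens = torch.tensor([3, 2])                         # lengths, already in descending order
batch = torch.randn(3, 2, 4)                        # (max_len, batch_size, feature); trailing steps of the short sequence are padding
packed = nn.utils.rnn.pack_padded_sequence(batch, lens)
output, _ = gru(packed)                             # the GRU never sees the padded steps
output, out_lens = nn.utils.rnn.pad_packed_sequence(output)
print(output.shape, out_lens)                       # torch.Size([3, 2, 4]) tensor([3, 2])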

The decoder side

encoder_outputs, encoder_hidden = encoder(input_variable, lengths)
...
for t in range(max_target_len):
    decoder_output, decoder_hidden = decoder(
        decoder_input, decoder_hidden, encoder_outputs
    )
    # No teacher forcing: next input is decoder's own current output
    _, topi = decoder_output.topk(1)
    decoder_input = torch.LongTensor([[topi[i][0] for i in range(batch_size)]]).to(device)
    # Calculate and accumulate loss
    mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
    loss += mask_loss
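For comparison, with teacher forcing the next decoder input would be the ground-truth token for this time step instead of the model's own prediction (a sketch in the same style as the loop above):

    # Teacher forcing: next input is the current target token
    decoder_input = target_variable[t].view(1, -1)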

The loss side

inp is what the model produces on the decoder side at each time step, target is the ground-truth data of the batch at that time step, and mask is the ByteTensor mask of the batch at that time step.

def maskNLLLoss(inp, target, mask):
    nTotal = mask.sum()   # number of non-padded tokens at this time step
    crossEntropy = -torch.log(torch.gather(inp, 1, target.view(-1, 1)).squeeze(1))
    loss = crossEntropy.masked_select(mask).mean()   # average loss over the non-padded positions only
    loss = loss.to(device)
    return loss, nTotal.item()
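A quick sanity check with made-up numbers (hypothetical shapes: batch of 3, vocabulary of 5, last sequence padded at this step; assumes maskNLLLoss and device from above):

inp = torch.softmax(torch.randn(3, 5), dim=1)   # decoder output probabilities for one time step
target = torch.tensor([2, 0, 4])                # ground-truth token indices
mask = torch.tensor([True, True, False])        # the original uses a ByteTensor; newer PyTorch prefers torch.bool
loss, nTotal = maskNLLLoss(inp, target, mask)
print(loss.item(), nTotal)                      # average NLL over the 2 real tokens, 2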
Building the mask

My code above actually has no mask matrix, because it is essentially just an encoder with no decoder, so during training there is no need for a mask to compare the decoder's generated outputs against the target. The purpose of the mask matrix is to mask out the padded positions.

import itertools
import torch

paddingIdx = 0
sentBatch = [[1, 2, 3], [9, 3], [2, 5, 8, 9]]  # sentences in a batch; numbers are vocabulary indices, 0 is padding
sentBatch.sort(key=lambda x: len(x), reverse=True)  # sort in descending length so pack can be used
sentBatchPad = list(itertools.zip_longest(*sentBatch, fillvalue=paddingIdx))  # transpose and pad
a = torch.tensor(sentBatchPad)
b = a.ne(torch.tensor(paddingIdx)).byte()  # torch.tensor(paddingIdx) is a scalar tensor
print(b)
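Running this prints (each row is a time step, each column a sentence):

tensor([[1, 1, 1],
        [1, 1, 1],
        [1, 1, 0],
        [1, 0, 0]], dtype=torch.uint8)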

This is how the official chatbot tutorial builds the mask matrix; my approach above feels a bit simpler.

def zeroPadding(l, fillvalue=PAD_token):
    return list(itertools.zip_longest(*l, fillvalue=fillvalue))

def binaryMatrix(l, value=PAD_token):
    m = []
    for i, seq in enumerate(l):
        m.append([])
        for token in seq:
            if token == PAD_token:
                m[i].append(0)
            else:
                m[i].append(1)
    return m
mask = binaryMatrix(padList)
mask = torch.ByteTensor(mask)