[Natural Language Processing 5] The Transformer Model

1. The Transformer model

The Transformer is a classic NLP model proposed by a team at Google in 2017; BERT, which is very popular today, is also built on the Transformer. Instead of the sequential structure of an RNN, the Transformer uses a Self-Attention mechanism, which lets the model be trained in parallel and gives every position access to global information about the sequence.

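Both the encoder and the decoder below call a MultiHeadAttention module that is defined elsewhere in this series. As a reference for the self-attention mechanism described above, here is a minimal sketch of scaled dot-product multi-head attention with the same constructor and forward signature as the calls below; the mask convention (0 = masked position) and the dropout rate are assumptions, and the original implementation may differ.

import torch
import torch.nn as nn


class MultiHeadAttention(nn.Module):
    def __init__(self, hidden_size, n_head, device, dropout=0.1):
        super(MultiHeadAttention, self).__init__()
        assert hidden_size % n_head == 0, "hidden_size must be divisible by n_head"
        self.n_head = n_head
        self.head_dim = hidden_size // n_head
        self.scale = self.head_dim ** 0.5
        # projections for queries, keys, values and the output
        self.w_q = nn.Linear(hidden_size, hidden_size)
        self.w_k = nn.Linear(hidden_size, hidden_size)
        self.w_v = nn.Linear(hidden_size, hidden_size)
        self.w_o = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(dropout)
        self.device = device

    def forward(self, query, key, value, mask=None):
        # query: [B, T_q, H], key/value: [B, T_k, H]
        B = query.shape[0]

        def split_heads(x):
            # [B, T, H] -> [B, n_head, T, head_dim]
            return x.view(B, -1, self.n_head, self.head_dim).transpose(1, 2)

        q = split_heads(self.w_q(query))
        k = split_heads(self.w_k(key))
        v = split_heads(self.w_v(value))

        # scores for every pair of positions at once -> fully parallel over the sequence
        scores = torch.matmul(q, k.transpose(-2, -1)) / self.scale   # [B, n_head, T_q, T_k]
        if mask is not None:
            # assumed convention: mask == 0 marks positions to be ignored
            scores = scores.masked_fill(mask == 0, float('-inf'))
        attn = self.dropout(torch.softmax(scores, dim=-1))

        out = torch.matmul(attn, v)                                  # [B, n_head, T_q, head_dim]
        out = out.transpose(1, 2).contiguous().view(B, -1, self.n_head * self.head_dim)
        return self.w_o(out)                                         # [B, T_q, H]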

2. Encoder implementation (PyTorch)

import torch
import torch.nn as nn


class EncoderLayer(nn.Module):
    def __init__(self, hidden_size, filter_size, n_head, pre_lnorm, device, dropout):
        super(EncoderLayer, self).__init__()
        # self-attention part
        self.self_attn = MultiHeadAttention(hidden_size, n_head, device)
        self.self_attn_norm = nn.LayerNorm(hidden_size)

        # feed forward network part
        self.pff = PositionwiseFeedForward(hidden_size, filter_size, dropout)
        self.pff_norm = nn.LayerNorm(hidden_size)

        self.pre_lnorm = pre_lnorm

    def forward(self, src, src_mask):
        if self.pre_lnorm:
            pre = self.self_attn_norm(src)
            # residual connection
            src = src + self.self_attn(pre, pre, pre, src_mask)

            pre = self.pff_norm(src)
            src = src + self.pff(pre)  # residual connection
        else:
            # residual connection + layerNorm
            src = self.self_attn_norm(
                src + self.self_attn(src, src, src, src_mask))
            # residual connection + layerNorm
            src = self.pff_norm(src + self.pff(src))

        return src


class Encoder(nn.Module):
    def __init__(self, input_size, hidden_size, filter_size, n_head, dropout, n_layers, pre_lnorm, device):
        super(Encoder, self).__init__()
        self.hidden_size = hidden_size
        self.embed_scale = hidden_size ** 0.5
        self.wte = nn.Embedding(input_size, hidden_size)  # token embeddings
        # self.wpe = PositionalEmbedding(hidden_size) # positional embeddings
        # self.wpe = nn.Embedding(1000, hidden_size)
        # self.wpe = PositionalEncoding(hidden_size)
        max_len = 1000
        self.wpe = nn.Embedding.from_pretrained(positional_encoding_table(
            max_len+1, hidden_size, padding_idx=ZH.vocab.stoi['<pad>']), freeze=True)
        self.embed_dropout = nn.Dropout(dropout)
        self.layers = nn.ModuleList([EncoderLayer(hidden_size, filter_size, n_head, pre_lnorm, device, dropout)
                                     for _ in range(n_layers)])
        self.pre_lnorm = pre_lnorm
        self.last_norm = nn.LayerNorm(hidden_size)
        self.device = device

    def forward(self, src, src_mask):
        # token embedding + positional encoding
        # pos = torch.arange(src.shape[1], dtype=torch.float32).to(self.device)
        pos = torch.arange(0, src.shape[1]).unsqueeze(
            0).repeat(src.shape[0], 1).to(self.device)
        src = self.wte(src) * self.embed_scale + self.wpe(pos)  # [B, T, H]
        src = self.embed_dropout(src)

        for layer in self.layers:
            src = layer(src, src_mask)

        if self.pre_lnorm:
            src = self.last_norm(src)

        return src        
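The Encoder above (and the Decoder below) also rely on a positional_encoding_table helper and a PositionwiseFeedForward module that are not shown in this excerpt. The following are minimal sketches consistent with how they are called here: a fixed sinusoidal table suitable for nn.Embedding.from_pretrained (assuming an even hidden_size, with the row at padding_idx zeroed), and a two-layer position-wise feed-forward network with ReLU; the exact details are assumptions and may differ from the original code.

import math

import torch
import torch.nn as nn


def positional_encoding_table(n_position, hidden_size, padding_idx=None):
    # fixed sinusoidal table of shape [n_position, hidden_size] (assumes hidden_size is even)
    position = torch.arange(0, n_position, dtype=torch.float32).unsqueeze(1)   # [n_position, 1]
    div_term = torch.exp(torch.arange(0, hidden_size, 2, dtype=torch.float32)
                         * (-math.log(10000.0) / hidden_size))                 # [hidden_size / 2]
    table = torch.zeros(n_position, hidden_size)
    table[:, 0::2] = torch.sin(position * div_term)
    table[:, 1::2] = torch.cos(position * div_term)
    if padding_idx is not None:
        table[padding_idx] = 0.0  # assumed: the padding position gets an all-zero encoding
    return table


class PositionwiseFeedForward(nn.Module):
    def __init__(self, hidden_size, filter_size, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        # two linear layers applied independently at every position
        self.fc1 = nn.Linear(hidden_size, filter_size)
        self.fc2 = nn.Linear(filter_size, hidden_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # x: [B, T, H] -> [B, T, filter_size] -> [B, T, H]
        return self.fc2(self.dropout(torch.relu(self.fc1(x))))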

3. Decoder implementation

class DecoderLayer(nn.Module):
    def __init__(self, hidden_size, filter_size, n_head, pre_lnorm, device, dropout):
        super(DecoderLayer, self).__init__()
        # self-attention part
        self.self_attn = MultiHeadAttention(hidden_size, n_head, device)
        self.self_attn_norm = nn.LayerNorm(hidden_size)

        # encoder-decoder (cross) attention part
        self.ed_self_attn = MultiHeadAttention(hidden_size, n_head, device)
        self.ed_self_attn_norm = nn.LayerNorm(hidden_size)

        # feed forward network part
        self.pff = PositionwiseFeedForward(hidden_size, filter_size, dropout)
        self.pff_norm = nn.LayerNorm(hidden_size)

        self.pre_lnorm = pre_lnorm

    def forward(self, enc_out, enc_out_mask, trg, trg_mask):
        if self.pre_lnorm:
            ris = self.self_attn_norm(trg)
            trg = trg + self.self_attn(ris, ris, ris, trg_mask)

            ris = self.ed_self_attn_norm(trg)
            trg = trg + self.ed_self_attn(ris, enc_out, enc_out, enc_out_mask)

            ris = self.pff_norm(trg)
            trg = trg + self.pff(ris)
        else:
            trg = self.self_attn_norm(
                trg + self.self_attn(trg, trg, trg, trg_mask))

            trg = self.ed_self_attn_norm(
                trg + self.ed_self_attn(trg, enc_out, enc_out, enc_out_mask))
            trg = self.pff_norm(trg + self.pff(trg))

        return trg

class Decoder(nn.Module):
    def __init__(self, input_size, hidden_size, filter_size, n_head, dropout, n_layers, pre_lnorm, device):
        super(Decoder, self).__init__()
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.embed_scale = hidden_size ** 0.5
        self.wte = nn.Embedding(input_size, hidden_size)  # token embeddings
        # self.wpe = PositionalEmbedding(hidden_size) # positional embeddings
        # self.wpe = nn.Embedding(1000, hidden_size)
        # self.wpe = PositionalEncoding(hidden_size)
        max_len = 1000
        self.wpe = nn.Embedding.from_pretrained(positional_encoding_table(
            max_len+1, hidden_size, padding_idx=ENG.vocab.stoi['<pad>']), freeze=True)
        self.embed_dropout = nn.Dropout(dropout)
        self.layers = nn.ModuleList([DecoderLayer(hidden_size, filter_size, n_head, pre_lnorm, device, dropout)
                                     for _ in range(n_layers)])
        self.pre_lnorm = pre_lnorm
        self.last_norm = nn.LayerNorm(hidden_size)
        self.device = device

    def forward(self, enc_out, enc_out_mask, trg, trg_mask):
        # token embedding + positional encoding
        # pos = torch.arange(trg.shape[1], dtype=torch.float32).to(self.device)
        pos = torch.arange(0, trg.shape[1]).unsqueeze(
            0).repeat(trg.shape[0], 1).to(self.device)
        trg = self.wte(trg) * self.embed_scale + self.wpe(pos)  # [B, T, H]
        trg = self.embed_dropout(trg)

        #trg [B, T, H]
        for layer in self.layers:
            trg = layer(enc_out, enc_out_mask, trg, trg_mask)

        if self.pre_lnorm:
            trg = self.last_norm(trg)
        return trg
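
For completeness, here is a hedged sketch of how the two halves could be wired into a full sequence-to-sequence model, including the source padding mask and the combined padding + look-ahead mask for the target. The Seq2SeqTransformer wrapper, the pad-index arguments and the final vocabulary projection are illustrative assumptions rather than code from the original post; the mask convention matches the attention sketch above (0 = masked).

import torch
import torch.nn as nn


class Seq2SeqTransformer(nn.Module):
    # illustrative wrapper tying Encoder and Decoder together (assumed, not from the post)
    def __init__(self, encoder, decoder, src_pad_idx, trg_pad_idx, hidden_size, trg_vocab_size, device):
        super(Seq2SeqTransformer, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_pad_idx = src_pad_idx
        self.trg_pad_idx = trg_pad_idx
        self.out = nn.Linear(hidden_size, trg_vocab_size)  # projection to the target vocabulary
        self.device = device

    def make_src_mask(self, src):
        # [B, T_src] -> [B, 1, 1, T_src]; 1 for real tokens, 0 for padding
        return (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2)

    def make_trg_mask(self, trg):
        # padding mask [B, 1, 1, T_trg] combined with a lower-triangular look-ahead
        # mask [T_trg, T_trg] so that position t only attends to positions <= t
        pad_mask = (trg != self.trg_pad_idx).unsqueeze(1).unsqueeze(2)
        T = trg.shape[1]
        look_ahead = torch.tril(torch.ones(T, T, device=trg.device)).bool()
        return pad_mask & look_ahead

    def forward(self, src, trg):
        src_mask = self.make_src_mask(src)
        trg_mask = self.make_trg_mask(trg)
        enc_out = self.encoder(src, src_mask)                     # [B, T_src, H]
        dec_out = self.decoder(enc_out, src_mask, trg, trg_mask)  # [B, T_trg, H]
        return self.out(dec_out)                                  # [B, T_trg, V]

With ZH as the source field and ENG as the target field, as in the embedding tables above, src_pad_idx and trg_pad_idx would be ZH.vocab.stoi['<pad>'] and ENG.vocab.stoi['<pad>'] respectively.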
        

Source: https://blog.csdn.net/moo611/article/details/122234867
