- # Copyright (c) Microsoft Corporation.
- # SPDX-License-Identifier: Apache-2.0
- # DeepSpeed Team
- from __future__ import absolute_import, division, print_function, unicode_literals
- # Copyright The Microsoft DeepSpeed Team
- # DeepSpeed note, code taken from commit 3d59216cec89a363649b4fe3d15295ba936ced0f
- # https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/modeling.py
- # coding=utf-8
- # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
- # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """PyTorch BERT model."""
- import copy
- import json
- import logging
- import math
- import os
- import shutil
- import tarfile
- import tempfile
- from io import open
- import torch
- from torch import nn
- from torch.nn import CrossEntropyLoss
- from torch.utils import checkpoint
- import deepspeed.comm as dist
- from torch.nn import Module
- import torch.nn.functional as F
- import torch.nn.init as init
- from deepspeed.accelerator import get_accelerator
- #from numba import cuda
- #from deepspeed_cuda import DeepSpeedSoftmaxConfig, DeepSpeedSoftmax
- logger = logging.getLogger(__name__)
- PRETRAINED_MODEL_ARCHIVE_MAP = {
- 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
- 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
- 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
- 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
- 'bert-base-multilingual-uncased':
- "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
- 'bert-base-multilingual-cased':
- "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
- 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
- }
- CONFIG_NAME = 'bert_config.json'
- WEIGHTS_NAME = 'pytorch_model.bin'
- TF_WEIGHTS_NAME = 'model.ckpt'
- def load_tf_weights_in_bert(model, tf_checkpoint_path):
- """ Load tf checkpoints in a pytorch model
- """
- try:
- import re
- import numpy as np
- import tensorflow as tf
- except ImportError:
- print("Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
- "https://www.tensorflow.org/install/ for installation instructions.")
- raise
- tf_path = os.path.abspath(tf_checkpoint_path)
- print("Converting TensorFlow checkpoint from {}".format(tf_path))
- # Load weights from TF model
- init_vars = tf.train.list_variables(tf_path)
- names = []
- arrays = []
- for name, shape in init_vars:
- print("Loading TF weight {} with shape {}".format(name, shape))
- array = tf.train.load_variable(tf_path, name)
- names.append(name)
- arrays.append(array)
- for name, array in zip(names, arrays):
- name = name.split('/')
- # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
- # which are not required for using pretrained model
- if any(n in ["adam_v", "adam_m"] for n in name):
- print("Skipping {}".format("/".join(name)))
- continue
- pointer = model
- for m_name in name:
- if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
- l = re.split(r'_(\d+)', m_name)
- else:
- l = [m_name]
- if l[0] == 'kernel' or l[0] == 'gamma':
- pointer = getattr(pointer, 'weight')
- elif l[0] == 'output_bias' or l[0] == 'beta':
- pointer = getattr(pointer, 'bias')
- elif l[0] == 'output_weights':
- pointer = getattr(pointer, 'weight')
- else:
- pointer = getattr(pointer, l[0])
- if len(l) >= 2:
- num = int(l[1])
- pointer = pointer[num]
- if m_name[-11:] == '_embeddings':
- pointer = getattr(pointer, 'weight')
- elif m_name == 'kernel':
- array = np.transpose(array)
- try:
- assert pointer.shape == array.shape
- except AssertionError as e:
- e.args += (pointer.shape, array.shape)
- raise
- print("Initialize PyTorch weight {}".format(name))
- pointer.data = torch.from_numpy(array)
- return model
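- # Illustrative sketch (our addition, never called by the code above): how a typical
- # TensorFlow variable name is interpreted by load_tf_weights_in_bert. The example name is
- # hypothetical; the mapping above also renames 'gamma'/'beta' to 'weight'/'bias' and
- # transposes 'kernel' arrays before copying them into the PyTorch parameters.
- def _tf_name_mapping_example():
-     name = 'bert/encoder/layer_0/attention/self/query/kernel'
-     scopes = name.split('/')  # each scope becomes an attribute lookup on the model
-     # 'layer_0' matches r'[A-Za-z]+_\d+' and is split into ('layer', 0) to index a ModuleList
-     return scopes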
- """
- @torch.jit.script
- def f_gelu(x):
- return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
- @torch.jit.script
- def bias_gelu(bias, y):
- x = bias + y
- return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
- @torch.jit.script
- def bias_tanh(bias, y):
- x = bias + y
- return torch.tanh(x)
- """
- def f_gelu(x):
- x_type = x.dtype
- x = x.float()
- x = x * 0.5 * (1.0 + torch.erf(x / 1.41421))
- return x.to(x_type)
- def bias_gelu(bias, y):
- y_type = y.dtype
- x = bias.float() + y.float()
- x = x * 0.5 * (1.0 + torch.erf(x / 1.41421))
- return x.to(y_type)
- def bias_tanh(bias, y):
- y_type = y.dtype
- x = bias.float() + y.float()
- x = torch.tanh(x)
- return x.to(y_type)
- def gelu(x):
- """Implementation of the gelu activation function.
- For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
- 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
- Also see https://arxiv.org/abs/1606.08415
- """
- return f_gelu(x)
- def swish(x):
- return x * torch.sigmoid(x)
- ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
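- # Illustrative sketch (our addition, not used by the model): the tanh-based GELU
- # approximation quoted in the gelu() docstring above, written out so it can be compared
- # against the erf-based f_gelu used here.
- def _gelu_tanh_approx(x):
-     return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))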
- class GPUTimer:
- def __init__(self):
- super().__init__()
- self.start = get_accelerator().Event() # noqa: F821
- self.stop = get_accelerator().Event() # noqa: F821
- def record(self):
- self.start.record()
- def elapsed(self):
- self.stop.record()
- self.stop.synchronize()
- return self.start.elapsed_time(self.stop) / 1000.0
- class LinearActivation(Module):
- r"""Fused Linear and activation Module.
- """
- __constants__ = ['bias']
- def __init__(self, in_features, out_features, weights, biases, act='gelu', bias=True):
- super(LinearActivation, self).__init__()
- self.in_features = in_features
- self.out_features = out_features
- self.fused_gelu = False
- self.fused_tanh = False
- if isinstance(act, str):
- if bias and act == 'gelu':
- self.fused_gelu = True
- elif bias and act == 'tanh':
- self.fused_tanh = True
- else:
- self.act_fn = ACT2FN[act]
- else:
- self.act_fn = act
- #self.weight = Parameter(torch.Tensor(out_features, in_features))
- self.weight = weights[5]
- self.bias = biases[5]
- #if bias:
- # self.bias = Parameter(torch.Tensor(out_features))
- #else:
- # self.register_parameter('bias', None)
- #self.reset_parameters()
- def reset_parameters(self):
- init.kaiming_uniform_(self.weight, a=math.sqrt(5))
- if self.bias is not None:
- fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
- bound = 1 / math.sqrt(fan_in)
- init.uniform_(self.bias, -bound, bound)
- def forward(self, input):
- if self.fused_gelu:
- #timing = []
- #t1 = GPUTimer()
- #t1.record()
- y = F.linear(input, self.weight, None)
- #timing.append(t1.elapsed())
- #t1.record()
- bg = bias_gelu(self.bias, y)
- #timing.append(t1.elapsed())
- return bg
- elif self.fused_tanh:
- return bias_tanh(self.bias, F.linear(input, self.weight, None))
- else:
- return self.act_fn(F.linear(input, self.weight, self.bias))
- def extra_repr(self):
- return 'in_features={}, out_features={}, bias={}'.format(self.in_features, self.out_features, self.bias
- is not None)
- class BertConfig(object):
- """Configuration class to store the configuration of a `BertModel`.
- """
- def __init__(self,
- vocab_size_or_config_json_file,
- hidden_size=768,
- num_hidden_layers=12,
- num_attention_heads=12,
- intermediate_size=3072,
- batch_size=8,
- hidden_act="gelu",
- hidden_dropout_prob=0.1,
- attention_probs_dropout_prob=0.1,
- max_position_embeddings=512,
- type_vocab_size=2,
- initializer_range=0.02,
- fp16=False):
- """Constructs BertConfig.
- Args:
- vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
- hidden_size: Size of the encoder layers and the pooler layer.
- num_hidden_layers: Number of hidden layers in the Transformer encoder.
- num_attention_heads: Number of attention heads for each attention layer in
- the Transformer encoder.
- intermediate_size: The size of the "intermediate" (i.e., feed-forward)
- layer in the Transformer encoder.
- hidden_act: The non-linear activation function (function or string) in the
- encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
- hidden_dropout_prob: The dropout probability for all fully connected
- layers in the embeddings, encoder, and pooler.
- attention_probs_dropout_prob: The dropout ratio for the attention
- probabilities.
- max_position_embeddings: The maximum sequence length that this model might
- ever be used with. Typically set this to something large just in case
- (e.g., 512 or 1024 or 2048).
- type_vocab_size: The vocabulary size of the `token_type_ids` passed into
- `BertModel`.
- initializer_range: The stdev of the truncated_normal_initializer for
- initializing all weight matrices.
- """
- if isinstance(vocab_size_or_config_json_file, str):
- with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
- json_config = json.loads(reader.read())
- for key, value in json_config.items():
- self.__dict__[key] = value
- elif isinstance(vocab_size_or_config_json_file, int):
- self.vocab_size = vocab_size_or_config_json_file
- self.hidden_size = hidden_size
- self.num_hidden_layers = num_hidden_layers
- self.num_attention_heads = num_attention_heads
- self.batch_size = batch_size
- self.hidden_act = hidden_act
- self.intermediate_size = intermediate_size
- self.hidden_dropout_prob = hidden_dropout_prob
- self.attention_probs_dropout_prob = attention_probs_dropout_prob
- self.max_position_embeddings = max_position_embeddings
- self.type_vocab_size = type_vocab_size
- self.initializer_range = initializer_range
- self.fp16 = fp16
- else:
- raise ValueError("First argument must be either a vocabulary size (int) "
- "or the path to a pretrained model config file (str)")
- @classmethod
- def from_dict(cls, json_object):
- """Constructs a `BertConfig` from a Python dictionary of parameters."""
- config = BertConfig(vocab_size_or_config_json_file=-1)
- for key, value in json_object.items():
- config.__dict__[key] = value
- return config
- @classmethod
- def from_json_file(cls, json_file):
- """Constructs a `BertConfig` from a json file of parameters."""
- with open(json_file, "r", encoding='utf-8') as reader:
- text = reader.read()
- return cls.from_dict(json.loads(text))
- def __repr__(self):
- return str(self.to_json_string())
- def to_dict(self):
- """Serializes this instance to a Python dictionary."""
- output = copy.deepcopy(self.__dict__)
- return output
- def to_json_string(self):
- """Serializes this instance to a JSON string."""
- return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
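- # Minimal usage sketch for BertConfig (our addition; this helper is never called and the
- # values are arbitrary): build a config from an int vocabulary size, then round-trip it
- # through the dict serializers defined above.
- def _bert_config_roundtrip_example():
-     config = BertConfig(vocab_size_or_config_json_file=30522, hidden_size=768, num_hidden_layers=12)
-     same_config = BertConfig.from_dict(config.to_dict())
-     return same_config.to_json_string()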
- try:
- import apex
- #apex.amp.register_half_function(apex.normalization.fused_layer_norm, 'FusedLayerNorm')
- import apex.normalization
- #apex.amp.register_float_function(apex.normalization.FusedLayerNorm, 'forward')
- BertLayerNorm = apex.normalization.FusedLayerNorm
- except ImportError:
- print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
- class BertLayerNorm(nn.Module):
- def __init__(self, hidden_size, eps=1e-12):
- """Construct a layernorm module in the TF style (epsilon inside the square root).
- """
- super(BertLayerNorm, self).__init__()
- self.weight = nn.Parameter(torch.ones(hidden_size))
- self.bias = nn.Parameter(torch.zeros(hidden_size))
- self.variance_epsilon = eps
- def forward(self, x):
- pdtype = x.dtype
- x = x.float()
- u = x.mean(-1, keepdim=True)
- s = (x - u).pow(2).mean(-1, keepdim=True)
- x = (x - u) / torch.sqrt(s + self.variance_epsilon)
- return self.weight * x.to(pdtype) + self.bias
- #def forward(self, x):
- # u = x.mean(-1, keepdim=True)
- # s = (x - u).pow(2).mean(-1, keepdim=True)
- # x = (x - u) / torch.sqrt(s + self.variance_epsilon)
- # return self.weight * x + self.bias
- class BertEmbeddings(nn.Module):
- """Construct the embeddings from word, position and token_type embeddings.
- """
- def __init__(self, config):
- super(BertEmbeddings, self).__init__()
- self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
- self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
- self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
- # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
- # any TensorFlow checkpoint file
- self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
- def forward(self, input_ids, token_type_ids=None):
- seq_length = input_ids.size(1)
- position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
- position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
- if token_type_ids is None:
- token_type_ids = torch.zeros_like(input_ids)
- words_embeddings = self.word_embeddings(input_ids)
- position_embeddings = self.position_embeddings(position_ids)
- token_type_embeddings = self.token_type_embeddings(token_type_ids)
- embeddings = words_embeddings + position_embeddings + token_type_embeddings
- embeddings = self.LayerNorm(embeddings)
- embeddings = self.dropout(embeddings)
- return embeddings
- class BertSelfAttention(nn.Module):
- def __init__(self, i, config, weights, biases):
- super(BertSelfAttention, self).__init__()
- if config.hidden_size % config.num_attention_heads != 0:
- raise ValueError("The hidden size (%d) is not a multiple of the number of attention "
- "heads (%d)" % (config.hidden_size, config.num_attention_heads))
- self.num_attention_heads = config.num_attention_heads
- self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
- self.all_head_size = self.num_attention_heads * self.attention_head_size
- self.query = nn.Linear(config.hidden_size, self.all_head_size)
- self.query.weight = weights[0]
- self.query.bias = biases[0]
- self.key = nn.Linear(config.hidden_size, self.all_head_size)
- self.key.weight = weights[1]
- self.key.bias = biases[1]
- self.value = nn.Linear(config.hidden_size, self.all_head_size)
- self.value.weight = weights[2]
- self.value.bias = biases[2]
- self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
- self.softmax = nn.Softmax(dim=-1)
- #self.softmax_config = DeepSpeedSoftmaxConfig()
- #self.softmax_config.batch_size = config.batch_size
- #self.softmax_config.max_seq_length = config.max_position_embeddings
- #self.softmax_config.hidden_size = config.hidden_size
- #self.softmax_config.heads = config.num_attention_heads
- #self.softmax_config.softmax_id = i
- #self.softmax_config.fp16 = config.fp16
- #self.softmax_config.prob_drop_out = 0.0
- #self.softmax = DeepSpeedSoftmax(i, self.softmax_config)
- def transpose_for_scores(self, x):
- new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
- x = x.view(*new_x_shape)
- return x.permute(0, 2, 1, 3)
- def transpose_key_for_scores(self, x):
- new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
- x = x.view(*new_x_shape)
- return x.permute(0, 2, 3, 1)
- def forward(self, hidden_states, attention_mask, grads=None):
- #timing = []
- #t1 = GPUTimer()
- #t1.record()
- mixed_query_layer = self.query(hidden_states)
- #timing.append(t1.elapsed())
- #print("Query elapsed: %s" % (time.clock() - start))
- #t1.record()
- mixed_key_layer = self.key(hidden_states)
- #timing.append(t1.elapsed())
- #print("Key elapsed: %s" % (time.clock() - start))
- #t1.record()
- mixed_value_layer = self.value(hidden_states)
- #timing.append(t1.elapsed())
- #print("Value elapsed: %s" % (time.clock() - start))
- #t1.record()
- query_layer = self.transpose_for_scores(mixed_query_layer)
- # print(query_layer)
- #timing.append(t1.elapsed())
- #print("Query-Transform elapsed: %s" % (time.clock() - start))
- #t1.record()
- key_layer = self.transpose_key_for_scores(mixed_key_layer)
- # print(key_layer)
- #timing.append(t1.elapsed())
- #print("Key-Transform elapsed: %s" % (time.clock() - start))
- #t1.record()
- value_layer = self.transpose_for_scores(mixed_value_layer)
- #print(value_layer)
- #timing.append(t1.elapsed())
- #print("Value-Transform elapsed: %s" % (time.clock() - start))
- # Take the dot product between "query" and "key" to get the raw attention scores.
- #t1.record()
- #print(query_layer.shape)
- #print(key_layer.shape)
- attention_scores = torch.matmul(query_layer, key_layer)
- #print(attention_scores.shape)
- attention_scores = attention_scores / math.sqrt(self.attention_head_size)
- #print("Pytorch: ", attention_scores)
- #timing.append(t1.elapsed())
- #print("Attention-Score elapsed: %s" % (time.clock() - start))
- # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
- #t1.record()
- # context_layer = self.softmax(query_layer, key_layer, value_layer, attention_mask)
- #print("context shape is :", context_layer.shape)
- #print("Cuda-ext:, ", attention_scores1)
- # Normalize the attention scores to probabilities.
- ####attention_probs = self.softmax(attention_scores)
- #timing.append(t1.elapsed())
- #print("Softmax elapsed: %s" % (time.clock() - start))
- #t1 = GPUTimer()
- #t1.record()
- attention_scores = attention_scores + attention_mask
- attention_probs = self.softmax(attention_scores)
- #attention_scores = self.softmax(attention_scores, attention_mask)
- #print("Softmax elapse {0:8.2f} ms", t1.elapsed() * 1000)
- # This is actually dropping out entire tokens to attend to, which might
- # seem a bit unusual, but is taken from the original Transformer paper.
- attention_probs = self.dropout(attention_probs)
- #t1.record()
- context_layer = torch.matmul(attention_probs, value_layer)
- #timing.append(t1.elapsed())
- #print("Context elapsed: %s" % (time.clock() - start))
- #t1.record()
- #context_layer1 = context_layer.permute(
- # 0, 1, 3, 2, 4).contiguous()
- #if grads is not None:
- # context_layer.register_hook(lambda x, self = self : grads.append([x, "Context"]))
- context_layer1 = context_layer.permute(0, 2, 1, 3).contiguous()
- new_context_layer_shape = context_layer1.size()[:-2] + (self.all_head_size, )
- context_layer1 = context_layer1.view(*new_context_layer_shape)
- #timing.append(t1.elapsed())
- #print("Context-Transform elapsed: %s" % (time.clock() - start))
- if grads is not None:
- query_layer.register_hook(lambda x, self=self: grads.append([x, "Query"]))
- key_layer.register_hook(lambda x, self=self: grads.append([x, "Key"]))
- value_layer.register_hook(lambda x, self=self: grads.append([x, "Value"]))
- return context_layer1
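- # Shape sketch for the reshapes in BertSelfAttention above (our addition, never called):
- # transpose_for_scores maps [batch, seq, heads * head_size] to [batch, heads, seq, head_size],
- # transpose_key_for_scores maps it to [batch, heads, head_size, seq], so their matmul yields
- # [batch, heads, seq, seq] attention scores.
- def _attention_shape_example(batch_size=2, seq_len=8, num_heads=12, head_size=64):
-     hidden = torch.randn(batch_size, seq_len, num_heads * head_size)
-     query = hidden.view(batch_size, seq_len, num_heads, head_size).permute(0, 2, 1, 3)
-     key = hidden.view(batch_size, seq_len, num_heads, head_size).permute(0, 2, 3, 1)
-     return torch.matmul(query, key).shape  # torch.Size([2, 12, 8, 8]) with the defaults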
- class BertSelfOutput(nn.Module):
- def __init__(self, config, weights, biases):
- super(BertSelfOutput, self).__init__()
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
- self.dense.weight = weights[3]
- self.dense.bias = biases[3]
- self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
- def forward(self, hidden_states, input_tensor):
- #timing = []
- #t1 = GPUTimer()
- #t1.record()
- hidden_states = self.dense(hidden_states)
- #timing.append(t1.elapsed())
- #print("Attention Output elapsed: %s" % (time.clock() - start))
- hidden_states = self.dropout(hidden_states)
- #t1.record()
- #hidden_states = self.LayerNorm(hidden_states + input_tensor)
- #timing.append(t1.elapsed())
- #print("LayerNorm elapsed: %s" % (time.clock() - start))
- return hidden_states
- def get_w(self):
- return self.dense.weight
- class BertAttention(nn.Module):
- def __init__(self, i, config, weights, biases):
- super(BertAttention, self).__init__()
- self.self = BertSelfAttention(i, config, weights, biases)
- self.output = BertSelfOutput(config, weights, biases)
- def forward(self, input_tensor, attention_mask):
- self_output = self.self(input_tensor, attention_mask)
- attention_output = self.output(self_output, input_tensor)
- return attention_output
- def get_w(self):
- return self.output.get_w()
- class BertIntermediate(nn.Module):
- def __init__(self, config, weights, biases):
- super(BertIntermediate, self).__init__()
- self.dense_act = LinearActivation(config.hidden_size,
- config.intermediate_size,
- weights,
- biases,
- act=config.hidden_act)
- def forward(self, hidden_states):
- hidden_states = self.dense_act(hidden_states)
- return hidden_states
- class BertOutput(nn.Module):
- def __init__(self, config, weights, biases):
- super(BertOutput, self).__init__()
- self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
- self.dense.weight = weights[6]
- self.dense.bias = biases[6]
- self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
- def forward(self, hidden_states, input_tensor):
- #timing = []
- #t1 = GPUTimer()
- #t1.record()
- #print (hidden_states)
- #print (self.dense.weight)
- hidden_states = self.dense(hidden_states)
- #timing.append(t1.elapsed())
- #print("FF2 elapsed: %s" % (time.clock() - start))
- hidden_states = self.dropout(hidden_states)
- #t1.record()
- #hidden_states = self.LayerNorm(hidden_states + input_tensor)
- #timing.append(t1.elapsed())
- #print("LayerNorm elapsed: %s" % (time.clock() - start))
- return hidden_states
- class BertLayer(nn.Module):
- def __init__(self, i, config, weights, biases):
- super(BertLayer, self).__init__()
- self.attention = BertAttention(i, config, weights, biases)
- self.PreAttentionLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
- self.PostAttentionLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
- self.intermediate = BertIntermediate(config, weights, biases)
- self.output = BertOutput(config, weights, biases)
- self.weight = weights
- self.biases = biases
- def forward(self, hidden_states, attention_mask, grads, collect_all_grads=False):
- input_layer_norm = self.PreAttentionLayerNorm(hidden_states)
- attention_output = self.attention(input_layer_norm, attention_mask)
- #print ("hidden shape is :", hidden_states.shape)
- intermediate_input = hidden_states + attention_output
- intermediate_layer_norm = self.PostAttentionLayerNorm(intermediate_input)
- intermediate_output = self.intermediate(intermediate_layer_norm)
- layer_output = self.output(intermediate_output, attention_output)
- #attention_output = self.attention(hidden_states, attention_mask)
- #intermediate_output = self.intermediate(attention_output)
- #layer_output = self.output(intermediate_output, attention_output)
- if collect_all_grads:
- # self.weight[0].register_hook(lambda x, self=self: grads.append([x,"Q_W"]))
- # self.biases[0].register_hook(lambda x, self=self: grads.append([x,"Q_B"]))
- # self.weight[1].register_hook(lambda x, self=self: grads.append([x,"K_W"]))
- # self.biases[1].register_hook(lambda x, self=self: grads.append([x,"K_B"]))
- self.weight[2].register_hook(lambda x, self=self: grads.append([x, "V_W"]))
- self.biases[2].register_hook(lambda x, self=self: grads.append([x, "V_B"]))
- self.weight[3].register_hook(lambda x, self=self: grads.append([x, "O_W"]))
- self.biases[3].register_hook(lambda x, self=self: grads.append([x, "O_B"]))
- self.PostAttentionLayerNorm.weight.register_hook(lambda x, self=self: grads.append([x, "N2_W"]))
- self.PostAttentionLayerNorm.bias.register_hook(lambda x, self=self: grads.append([x, "N2_B"]))
- self.weight[5].register_hook(lambda x, self=self: grads.append([x, "int_W"]))
- self.biases[5].register_hook(lambda x, self=self: grads.append([x, "int_B"]))
- self.weight[6].register_hook(lambda x, self=self: grads.append([x, "out_W"]))
- self.biases[6].register_hook(lambda x, self=self: grads.append([x, "out_B"]))
- self.PreAttentionLayerNorm.weight.register_hook(lambda x, self=self: grads.append([x, "norm_W"]))
- self.PreAttentionLayerNorm.bias.register_hook(lambda x, self=self: grads.append([x, "norm_B"]))
- return layer_output + intermediate_input
- def get_w(self):
- return self.attention.get_w()
- class BertEncoder(nn.Module):
- def __init__(self, config, weights, biases):
- super(BertEncoder, self).__init__()
- #layer = BertLayer(config, weights, biases)
- self.FinalLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
- self.layer = nn.ModuleList(
- [copy.deepcopy(BertLayer(i, config, weights, biases)) for i in range(config.num_hidden_layers)])
- self.grads = []
- self.graph = []
- def get_grads(self):
- return self.grads
- # def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
- # all_encoder_layers = []
- # for layer_module in self.layer:
- # hidden_states = layer_module(hidden_states, attention_mask)
- # if output_all_encoded_layers:
- # all_encoder_layers.append(hidden_states)
- # if not output_all_encoded_layers:
- # all_encoder_layers.append(hidden_states)
- # return all_encoder_layers
- def get_modules(self, big_node, input):
- for _, mdl in big_node.named_children():
- self.graph.append(mdl)
- self.get_modules(mdl, input)
- def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, checkpoint_activations=False):
- all_encoder_layers = []
- def custom(start, end):
- def custom_forward(*inputs):
- layers = self.layer[start:end]
- x_ = inputs[0]
- for layer in layers:
- x_ = layer(x_, inputs[1])
- return x_
- return custom_forward
- if checkpoint_activations:
- l = 0
- num_layers = len(self.layer)
- chunk_length = math.ceil(math.sqrt(num_layers))
- while l < num_layers:
- hidden_states = checkpoint.checkpoint(custom(l, l + chunk_length), hidden_states, attention_mask * 1)
- l += chunk_length
- # decoder layers
- else:
- for i, layer_module in enumerate(self.layer):
- hidden_states = layer_module(hidden_states, attention_mask, self.grads, collect_all_grads=True)
- hidden_states.register_hook(lambda x, i=i, self=self: self.grads.append([x, "hidden_state"]))
- #print("pytorch weight is: ", layer_module.get_w())
- if output_all_encoded_layers:
- all_encoder_layers.append((hidden_states))
- if not output_all_encoded_layers or checkpoint_activations:
- hidden_states = self.FinalLayerNorm(hidden_states)
- all_encoder_layers.append((hidden_states))
- return all_encoder_layers
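- # Illustrative sketch (our addition, never called) of the chunking rule BertEncoder.forward
- # uses when checkpoint_activations=True: layers are grouped into chunks of
- # ceil(sqrt(num_layers)) and each chunk is recomputed during backward via torch.utils.checkpoint.
- def _checkpoint_chunking_example(num_layers=12):
-     chunk_length = math.ceil(math.sqrt(num_layers))
-     chunks = [(start, min(start + chunk_length, num_layers)) for start in range(0, num_layers, chunk_length)]
-     return chunk_length, chunks  # (4, [(0, 4), (4, 8), (8, 12)]) for the default 12 layers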
- #class BertEncoder(nn.Module):
- # def __init__(self, config):
- # super(BertEncoder, self).__init__()
- # layer = BertLayer(config)
- # self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
- #
- # def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
- # all_encoder_layers = []
- # for layer_module in self.layer:
- # hidden_states = layer_module(hidden_states, attention_mask)
- # if output_all_encoded_layers:
- # all_encoder_layers.append(hidden_states)
- # if not output_all_encoded_layers:
- # all_encoder_layers.append(hidden_states)
- # return all_encoder_layers
- class BertPooler(nn.Module):
- def __init__(self, config):
- super(BertPooler, self).__init__()
- self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act="tanh")
- def forward(self, hidden_states):
- # We "pool" the model by simply taking the hidden state corresponding
- # to the first token.
- first_token_tensor = hidden_states[:, 0]
- pooled_output = self.dense_act(first_token_tensor)
- return pooled_output
- class BertPredictionHeadTransform(nn.Module):
- def __init__(self, config):
- super(BertPredictionHeadTransform, self).__init__()
- self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act)
- self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
- def forward(self, hidden_states):
- hidden_states = self.dense_act(hidden_states)
- hidden_states = self.LayerNorm(hidden_states)
- return hidden_states
- class BertLMPredictionHead(nn.Module):
- def __init__(self, config, bert_model_embedding_weights):
- super(BertLMPredictionHead, self).__init__()
- self.transform = BertPredictionHeadTransform(config)
- # The output weights are the same as the input embeddings, but there is
- # an output-only bias for each token.
- self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
- bert_model_embedding_weights.size(0),
- bias=False)
- self.decoder.weight = bert_model_embedding_weights
- self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
- def forward(self, hidden_states):
- hidden_states = self.transform(hidden_states)
- get_accelerator().range_push("decoder input.size() = {}, weight.size() = {}".format(
- hidden_states.size(), self.decoder.weight.size()))
- hidden_states = self.decoder(hidden_states) + self.bias
- get_accelerator().range_pop()
- return hidden_states
- class BertOnlyMLMHead(nn.Module):
- def __init__(self, config, bert_model_embedding_weights):
- super(BertOnlyMLMHead, self).__init__()
- self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
- def forward(self, sequence_output):
- prediction_scores = self.predictions(sequence_output)
- return prediction_scores
- class BertOnlyNSPHead(nn.Module):
- def __init__(self, config):
- super(BertOnlyNSPHead, self).__init__()
- self.seq_relationship = nn.Linear(config.hidden_size, 2)
- def forward(self, pooled_output):
- seq_relationship_score = self.seq_relationship(pooled_output)
- return seq_relationship_score
- class BertPreTrainingHeads(nn.Module):
- def __init__(self, config, bert_model_embedding_weights):
- super(BertPreTrainingHeads, self).__init__()
- self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
- self.seq_relationship = nn.Linear(config.hidden_size, 2)
- def forward(self, sequence_output, pooled_output):
- prediction_scores = self.predictions(sequence_output)
- seq_relationship_score = self.seq_relationship(pooled_output)
- return prediction_scores, seq_relationship_score
- class BertPreTrainedModel(nn.Module):
- """ An abstract class to handle weights initialization and
- a simple interface for downloading and loading pretrained models.
- """
- def __init__(self, config, *inputs, **kwargs):
- super(BertPreTrainedModel, self).__init__()
- if not isinstance(config, BertConfig):
- raise ValueError("Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
- "To create a model from a Google pretrained model use "
- "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
- self.__class__.__name__, self.__class__.__name__))
- self.config = config
- def init_bert_weights(self, module):
- """ Initialize the weights.
- """
- if isinstance(module, (nn.Linear, nn.Embedding)):
- # Slightly different from the TF version which uses truncated_normal for initialization
- # cf https://github.com/pytorch/pytorch/pull/5617
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- elif isinstance(module, BertLayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
- if isinstance(module, nn.Linear) and module.bias is not None:
- module.bias.data.zero_()
- @classmethod
- def from_pretrained(cls,
- pretrained_model_name_or_path,
- state_dict=None,
- cache_dir=None,
- from_tf=False,
- *inputs,
- **kwargs):
- """
- Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
- Download and cache the pre-trained model file if needed.
- Params:
- pretrained_model_name_or_path: either:
- - a str with the name of a pre-trained model to load selected in the list of:
- . `bert-base-uncased`
- . `bert-large-uncased`
- . `bert-base-cased`
- . `bert-large-cased`
- . `bert-base-multilingual-uncased`
- . `bert-base-multilingual-cased`
- . `bert-base-chinese`
- - a path or url to a pretrained model archive containing:
- . `bert_config.json` a configuration file for the model
- . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- - a path or url to a pretrained model archive containing:
- . `bert_config.json` a configuration file for the model
- . `model.chkpt` a TensorFlow checkpoint
- from_tf: should we load the weights from a locally saved TensorFlow checkpoint
- cache_dir: an optional path to a folder in which the pre-trained models will be cached.
- state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
- *inputs, **kwargs: additional input for the specific Bert class
- (ex: num_labels for BertForSequenceClassification)
- """
- if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
- archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
- else:
- archive_file = pretrained_model_name_or_path
- if resolved_archive_file == archive_file: # noqa: F821
- logger.info("loading archive file {}".format(archive_file))
- else:
- logger.info("loading archive file {} from cache at {}".format(archive_file,
- resolved_archive_file)) # noqa: F821
- tempdir = None
- if os.path.isdir(resolved_archive_file) or from_tf: # noqa: F821
- serialization_dir = resolved_archive_file # noqa: F821
- else:
- # Extract archive to temp dir
- tempdir = tempfile.mkdtemp()
- logger.info("extracting archive file {} to temp dir {}".format(
- resolved_archive_file, # noqa: F821
- tempdir))
- with tarfile.open(resolved_archive_file, 'r:gz') as archive: # noqa: F821
- archive.extractall(tempdir)
- serialization_dir = tempdir
- # Load config
- config_file = os.path.join(serialization_dir, CONFIG_NAME)
- config = BertConfig.from_json_file(config_file)
- logger.info("Model config {}".format(config))
- # Instantiate model.
- model = cls(config, *inputs, **kwargs)
- if state_dict is None and not from_tf:
- weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
- state_dict = torch.load(weights_path, map_location='cpu' if not get_accelerator().is_available() else None)
- if tempdir:
- # Clean up temp dir
- shutil.rmtree(tempdir)
- if from_tf:
- # Directly load from a TensorFlow checkpoint
- weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
- return load_tf_weights_in_bert(model, weights_path)
- # Load from a PyTorch state_dict
- old_keys = []
- new_keys = []
- for key in state_dict.keys():
- new_key = None
- if 'gamma' in key:
- new_key = key.replace('gamma', 'weight')
- if 'beta' in key:
- new_key = key.replace('beta', 'bias')
- if new_key:
- old_keys.append(key)
- new_keys.append(new_key)
- for old_key, new_key in zip(old_keys, new_keys):
- state_dict[new_key] = state_dict.pop(old_key)
- missing_keys = []
- unexpected_keys = []
- error_msgs = []
- # copy state_dict so _load_from_state_dict can modify it
- metadata = getattr(state_dict, '_metadata', None)
- state_dict = state_dict.copy()
- if metadata is not None:
- state_dict._metadata = metadata
- def load(module, prefix=''):
- local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
- module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys,
- error_msgs)
- for name, child in module._modules.items():
- if child is not None:
- load(child, prefix + name + '.')
- start_prefix = ''
- if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
- start_prefix = 'bert.'
- load(model, prefix=start_prefix)
- if len(missing_keys) > 0:
- logger.info("Weights of {} not initialized from pretrained model: {}".format(
- model.__class__.__name__, missing_keys))
- if len(unexpected_keys) > 0:
- logger.info("Weights from pretrained model not used in {}: {}".format(model.__class__.__name__,
- unexpected_keys))
- if len(error_msgs) > 0:
- raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
- model.__class__.__name__, "\n\t".join(error_msgs)))
- return model
- class BertModel(BertPreTrainedModel):
- """BERT model ("Bidirectional Encoder Representations from Transformers").
- Params:
- config: a BertConfig class instance with the configuration to build a new model
- Inputs:
- `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
- with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
- `extract_features.py`, `run_classifier.py` and `run_squad.py`)
- `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
- types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
- a `sentence B` token (see BERT paper for more details).
- `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
- selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
- input sequence length in the current batch. It's the mask that we typically use for attention when
- a batch has varying length sentences.
- `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
- Outputs: Tuple of (encoded_layers, pooled_output)
- `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
- of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
- encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
- to the last attention block of shape [batch_size, sequence_length, hidden_size],
- `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
- classifier pretrained on top of the hidden state associated with the first token of the
- input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
- Example usage:
- ```python
- # Already been converted into WordPiece token ids
- input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
- input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
- token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
- config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
- num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
- model = modeling.BertModel(config=config)
- all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
- ```
- """
- def __init__(self, config):
- super(BertModel, self).__init__(config)
- self.embeddings = BertEmbeddings(config)
- self.encoder = BertEncoder(config)
- self.pooler = BertPooler(config)
- self.apply(self.init_bert_weights)
- def forward(self,
- input_ids,
- token_type_ids=None,
- attention_mask=None,
- output_all_encoded_layers=True,
- checkpoint_activations=False):
- if attention_mask is None:
- attention_mask = torch.ones_like(input_ids)
- if token_type_ids is None:
- token_type_ids = torch.zeros_like(input_ids)
- # We create a 3D attention mask from a 2D tensor mask.
- # Sizes are [batch_size, 1, 1, to_seq_length]
- # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
- # this attention mask is simpler than the triangular masking of causal attention
- # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
- extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
- # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
- # masked positions, this operation will create a tensor which is 0.0 for
- # positions we want to attend and -10000.0 for masked positions.
- # Since we are adding it to the raw scores before the softmax, this is
- # effectively the same as removing these entirely.
- extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
- extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
- embedding_output = self.embeddings(input_ids, token_type_ids)
- encoded_layers = self.encoder(embedding_output,
- extended_attention_mask,
- output_all_encoded_layers=output_all_encoded_layers,
- checkpoint_activations=checkpoint_activations)
- sequence_output = encoded_layers[-1]
- pooled_output = self.pooler(sequence_output)
- if not output_all_encoded_layers:
- encoded_layers = encoded_layers[-1]
- return encoded_layers, pooled_output
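- # Illustrative sketch (our addition, never called) of the mask transformation performed in
- # BertModel.forward above: a [batch, seq] 0/1 padding mask becomes a broadcastable
- # [batch, 1, 1, seq] additive mask that is 0.0 for attended positions and -10000.0 for
- # masked positions, which is then added to the raw attention scores.
- def _extended_attention_mask_example():
-     attention_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
-     extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2).float()
-     return (1.0 - extended_attention_mask) * -10000.0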
- class BertForPreTraining(BertPreTrainedModel):
- """BERT model with pre-training heads.
- This module comprises the BERT model followed by the two pre-training heads:
- - the masked language modeling head, and
- - the next sentence classification head.
- Params:
- config: a BertConfig class instance with the configuration to build a new model.
- Inputs:
- `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
- with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
- `extract_features.py`, `run_classifier.py` and `run_squad.py`)
- `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
- types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
- a `sentence B` token (see BERT paper for more details).
- `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
- selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
- input sequence length in the current batch. It's the mask that we typically use for attention when
- a batch has varying length sentences.
- `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
- with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
- is only computed for the labels set in [0, ..., vocab_size]
- `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
- with indices selected in [0, 1].
- 0 => next sentence is the continuation, 1 => next sentence is a random sentence.
- Outputs:
- if `masked_lm_labels` and `next_sentence_label` are not `None`:
- Outputs the total_loss which is the sum of the masked language modeling loss and the next
- sentence classification loss.
- if `masked_lm_labels` or `next_sentence_label` is `None`:
- Outputs a tuple comprising
- - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- - the next sentence classification logits of shape [batch_size, 2].
- Example usage:
- ```python
- # Already been converted into WordPiece token ids
- input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
- input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
- token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
- config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
- num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
- model = BertForPreTraining(config)
- masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
- ```
- """
- def __init__(self, config, args):
- super(BertForPreTraining, self).__init__(config)
- self.summary_writer = None
- if dist.get_rank() == 0:
- self.summary_writer = args.summary_writer
- self.samples_per_step = dist.get_world_size() * args.train_batch_size
- self.sample_count = self.samples_per_step
- self.bert = BertModel(config)
- self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
- self.apply(self.init_bert_weights)
- def log_summary_writer(self, logs: dict, base='Train'):
- if dist.get_rank() == 0:
- module_name = "Samples" #self._batch_module_name.get(batch_type, self._get_batch_type_error(batch_type))
- for key, log in logs.items():
- self.summary_writer.add_scalar(f'{base}/{module_name}/{key}', log, self.sample_count)
- self.sample_count += self.samples_per_step
- def forward(self, batch, log=True):
- #input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, checkpoint_activations=False):
- input_ids = batch[1]
- token_type_ids = batch[3]
- attention_mask = batch[2]
- masked_lm_labels = batch[5]
- next_sentence_label = batch[4]
- checkpoint_activations = False
- sequence_output, pooled_output = self.bert(input_ids,
- token_type_ids,
- attention_mask,
- output_all_encoded_layers=False,
- checkpoint_activations=checkpoint_activations)
- prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
- if masked_lm_labels is not None and next_sentence_label is not None:
- loss_fct = CrossEntropyLoss(ignore_index=-1)
- masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
- next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
- #print("loss is {} {}".format(masked_lm_loss, next_sentence_loss))
- total_loss = masked_lm_loss + next_sentence_loss
- # if log:
- # self.log_summary_writer(logs={'train_loss': total_loss.item()})
- return total_loss
- else:
- return prediction_scores, seq_relationship_score
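- # Illustrative sketch (our addition, never called) of the shape handling in the pre-training
- # loss above: prediction scores are flattened to [batch * seq, vocab] and labels to
- # [batch * seq]; ignore_index=-1 drops every position that was not masked out.
- def _pretraining_loss_shape_example(batch_size=2, seq_len=8, vocab_size=30522):
-     loss_fct = CrossEntropyLoss(ignore_index=-1)
-     prediction_scores = torch.randn(batch_size, seq_len, vocab_size)
-     masked_lm_labels = torch.full((batch_size, seq_len), -1, dtype=torch.long)
-     masked_lm_labels[:, 0] = 7  # pretend only the first token of each sequence was masked
-     return loss_fct(prediction_scores.view(-1, vocab_size), masked_lm_labels.view(-1))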
- class BertForMaskedLM(BertPreTrainedModel):
- """BERT model with the masked language modeling head.
- This module comprises the BERT model followed by the masked language modeling head.
- Params:
- config: a BertConfig class instance with the configuration to build a new model.
- Inputs:
- `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
- with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
- `extract_features.py`, `run_classifier.py` and `run_squad.py`)
- `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
- types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
- a `sentence B` token (see BERT paper for more details).
- `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
- selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
- input sequence length in the current batch. It's the mask that we typically use for attention when
- a batch has varying length sentences.
- `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
- with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
- is only computed for the labels set in [0, ..., vocab_size]
- Outputs:
- if `masked_lm_labels` is not `None`:
- Outputs the masked language modeling loss.
- if `masked_lm_labels` is `None`:
- Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
- Example usage:
- ```python
- # Already been converted into WordPiece token ids
- input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
- input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
- token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
- config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
- num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
- model = BertForMaskedLM(config)
- masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
- ```
- """
- def __init__(self, config):
- super(BertForMaskedLM, self).__init__(config)
- self.bert = BertModel(config)
- self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
- self.apply(self.init_bert_weights)
- def forward(self,
- input_ids,
- token_type_ids=None,
- attention_mask=None,
- masked_lm_labels=None,
- checkpoint_activations=False):
- sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
- prediction_scores = self.cls(sequence_output)
- if masked_lm_labels is not None:
- loss_fct = CrossEntropyLoss(ignore_index=-1)
- masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
- return masked_lm_loss
- else:
- return prediction_scores
- class BertForNextSentencePrediction(BertPreTrainedModel):
- """BERT model with next sentence prediction head.
- This module comprises the BERT model followed by the next sentence classification head.
- Params:
- config: a BertConfig class instance with the configuration to build a new model.
- Inputs:
- `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
- with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
- `extract_features.py`, `run_classifier.py` and `run_squad.py`)
- `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
- types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
- a `sentence B` token (see BERT paper for more details).
- `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
- selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
- input sequence length in the current batch. It's the mask that we typically use for attention when
- a batch has varying length sentences.
- `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
- with indices selected in [0, 1].
- 0 => next sentence is the continuation, 1 => next sentence is a random sentence.
- Outputs:
- if `next_sentence_label` is not `None`:
- Outputs the next sentence classification loss.
- if `next_sentence_label` is `None`:
- Outputs the next sentence classification logits of shape [batch_size, 2].
- Example usage:
- ```python
- # Already been converted into WordPiece token ids
- input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
- input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
- token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
- config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
- num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
- model = BertForNextSentencePrediction(config)
- seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
- ```
- """
- def __init__(self, config):
- super(BertForNextSentencePrediction, self).__init__(config)
- self.bert = BertModel(config)
- self.cls = BertOnlyNSPHead(config)
- self.apply(self.init_bert_weights)
- def forward(self,
- input_ids,
- token_type_ids=None,
- attention_mask=None,
- next_sentence_label=None,
- checkpoint_activations=False):
- _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
- seq_relationship_score = self.cls(pooled_output)
- if next_sentence_label is not None:
- loss_fct = CrossEntropyLoss(ignore_index=-1)
- next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
- return next_sentence_loss
- else:
- return seq_relationship_score


class BertForSequenceClassification(BertPreTrainedModel):
    """BERT model for classification.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_labels`: the number of classes for the classifier. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            type indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see the BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_labels - 1].

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_labels].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    num_labels = 2
    model = BertForSequenceClassification(config, num_labels)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config, num_labels):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False):
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            return loss
        else:
            return logits
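

# A minimal fine-tuning-style sketch (not part of the original source): supplying
# `labels` makes forward() return the cross-entropy loss, which can be backpropagated
# through both the classifier head and the BERT encoder. Tensor values are placeholders.
def _example_sequence_classification_loss():
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForSequenceClassification(config, num_labels=2)
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    labels = torch.LongTensor([1, 0])  # one class index per example, in [0, num_labels - 1]
    loss = model(input_ids, token_type_ids, input_mask, labels)
    loss.backward()  # gradients now populate the encoder and the classifier head
    return loss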


class BertForMultipleChoice(BertPreTrainedModel):
    """BERT model for multiple choice tasks.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_choices`: the number of choices to score for each example. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the token type indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
            and type 1 corresponds to a `sentence B` token (see the BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices - 1].

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_choices].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
    input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]], [[1, 1, 0], [1, 0, 0]]])
    token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]], [[0, 1, 1], [0, 0, 1]]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    num_choices = 2
    model = BertForMultipleChoice(config, num_choices)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config, num_choices):
        super(BertForMultipleChoice, self).__init__(config)
        self.num_choices = num_choices
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False):
        # Flatten the choice dimension so each (example, choice) pair is encoded independently
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        _, pooled_output = self.bert(flat_input_ids,
                                     flat_token_type_ids,
                                     flat_attention_mask,
                                     output_all_encoded_layers=False)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Un-flatten so each example gets one score per choice
        reshaped_logits = logits.view(-1, self.num_choices)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            return loss
        else:
            return reshaped_logits
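

# A minimal sketch (not part of the original source) of the multiple-choice loss path:
# inputs are [batch_size, num_choices, sequence_length], each choice is encoded
# separately, and `labels` holds the index of the correct choice for each example.
# Tensor values are placeholders.
def _example_multiple_choice_loss():
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForMultipleChoice(config, num_choices=2)
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
    input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]], [[1, 1, 0], [1, 0, 0]]])
    token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]], [[0, 1, 1], [0, 0, 1]]])
    labels = torch.LongTensor([0, 1])  # index of the correct choice per example
    loss = model(input_ids, token_type_ids, input_mask, labels)
    return loss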


class BertForTokenClassification(BertPreTrainedModel):
    """BERT model for token-level classification.
    This module is composed of the BERT model with a linear layer on top of
    the full hidden state of the last layer.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_labels`: the number of classes for the classifier. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            type indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see the BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [0, ..., num_labels - 1].

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, sequence_length, num_labels].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    num_labels = 2
    model = BertForTokenClassification(config, num_labels)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config, num_labels):
        super(BertForTokenClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep the active (non-padding) positions in the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            return loss
        else:
            return logits
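

# A minimal sketch (not part of the original source): token-level labels have shape
# [batch_size, sequence_length]; positions where the attention mask is 0 are excluded
# from the loss by the active-loss filtering above. Tensor values are placeholders.
def _example_token_classification_loss():
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForTokenClassification(config, num_labels=2)
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    labels = torch.LongTensor([[0, 1, 1], [1, 0, 0]])  # one label per token; padded positions are masked out
    loss = model(input_ids, token_type_ids, input_mask, labels)
    return loss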


class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT model for Question Answering (span extraction).
    This module is composed of the BERT model with a linear layer on top of
    the sequence output that computes start_logits and end_logits.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            type indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see the BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `start_positions`: position of the first token of the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence, and positions outside of the sequence are not
            taken into account for computing the loss.
        `end_positions`: position of the last token of the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence, and positions outside of the sequence are not
            taken into account for computing the loss.

    Outputs:
        if `start_positions` and `end_positions` are not `None`:
            Outputs the total_loss, which is the average of the CrossEntropy losses for the start and end token positions.
        if `start_positions` or `end_positions` is `None`:
            Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
            position tokens of shape [batch_size, sequence_length].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForQuestionAnswering(config)
    start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """

    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.bert = BertModel(config)
        # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.apply(self.init_bert_weights)

    def forward(self,
                input_ids,
                token_type_ids=None,
                attention_mask=None,
                start_positions=None,
                end_positions=None,
                checkpoint_activations=False):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the batch split can add an extra dimension; squeeze it away
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions are outside our model inputs; ignore these terms in the loss
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            return total_loss
        else:
            return start_logits, end_logits
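

# A minimal SQuAD-style sketch (not part of the original source): passing
# `start_positions` and `end_positions` returns the averaged start/end
# cross-entropy loss instead of the logits. All tensor values are placeholders.
def _example_question_answering_loss():
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForQuestionAnswering(config)
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    start_positions = torch.LongTensor([1, 0])  # index of the first answer token per example
    end_positions = torch.LongTensor([2, 1])    # index of the last answer token per example
    loss = model(input_ids, token_type_ids, input_mask, start_positions, end_positions)
    return loss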