modelingpreln.py

# DeepSpeed note: code taken from commit 3d59216cec89a363649b4fe3d15295ba936ced0f
# https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/modeling.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open

import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils import checkpoint
import torch.distributed as dist
from torch.nn import Module
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.nn.init as init
import time

#from numba import cuda
#from deepspeed_cuda import DeepSpeedSoftmaxConfig, DeepSpeedSoftmax

logger = logging.getLogger(__name__)

PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
TF_WEIGHTS_NAME = 'model.ckpt'

def load_tf_weights_in_bert(model, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
              "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if any(n in ["adam_v", "adam_m"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model

@torch.jit.script
def f_gelu(x):
    return x * 0.5 * (1.0 + torch.erf(x / 1.41421))


@torch.jit.script
def bias_gelu(bias, y):
    x = bias + y
    return x * 0.5 * (1.0 + torch.erf(x / 1.41421))


@torch.jit.script
def bias_tanh(bias, y):
    x = bias + y
    return torch.tanh(x)


def gelu(x):
    """Implementation of the gelu activation function.
    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    return f_gelu(x)


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}

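# Illustration only (not part of the original file): a minimal sketch, using nothing
# beyond the functions defined above, that writes out the tanh-based GELU
# approximation quoted in the gelu() docstring so it can be compared against the
# erf-based f_gelu(). `_gelu_tanh_approx` is a hypothetical helper added for clarity.
def _gelu_tanh_approx(x):
    # OpenAI GPT style approximation; numerically very close to f_gelu(x).
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
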
class GPUTimer:
    def __init__(self):
        super().__init__()
        # Note: relies on the `from numba import cuda` import that is commented out above.
        self.start = cuda.event()
        self.stop = cuda.event()

    def record(self):
        self.start.record()

    def elapsed(self):
        self.stop.record()
        self.stop.synchronize()
        return self.start.elapsed_time(self.stop) / 1000.0

class LinearActivation(Module):
    r"""Fused Linear and activation Module.
    """
    __constants__ = ['bias']

    def __init__(self, in_features, out_features, weights, biases, act='gelu', bias=True):
        super(LinearActivation, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.fused_gelu = False
        self.fused_tanh = False
        if isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode)):
            if bias and act == 'gelu':
                self.fused_gelu = True
            elif bias and act == 'tanh':
                self.fused_tanh = True
            else:
                self.act_fn = ACT2FN[act]
        else:
            self.act_fn = act
        #self.weight = Parameter(torch.Tensor(out_features, in_features))
        self.weight = weights[5]
        self.bias = biases[5]
        #if bias:
        #    self.bias = Parameter(torch.Tensor(out_features))
        #else:
        #    self.register_parameter('bias', None)
        #self.reset_parameters()

    def reset_parameters(self):
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)

    def forward(self, input):
        if self.fused_gelu:
            #timing = []
            #t1 = GPUTimer()
            #t1.record()
            y = F.linear(input, self.weight, None)
            #timing.append(t1.elapsed())
            #t1.record()
            bg = bias_gelu(self.bias, y)
            #timing.append(t1.elapsed())
            return bg
        elif self.fused_tanh:
            return bias_tanh(self.bias, F.linear(input, self.weight, None))
        else:
            return self.act_fn(F.linear(input, self.weight, self.bias))

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None)

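# Illustration only (not part of the original file): a small sketch, using only the
# helpers defined earlier, showing that the fused path in LinearActivation.forward()
# (F.linear without bias followed by bias_gelu) is equivalent to applying gelu to a
# regular biased linear layer. `_check_fused_gelu` is a hypothetical helper.
def _check_fused_gelu(x, weight, bias):
    fused = bias_gelu(bias, F.linear(x, weight, None))
    unfused = f_gelu(F.linear(x, weight, bias))
    return torch.allclose(fused, unfused, atol=1e-6)
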
class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.
    """
    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 batch_size=8,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 fp16=False):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
        """
        if isinstance(vocab_size_or_config_json_file, str) or (
                sys.version_info[0] == 2
                and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.batch_size = batch_size
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.fp16 = fp16
        else:
            raise ValueError("First argument must be either a vocabulary size (int) "
                             "or the path to a pretrained model config file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = BertConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

try:
    import apex
    #apex.amp.register_half_function(apex.normalization.fused_layer_norm, 'FusedLayerNorm')
    import apex.normalization
    #apex.amp.register_float_function(apex.normalization.FusedLayerNorm, 'forward')
    BertLayerNorm = apex.normalization.FusedLayerNorm
except ImportError:
    print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")

    class BertLayerNorm(nn.Module):
        def __init__(self, hidden_size, eps=1e-12):
            """Construct a layernorm module in the TF style (epsilon inside the square root).
            """
            super(BertLayerNorm, self).__init__()
            self.weight = nn.Parameter(torch.ones(hidden_size))
            self.bias = nn.Parameter(torch.zeros(hidden_size))
            self.variance_epsilon = eps

        def forward(self, x):
            u = x.mean(-1, keepdim=True)
            s = (x - u).pow(2).mean(-1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.variance_epsilon)
            return self.weight * x + self.bias


class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None):
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

class BertSelfAttention(nn.Module):
    def __init__(self, i, config, weights, biases):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.query.weight = weights[0]
        self.query.bias = biases[0]
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.key.weight = weights[1]
        self.key.bias = biases[1]
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.value.weight = weights[2]
        self.value.bias = biases[2]
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.softmax = nn.Softmax(dim=-1)
        #self.softmax_config = DeepSpeedSoftmaxConfig()
        #self.softmax_config.batch_size = config.batch_size
        #self.softmax_config.max_seq_length = config.max_position_embeddings
        #self.softmax_config.hidden_size = config.hidden_size
        #self.softmax_config.heads = config.num_attention_heads
        #self.softmax_config.softmax_id = i
        #self.softmax_config.fp16 = config.fp16
        #self.softmax_config.prob_drop_out = 0.0
        #self.softmax = DeepSpeedSoftmax(i, self.softmax_config)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def transpose_key_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 3, 1)

    def forward(self, hidden_states, attention_mask, grads=None):
        #timing = []
        #t1 = GPUTimer()
        #t1.record()
        mixed_query_layer = self.query(hidden_states)
        #timing.append(t1.elapsed())
        #print("Query elapsed: %s" % (time.clock() - start))
        #t1.record()
        mixed_key_layer = self.key(hidden_states)
        #timing.append(t1.elapsed())
        #print("Key elapsed: %s" % (time.clock() - start))
        #t1.record()
        mixed_value_layer = self.value(hidden_states)
        #timing.append(t1.elapsed())
        #print("Value elapsed: %s" % (time.clock() - start))
        #t1.record()
        query_layer = self.transpose_for_scores(mixed_query_layer)
        #timing.append(t1.elapsed())
        #print("Query-Transform elapsed: %s" % (time.clock() - start))
        #t1.record()
        key_layer = self.transpose_key_for_scores(mixed_key_layer)
        #timing.append(t1.elapsed())
        #print("Key-Transform elapsed: %s" % (time.clock() - start))
        #t1.record()
        value_layer = self.transpose_for_scores(mixed_value_layer)
        #timing.append(t1.elapsed())
        #print("Value-Transform elapsed: %s" % (time.clock() - start))
        # Take the dot product between "query" and "key" to get the raw attention scores.
        #t1.record()
        attention_scores = torch.matmul(query_layer, key_layer)
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        #print("Pytorch: ", attention_scores)
        #timing.append(t1.elapsed())
        #print("Attention-Score elapsed: %s" % (time.clock() - start))
        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
        #t1.record()
        #context_layer = self.softmax(query_layer, key_layer, value_layer, attention_mask)
        #print("Cuda-ext: ", attention_scores1)
        # Normalize the attention scores to probabilities.
        ####attention_probs = self.softmax(attention_scores)
        #timing.append(t1.elapsed())
        #print("Softmax elapsed: %s" % (time.clock() - start))
        #t1 = GPUTimer()
        #t1.record()
        attention_scores = attention_scores + attention_mask
        attention_probs = self.softmax(attention_scores)
        #attention_scores = self.softmax(attention_scores, attention_mask)
        #print("Softmax elapse {0:8.2f} ms", t1.elapsed() * 1000)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        #t1.record()
        context_layer = torch.matmul(attention_probs, value_layer)
        #timing.append(t1.elapsed())
        #print("Context elapsed: %s" % (time.clock() - start))
        #t1.record()
        #context_layer1 = context_layer.permute(0, 1, 3, 2, 4).contiguous()
        #if grads is not None:
        #    context_layer.register_hook(lambda x, self=self: grads.append([x, "Context"]))
        context_layer1 = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer1.size()[:-2] + (self.all_head_size, )
        context_layer1 = context_layer1.view(*new_context_layer_shape)
        #timing.append(t1.elapsed())
        #print("Context-Transform elapsed: %s" % (time.clock() - start))
        if grads is not None:
            query_layer.register_hook(lambda x, self=self: grads.append([x, "Query"]))
            key_layer.register_hook(lambda x, self=self: grads.append([x, "Key"]))
            value_layer.register_hook(lambda x, self=self: grads.append([x, "Value"]))
        return context_layer1

class BertSelfOutput(nn.Module):
    def __init__(self, config, weights, biases):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dense.weight = weights[3]
        self.dense.bias = biases[3]
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        #timing = []
        #t1 = GPUTimer()
        #t1.record()
        hidden_states = self.dense(hidden_states)
        #timing.append(t1.elapsed())
        #print("Attention Output elapsed: %s" % (time.clock() - start))
        hidden_states = self.dropout(hidden_states)
        #t1.record()
        #hidden_states = self.LayerNorm(hidden_states + input_tensor)
        #timing.append(t1.elapsed())
        #print("LayerNorm elapsed: %s" % (time.clock() - start))
        return hidden_states

    def get_w(self):
        return self.dense.weight


class BertAttention(nn.Module):
    def __init__(self, i, config, weights, biases):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(i, config, weights, biases)
        self.output = BertSelfOutput(config, weights, biases)

    def forward(self, input_tensor, attention_mask):
        self_output = self.self(input_tensor, attention_mask)
        attention_output = self.output(self_output, input_tensor)
        return attention_output

    def get_w(self):
        return self.output.get_w()


class BertIntermediate(nn.Module):
    def __init__(self, config, weights, biases):
        super(BertIntermediate, self).__init__()
        self.dense_act = LinearActivation(config.hidden_size, config.intermediate_size,
                                          weights, biases, act=config.hidden_act)

    def forward(self, hidden_states):
        hidden_states = self.dense_act(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config, weights, biases):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dense.weight = weights[6]
        self.dense.bias = biases[6]
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        #timing = []
        #t1 = GPUTimer()
        #t1.record()
        #print(hidden_states)
        #print(self.dense.weight)
        hidden_states = self.dense(hidden_states)
        #timing.append(t1.elapsed())
        #print("FF2 elapsed: %s" % (time.clock() - start))
        hidden_states = self.dropout(hidden_states)
        #t1.record()
        #hidden_states = self.LayerNorm(hidden_states + input_tensor)
        #timing.append(t1.elapsed())
        #print("LayerNorm elapsed: %s" % (time.clock() - start))
        return hidden_states


class BertLayer(nn.Module):
    def __init__(self, i, config, weights, biases):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(i, config, weights, biases)
        self.PreAttentionLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.PostAttentionLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.intermediate = BertIntermediate(config, weights, biases)
        self.output = BertOutput(config, weights, biases)
        self.weight = weights
        self.biases = biases

    def forward(self, hidden_states, attention_mask, grads=None, collect_all_grads=False):
        input_layer_norm = self.PreAttentionLayerNorm(hidden_states)
        attention_output = self.attention(input_layer_norm, attention_mask)
        #print("hidden shape is :", hidden_states.shape)
        intermediate_input = hidden_states + attention_output
        intermediate_layer_norm = self.PostAttentionLayerNorm(intermediate_input)
        intermediate_output = self.intermediate(intermediate_layer_norm)
        layer_output = self.output(intermediate_output, attention_output)
        #attention_output = self.attention(hidden_states, attention_mask)
        #intermediate_output = self.intermediate(attention_output)
        #layer_output = self.output(intermediate_output, attention_output)
        if collect_all_grads:
            # self.weight[0].register_hook(lambda x, self=self: grads.append([x, "Q_W"]))
            # self.biases[0].register_hook(lambda x, self=self: grads.append([x, "Q_B"]))
            # self.weight[1].register_hook(lambda x, self=self: grads.append([x, "K_W"]))
            # self.biases[1].register_hook(lambda x, self=self: grads.append([x, "K_B"]))
            self.weight[2].register_hook(lambda x, self=self: grads.append([x, "V_W"]))
            self.biases[2].register_hook(lambda x, self=self: grads.append([x, "V_B"]))
            self.weight[3].register_hook(lambda x, self=self: grads.append([x, "O_W"]))
            self.biases[3].register_hook(lambda x, self=self: grads.append([x, "O_B"]))
            self.PostAttentionLayerNorm.weight.register_hook(lambda x, self=self: grads.append([x, "N2_W"]))
            self.PostAttentionLayerNorm.bias.register_hook(lambda x, self=self: grads.append([x, "N2_B"]))
            self.weight[5].register_hook(lambda x, self=self: grads.append([x, "int_W"]))
            self.biases[5].register_hook(lambda x, self=self: grads.append([x, "int_B"]))
            self.weight[6].register_hook(lambda x, self=self: grads.append([x, "out_W"]))
            self.biases[6].register_hook(lambda x, self=self: grads.append([x, "out_B"]))
            self.PreAttentionLayerNorm.weight.register_hook(lambda x, self=self: grads.append([x, "norm_W"]))
            self.PreAttentionLayerNorm.bias.register_hook(lambda x, self=self: grads.append([x, "norm_B"]))
        return layer_output + intermediate_input

    def get_w(self):
        return self.attention.get_w()

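# Illustration only (not part of the original file): a minimal sketch of the pre-LN
# ordering implemented by BertLayer.forward() above, with the gradient hooks removed.
# LayerNorm is applied *before* the attention and feed-forward sub-blocks and the
# residuals are added afterwards. `_pre_ln_forward_sketch` is a hypothetical helper.
def _pre_ln_forward_sketch(layer, hidden_states, attention_mask):
    attention_output = layer.attention(layer.PreAttentionLayerNorm(hidden_states), attention_mask)
    intermediate_input = hidden_states + attention_output
    intermediate_output = layer.intermediate(layer.PostAttentionLayerNorm(intermediate_input))
    layer_output = layer.output(intermediate_output, attention_output)
    return layer_output + intermediate_input
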
class BertEncoder(nn.Module):
    def __init__(self, config, weights, biases):
        super(BertEncoder, self).__init__()
        #layer = BertLayer(config, weights, biases)
        self.FinalLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.layer = nn.ModuleList([
            copy.deepcopy(BertLayer(i, config, weights, biases))
            for i in range(config.num_hidden_layers)
        ])
        self.grads = []
        self.graph = []

    def get_grads(self):
        return self.grads

    # def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
    #     all_encoder_layers = []
    #     for layer_module in self.layer:
    #         hidden_states = layer_module(hidden_states, attention_mask)
    #         if output_all_encoded_layers:
    #             all_encoder_layers.append(hidden_states)
    #     if not output_all_encoded_layers:
    #         all_encoder_layers.append(hidden_states)
    #     return all_encoder_layers

    def get_modules(self, big_node, input):
        for mdl in big_node.named_children():
            self.graph.append(mdl)
            self.get_modules(mdl, input)

    def forward(self, hidden_states, attention_mask,
                output_all_encoded_layers=True, checkpoint_activations=False):
        all_encoder_layers = []

        def custom(start, end):
            def custom_forward(*inputs):
                layers = self.layer[start:end]
                x_ = inputs[0]
                for layer in layers:
                    x_ = layer(x_, inputs[1])
                return x_

            return custom_forward

        if checkpoint_activations:
            l = 0
            num_layers = len(self.layer)
            chunk_length = math.ceil(math.sqrt(num_layers))
            while l < num_layers:
                hidden_states = checkpoint.checkpoint(custom(l, l + chunk_length),
                                                      hidden_states, attention_mask * 1)
                l += chunk_length
            # decoder layers
        else:
            for i, layer_module in enumerate(self.layer):
                hidden_states = layer_module(hidden_states, attention_mask,
                                             self.grads, collect_all_grads=True)
                hidden_states.register_hook(
                    lambda x, i=i, self=self: self.grads.append([x, "hidden_state"]))
                #print("pytorch weight is: ", layer_module.get_w())
                if output_all_encoded_layers:
                    all_encoder_layers.append(hidden_states)

        if not output_all_encoded_layers or checkpoint_activations:
            hidden_states = self.FinalLayerNorm(hidden_states)
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers

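# Illustration only (not part of the original file): a sketch of the activation
# checkpointing schedule used in BertEncoder.forward() above. The layer stack is
# split into chunks of roughly sqrt(num_layers) layers, and each chunk is re-run
# during the backward pass instead of storing its intermediate activations.
# `_checkpoint_chunks_sketch` is a hypothetical helper added for clarity only.
def _checkpoint_chunks_sketch(num_layers):
    chunk_length = math.ceil(math.sqrt(num_layers))
    chunks = [(start, min(start + chunk_length, num_layers))
              for start in range(0, num_layers, chunk_length)]
    return chunks  # e.g. 12 layers -> [(0, 4), (4, 8), (8, 12)]
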
#class BertEncoder(nn.Module):
#    def __init__(self, config):
#        super(BertEncoder, self).__init__()
#        layer = BertLayer(config)
#        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
#
#    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
#        all_encoder_layers = []
#        for layer_module in self.layer:
#            hidden_states = layer_module(hidden_states, attention_mask)
#            if output_all_encoded_layers:
#                all_encoder_layers.append(hidden_states)
#        if not output_all_encoded_layers:
#            all_encoder_layers.append(hidden_states)
#        return all_encoder_layers


class BertPooler(nn.Module):
    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act="tanh")

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense_act(first_token_tensor)
        return pooled_output


class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)

    def forward(self, hidden_states):
        hidden_states = self.dense_act(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class BertLMPredictionHead(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertLMPredictionHead, self).__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                 bert_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        torch.cuda.nvtx.range_push(
            "decoder input.size() = {}, weight.size() = {}".format(
                hidden_states.size(), self.decoder.weight.size()))
        hidden_states = self.decoder(hidden_states) + self.bias
        torch.cuda.nvtx.range_pop()
        return hidden_states


class BertOnlyMLMHead(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertOnlyNSPHead(nn.Module):
    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score


class BertPreTrainingHeads(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score

class BertPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__))
        self.config = config

    def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None,
                        cache_dir=None, from_tf=False, *inputs, **kwargs):
        """
        Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.
        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-large-cased`
                    . `bert-base-multilingual-uncased`
                    . `bert-base-multilingual-cased`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `model.chkpt` a TensorFlow checkpoint
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = pretrained_model_name_or_path
        # No cache resolution is performed here, so the archive path is used directly.
        resolved_archive_file = archive_file
        if resolved_archive_file == archive_file:
            logger.info("loading archive file {}".format(archive_file))
        else:
            logger.info("loading archive file {} from cache at {}".format(
                archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file) or from_tf:
            serialization_dir = resolved_archive_file
        else:
            # Extract archive to temp dir
            tempdir = tempfile.mkdtemp()
            logger.info("extracting archive file {} to temp dir {}".format(
                resolved_archive_file, tempdir))
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        # Load config
        config_file = os.path.join(serialization_dir, CONFIG_NAME)
        config = BertConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
            state_dict = torch.load(
                weights_path,
                map_location='cpu' if not torch.cuda.is_available() else None)
        if tempdir:
            # Clean up temp dir
            shutil.rmtree(tempdir)
        if from_tf:
            # Directly load from a TensorFlow checkpoint
            weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
            return load_tf_weights_in_bert(model, weights_path)
        # Load from a PyTorch state_dict
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                         missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')

        start_prefix = ''
        if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
            start_prefix = 'bert.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                model.__class__.__name__, "\n\t".join(error_msgs)))
        return model

class BertModel(BertPreTrainedModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").
    Params:
        config: a BertConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by the `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first character of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None,
                output_all_encoded_layers=True, checkpoint_activations=False):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # This attention mask is simpler than the triangular masking of causal attention
        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(
            embedding_output,
            extended_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers,
            checkpoint_activations=checkpoint_activations)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output

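# Illustration only (not part of the original file): a minimal sketch of how
# BertModel.forward() above turns a [batch_size, seq_length] padding mask into the
# additive [batch_size, 1, 1, seq_length] mask that BertSelfAttention adds to the
# raw attention scores. `_extended_attention_mask_sketch` is a hypothetical helper.
def _extended_attention_mask_sketch(attention_mask, dtype=torch.float32):
    extended = attention_mask.unsqueeze(1).unsqueeze(2).to(dtype)
    # 0.0 where tokens may be attended to, -10000.0 at padded positions.
    return (1.0 - extended) * -10000.0
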
  974. class BertForPreTraining(BertPreTrainedModel):
  975. """BERT model with pre-training heads.
  976. This module comprises the BERT model followed by the two pre-training heads:
  977. - the masked language modeling head, and
  978. - the next sentence classification head.
  979. Params:
  980. config: a BertConfig class instance with the configuration to build a new model.
  981. Inputs:
  982. `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
  983. with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
  984. `extract_features.py`, `run_classifier.py` and `run_squad.py`)
  985. `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
  986. types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
  987. a `sentence B` token (see BERT paper for more details).
  988. `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
  989. selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
  990. input sequence length in the current batch. It's the mask that we typically use for attention when
  991. a batch has varying length sentences.
  992. `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
  993. with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
  994. is only computed for the labels set in [0, ..., vocab_size]
  995. `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
  996. with indices selected in [0, 1].
  997. 0 => next sentence is the continuation, 1 => next sentence is a random sentence.
  998. Outputs:
  999. if `masked_lm_labels` and `next_sentence_label` are not `None`:
  1000. Outputs the total_loss which is the sum of the masked language modeling loss and the next
  1001. sentence classification loss.
  1002. if `masked_lm_labels` or `next_sentence_label` is `None`:
  1003. Outputs a tuple comprising
  1004. - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
  1005. - the next sentence classification logits of shape [batch_size, 2].
  1006. Example usage:
  1007. ```python
  1008. # Already been converted into WordPiece token ids
  1009. input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
  1010. input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
  1011. token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
  1012. config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
  1013. num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
  1014. model = BertForPreTraining(config)
  1015. masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
  1016. ```
  1017. """
  1018. def __init__(self, config, args):
  1019. super(BertForPreTraining, self).__init__(config)
  1020. self.summary_writer = None
  1021. if dist.get_rank() == 0:
  1022. self.summary_writer = args.summary_writer
  1023. self.samples_per_step = dist.get_world_size() * args.train_batch_size
  1024. self.sample_count = self.samples_per_step
  1025. self.bert = BertModel(config)
  1026. self.cls = BertPreTrainingHeads(config,
  1027. self.bert.embeddings.word_embeddings.weight)
  1028. self.apply(self.init_bert_weights)
  1029. def log_summary_writer(self, logs: dict, base='Train'):
  1030. if dist.get_rank() == 0:
  1031. module_name = "Samples" #self._batch_module_name.get(batch_type, self._get_batch_type_error(batch_type))
  1032. for key, log in logs.items():
  1033. self.summary_writer.add_scalar(f'{base}/{module_name}/{key}',
  1034. log,
  1035. self.sample_count)
  1036. self.sample_count += self.samples_per_step
  1037. def forward(self, batch, log=True):
  1038. #input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, checkpoint_activations=False):
  1039. input_ids = batch[1]
  1040. token_type_ids = batch[3]
  1041. attention_mask = batch[2]
  1042. masked_lm_labels = batch[5]
  1043. next_sentence_label = batch[4]
  1044. checkpoint_activations = False
  1045. sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
  1046. output_all_encoded_layers=False, checkpoint_activations=checkpoint_activations)
  1047. prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
  1048. if masked_lm_labels is not None and next_sentence_label is not None:
  1049. loss_fct = CrossEntropyLoss(ignore_index=-1)
  1050. masked_lm_loss = loss_fct(prediction_scores.view(-1,
  1051. self.config.vocab_size),
  1052. masked_lm_labels.view(-1))
  1053. next_sentence_loss = loss_fct(seq_relationship_score.view(-1,
  1054. 2),
  1055. next_sentence_label.view(-1))
  1056. #print("loss is {} {}".format(masked_lm_loss, next_sentence_loss))
  1057. total_loss = masked_lm_loss + next_sentence_loss
  1058. # if log:
  1059. # self.log_summary_writer(logs={'train_loss': total_loss.item()})
  1060. return total_loss
  1061. else:
  1062. return prediction_scores, seq_relationship_score


class BertForMaskedLM(BertPreTrainedModel):
    """BERT model with the masked language modeling head.
    This module comprises the BERT model followed by the masked language modeling head.

    Params:
        config: a BertConfig class instance with the configuration to build a new model.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            type indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see the BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked); the loss
            is only computed for the labels set in [0, ..., vocab_size].

    Outputs:
        if `masked_lm_labels` is not `None`:
            Outputs the masked language modeling loss.
        if `masked_lm_labels` is `None`:
            Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForMaskedLM(config)
    masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForMaskedLM, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(self,
                input_ids,
                token_type_ids=None,
                attention_mask=None,
                masked_lm_labels=None,
                checkpoint_activations=False):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
                                       output_all_encoded_layers=False)
        prediction_scores = self.cls(sequence_output)

        if masked_lm_labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size),
                                      masked_lm_labels.view(-1))
            return masked_lm_loss
        else:
            return prediction_scores


class BertForNextSentencePrediction(BertPreTrainedModel):
    """BERT model with the next sentence prediction head.
    This module comprises the BERT model followed by the next sentence classification head.

    Params:
        config: a BertConfig class instance with the configuration to build a new model.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            type indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see the BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `next_sentence_label`: next sentence classification labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, 1].
            0 => next sentence is the continuation, 1 => next sentence is a random sentence.

    Outputs:
        if `next_sentence_label` is not `None`:
            Outputs the next sentence classification loss.
        if `next_sentence_label` is `None`:
            Outputs the next sentence classification logits of shape [batch_size, 2].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForNextSentencePrediction(config)
    seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForNextSentencePrediction, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)
        self.apply(self.init_bert_weights)

    def forward(self,
                input_ids,
                token_type_ids=None,
                attention_mask=None,
                next_sentence_label=None,
                checkpoint_activations=False):
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
                                     output_all_encoded_layers=False)
        seq_relationship_score = self.cls(pooled_output)

        if next_sentence_label is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2),
                                          next_sentence_label.view(-1))
            return next_sentence_loss
        else:
            return seq_relationship_score


class BertForSequenceClassification(BertPreTrainedModel):
    """BERT model for classification.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_labels`: the number of classes for the classifier. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            type indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see the BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_labels - 1].

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_labels].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    num_labels = 2
    model = BertForSequenceClassification(config, num_labels)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, num_labels):
        super(BertForSequenceClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self,
                input_ids,
                token_type_ids=None,
                attention_mask=None,
                labels=None,
                checkpoint_activations=False):
        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
                                     output_all_encoded_layers=False)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            return loss
        else:
            return logits


class BertForMultipleChoice(BertPreTrainedModel):
    """BERT model for multiple choice tasks.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_choices`: the number of choices to score. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the token type indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
            and type 1 corresponds to a `sentence B` token (see the BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices - 1].

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_choices].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
    input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]], [[1, 1, 0], [1, 0, 0]]])
    token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]], [[0, 1, 1], [0, 0, 1]]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    num_choices = 2
    model = BertForMultipleChoice(config, num_choices)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, num_choices):
        super(BertForMultipleChoice, self).__init__(config)
        self.num_choices = num_choices
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights)

    def forward(self,
                input_ids,
                token_type_ids=None,
                attention_mask=None,
                labels=None,
                checkpoint_activations=False):
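        # Each choice is scored independently: the [batch_size, num_choices, seq_len] inputs are
        # flattened to [batch_size * num_choices, seq_len], encoded once, and the per-choice
        # scores are reshaped back to [batch_size, num_choices] before the cross-entropy loss.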
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask,
                                     output_all_encoded_layers=False)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, self.num_choices)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            return loss
        else:
            return reshaped_logits


class BertForTokenClassification(BertPreTrainedModel):
    """BERT model for token-level classification.
    This module is composed of the BERT model with a linear layer on top of
    the full hidden state of the last layer.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_labels`: the number of classes for the classifier. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            type indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see the BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [0, ..., num_labels - 1].

    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, sequence_length, num_labels].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    num_labels = 2
    model = BertForTokenClassification(config, num_labels)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, num_labels):
        super(BertForTokenClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self,
                input_ids,
                token_type_ids=None,
                attention_mask=None,
                labels=None,
                checkpoint_activations=False):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
                                       output_all_encoded_layers=False)
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
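            # (positions where attention_mask == 0 are padding; they are filtered out below and
            # do not contribute to the token-classification loss)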
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            return loss
        else:
            return logits


class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT model for Question Answering (span extraction).
    This module is composed of the BERT model with a linear layer on top of
    the sequence output that computes start_logits and end_logits.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            type indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see the BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `start_positions`: position of the first token of the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence, and positions outside of the sequence are not
            taken into account for computing the loss.
        `end_positions`: position of the last token of the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence, and positions outside of the sequence are not
            taken into account for computing the loss.

    Outputs:
        if `start_positions` and `end_positions` are not `None`:
            Outputs the total_loss, which is the average of the CrossEntropy losses for the start and end token positions.
        if `start_positions` or `end_positions` is `None`:
            Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
            position tokens, each of shape [batch_size, sequence_length].

    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

    model = BertForQuestionAnswering(config)
    start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.bert = BertModel(config)
        # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.apply(self.init_bert_weights)

    def forward(self,
                input_ids,
                token_type_ids=None,
                attention_mask=None,
                start_positions=None,
                end_positions=None,
                checkpoint_activations=False):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
                                       output_all_encoded_layers=False)
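        # qa_outputs maps each token's hidden state to two scores; splitting on the last
        # dimension yields start and end logits, each of shape [batch_size, sequence_length].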
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the position tensors may carry an extra dimension; squeeze it.
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions are outside our model inputs; we ignore these terms.
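            # Clamping to `ignored_index` (== sequence_length) maps any out-of-range position onto
            # the index that CrossEntropyLoss is told to ignore, so such examples add no loss.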
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            return total_loss
        else:
            return start_logits, end_logits
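
# Illustrative sketch (not part of the original module): decoding a predicted span from
# BertForQuestionAnswering outputs by taking the argmax of the start and end logits for the
# first example in a batch. A full decoder would also enforce end_index >= start_index and
# restrict candidates to the passage portion of the input.
#
#   start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
#   start_index = int(start_logits[0].argmax())
#   end_index = int(end_logits[0].argmax())
#   answer_token_ids = input_ids[0, start_index:end_index + 1]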