@ -1 +1,324 @@
|
||||
# 使用PyTorch进行深度学习
|
||||
# 使用PyTorch进行深度学习
|
||||
## 1.深度学习构建模块:仿射变换, 非线性函数以及目标函数
|
||||
深度学习表现为使用更巧妙的方法将线性函数和非线性函数进行组合。非线性函数的引入使得训练出来的模型更加强大。在本节中,我们将学
|
||||
习这些核心组件,建立目标函数,并理解模型是如何构建的。
|
||||
|
||||
#### 1.1 仿射变换
|
||||
深度学习的核心组件之一是仿射变换。仿射变换是一个关于矩阵 A 和向量 x、b 的函数 f(x),如下所示:
|
||||
|
||||
f(x) = Ax + b
|
||||
|
||||
对于矩阵A和向量x,b。 这里要学习的参数是A和b。 通常,b被称为偏差项。
|
||||
|
||||
PyTorch以及大多数的深度学习框架所做的事情都与传统的线性代数有些不同。它的映射输入是行而不是列。也就是说,下面代码输出的第i行
|
||||
是输入的第i行进行A变换,并加上偏移项的结果。看下面的例子:
|
||||
|
||||
```buildoutcfg
|
||||
# Author: Robert Guthrie
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
import torch.optim as optim
|
||||
|
||||
torch.manual_seed(1)
|
||||
```
|
||||
```buildoutcfg
|
||||
lin = nn.Linear(5, 3) # maps from R^5 to R^3, parameters A, b
|
||||
# data is 2x5. A maps from 5 to 3... can we map "data" under A?
|
||||
data = torch.randn(2, 5)
|
||||
print(lin(data)) # yes
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor([[ 0.1755, -0.3268, -0.5069],
|
||||
[-0.6602, 0.2260, 0.1089]], grad_fn=<AddmmBackward>)
|
||||
```
|
||||
|
||||
#### 1.2 非线性函数
|
||||
首先,注意以下这个例子,它将解释为什么我们需要非线性函数。假设我们有两个仿射变换 f(x) = Ax + b 和 g(x) = Cx + d。
|
||||
那么 f(g(x)) 又是什么呢?
|
||||
|
||||

|
||||
|
||||
AC 是一个矩阵,Ad + b 是一个向量,可以看出,两个仿射变换的组合还是一个仿射变换。
|
||||
|
||||
由此可以看出,使用以上方法将多个仿射变换组合成的长链式的神经网络,相对于单个仿射变换并没有性能上的提升。
|
||||
|
||||
但是如果我们在两个仿射变换之间引入非线性,那么结果就大不一样了,我们可以构建出一个高性能的模型。
|
||||
|
||||
最常用的核心非线性函数有:tanh(x)、σ(x) 和 ReLU(x)。你可能会想:
|
||||
“为什么是这些函数?明明有其他更多的非线性函数。”这些函数常用的原因是它们拥有可以容易计算的梯度,而计算梯度是学习的本质。例如:
|
||||
|
||||

|
||||
|
||||
> 注意:尽管你可能在AI入门课程中学过一些神经网络,其中 σ(x) 是默认的非线性函数,但是通常
|
||||
在实际使用的过程中都会避开它们。这是因为当参数的绝对值增长时,梯度会很快消失。小梯度意味着很难学习。因此大部分人默认选择
|
||||
tanh或者ReLU。
|
||||
|
||||
```buildoutcfg
|
||||
# 在PyTorch中,大多数非线性函数都在torch.nn.functional中(我们将它导入为F)
|
||||
# 请注意,非线性函数通常没有像仿射变换那样的参数。
|
||||
# 也就是说,他们没有在训练期间更新的权重。
|
||||
data = torch.randn(2, 2)
|
||||
print(data)
|
||||
print(F.relu(data))
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor([[-0.5404, -2.2102],
|
||||
[ 2.1130, -0.0040]])
|
||||
tensor([[0.0000, 0.0000],
|
||||
[2.1130, 0.0000]])
|
||||
```
|
||||
|
||||
#### 1.3 Softmax和概率
|
||||
Softmax(x)也是一个非线性函数,但它的特殊之处在于,它通常是神经网络的最后一个操作。这是因为它接受实数向量,并且返回一个概率分
|
||||
布。它的定义如下。设x为实数向量(正、负,无论什么,没有约束)。然后Softmax(x)的第i个分量是:
|
||||
|
||||

|
||||
|
||||
很明显,输出的是一个概率分布:每一个元素都非负且和为1。
|
||||
|
||||
你也可以认为这只是一个对输入的元素进行的求幂运算符,使所有的内容都非负,然后除以规范化常量。
|
||||
|
||||
```buildoutcfg
|
||||
# Softmax也在torch.nn.functional中
|
||||
data = torch.randn(5)
|
||||
print(data)
|
||||
print(F.softmax(data, dim=0))
|
||||
print(F.softmax(data, dim=0).sum()) # 总和为1,因为它是一个分布!
|
||||
print(F.log_softmax(data, dim=0)) # theres also log_softmax
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor([ 1.3800, -1.3505, 0.3455, 0.5046, 1.8213])
|
||||
tensor([0.2948, 0.0192, 0.1048, 0.1228, 0.4584])
|
||||
tensor(1.)
|
||||
tensor([-1.2214, -3.9519, -2.2560, -2.0969, -0.7801])
|
||||
```
|
||||
|
||||
#### 1.4 目标函数
|
||||
目标函数正是神经网络通过训练来最小化的函数(因此,它常常被称作损失函数或者成本函数)。这需要首先选择一个训练数据实例,通过神
|
||||
经网络运行它并计算输出的损失,然后通过损失函数的导数来更新模型的参数。直观来讲,如果模型完全信任它的计算结果而结果是错误的,
|
||||
那么损失将会很高。反之,当模型信任计算结果而结果正确时,损失会很低。
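
下面给出一个最小的示意(数值只是随便取的,仅用于演示):当正确类别恰好是模型最自信的类别时,负对数似然损失很小;当正确类别是模型最不信任的类别时,损失就很大。

```python
log_probs = F.log_softmax(torch.tensor([[2.0, 0.5, -1.0]]), dim=1)  # 模型输出的对数概率

loss_fn = nn.NLLLoss()
print(loss_fn(log_probs, torch.tensor([0])))  # 正确类别是模型最自信的那个:损失小
print(loss_fn(log_probs, torch.tensor([2])))  # 正确类别是模型最不信任的那个:损失大
```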
|
||||
|
||||
在你的训练实例中最小化损失函数的目的是使你的网络拥有很好的泛化能力,可以在开发数据集,测试数据集以及实际生产中拥有很小的损失。
|
||||
损失函数的一个例子是负对数似然损失函数,它经常出现在多类别分类问题中。在有监督的多类别分类中,这意味着训练网络去最小化正确输出的负对
|
||||
数概率(等效的于最大化正确输出的对数概率)。
|
||||
|
||||
## 2.优化和训练
|
||||
那么,我们该怎么计算一个实例的损失函数呢?计算之后又该做什么呢?我们在之前已经了解到,PyTorch 中的 Tensor 知道如何计算梯度以及计算
|
||||
梯度相关的东西。由于我们的损失正是一个 Tensor ,因此我们可以使用所有与梯度有关的参数来计算梯度。然后我们可以进行标准梯度更新。
|
||||
设 θ 为我们的参数,L(θ) 为损失函数,η 为一个正的学习率。然后:
|
||||
|
||||

|
||||
|
||||
目前,有大量的算法和积极的研究试图做一些除了这种普通的梯度更新以外的事情。许多人尝试去基于训练时发生的事情来改变学习率。但是,
|
||||
你不需要担心这些特殊的算法到底在干什么,除非你真的很感兴趣。Torch提供了大量的算法在torch.optim包中,且全部都是透明的。在语法
|
||||
上使用复杂的算法和使用最简单的梯度更新一样简单。但是尝试不同的更新算法和在更新算法中使用不同的参数(例如不同的初始学习率)对
|
||||
于优化你的网络的性能很重要。通常,仅仅将普通的SGD替换成一个例如*Adam*或者*RMSProp*优化器都可以显著的提升性能。
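
下面是一个最小的示意(假设 model 是任意已经定义好的 `nn.Module`,学习率数值仅作演示),把普通的 SGD 换成 Adam 只需要改一行:

```python
optimizer = optim.SGD(model.parameters(), lr=0.1)     # 普通的随机梯度下降
optimizer = optim.Adam(model.parameters(), lr=0.001)  # 换成Adam:接口完全相同
```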
|
||||
|
||||
## 3.使用PyTorch创建网络组件
|
||||
在我们继续关注 NLP 之前,让我们先使用PyTorch构建一个只用仿射变换和非线性函数组成的网络示例。我们也将了解如何计算损失函数,使
|
||||
用PyTorch内置的负对数似然函数,以及通过反向传播更新参数。
|
||||
|
||||
所有的网络组件都应该继承`nn.Module`并重写`forward()`方法。继承`nn.Module`会为你的组件提供许多功能。例如,它可以跟踪可训练的
|
||||
参数,你可以通过`.to(device)`方法在 CPU 和 GPU 之间交换它们。`.to(device)`方法中的 device 可以是CPU设备`torch.device("cpu")`
|
||||
或者 CUDA 设备`torch.device("cuda:0")`。
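
下面是一个最小的示意(设备字符串仅作示例),展示如何用 `.to(device)` 把模块和数据放到同一个设备上:

```python
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
lin = nn.Linear(5, 3).to(device)      # 模块(连同它的参数)被移动到所选设备
data = torch.randn(2, 5).to(device)   # 输入数据也必须在同一个设备上
print(lin(data).device)
```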
|
||||
|
||||
让我们写一个神经网络的示例,它接受一些稀疏的BOW(词袋模式)表示,然后输出分布在两个标签上的概率:“English”和“Spanish”。这个模型只是一个逻辑回归。
|
||||
|
||||
#### 3.1 示例: 基于逻辑回归与词袋模式的文本分类器
|
||||
我们的模型将会把BOW表示映射成标签上的对数概率。我们为词汇中的每个词指定一个索引。例如,我们所有的词汇是两个单词“hello”和"world",
|
||||
用0和1表示。句子“hello hello hello hello”的表示是
|
||||
|
||||
```buildoutcfg
|
||||
[4,0]
|
||||
```
|
||||
对于“hello world world hello”, 则表示成
|
||||
|
||||
```buildoutcfg
|
||||
[2,2]
|
||||
```
|
||||
通常表示成
|
||||
|
||||
```buildoutcfg
|
||||
[Count(hello),Count(world)]
|
||||
```
|
||||
|
||||
用x来表示这个BOW向量。网络的输出是:
|
||||
|
||||

|
||||
|
||||
也就是说,我们将数据传入一个仿射变换,然后对结果做 log softmax。
|
||||
|
||||
```buildoutcfg
|
||||
data = [("me gusta comer en la cafeteria".split(), "SPANISH"),
|
||||
("Give it to me".split(), "ENGLISH"),
|
||||
("No creo que sea una buena idea".split(), "SPANISH"),
|
||||
("No it is not a good idea to get lost at sea".split(), "ENGLISH")]
|
||||
|
||||
test_data = [("Yo creo que si".split(), "SPANISH"),
|
||||
("it is lost on me".split(), "ENGLISH")]
|
||||
|
||||
# word_to_ix maps each word in the vocab to a unique integer, which will be its
|
||||
# index into the Bag of words vector
|
||||
word_to_ix = {}
|
||||
for sent, _ in data + test_data:
|
||||
for word in sent:
|
||||
if word not in word_to_ix:
|
||||
word_to_ix[word] = len(word_to_ix)
|
||||
print(word_to_ix)
|
||||
|
||||
VOCAB_SIZE = len(word_to_ix)
|
||||
NUM_LABELS = 2
|
||||
|
||||
class BoWClassifier(nn.Module): # inheriting from nn.Module!
|
||||
|
||||
def __init__(self, num_labels, vocab_size):
|
||||
# calls the init function of nn.Module. Dont get confused by syntax,
|
||||
# just always do it in an nn.Module
|
||||
super(BoWClassifier, self).__init__()
|
||||
|
||||
# Define the parameters that you will need. In this case, we need A and b,
|
||||
# the parameters of the affine mapping.
|
||||
# Torch defines nn.Linear(), which provides the affine map.
|
||||
# Make sure you understand why the input dimension is vocab_size
|
||||
# and the output is num_labels!
|
||||
self.linear = nn.Linear(vocab_size, num_labels)
|
||||
|
||||
# NOTE! The non-linearity log softmax does not have parameters! So we don't need
|
||||
# to worry about that here
|
||||
|
||||
def forward(self, bow_vec):
|
||||
# Pass the input through the linear layer,
|
||||
# then pass that through log_softmax.
|
||||
# Many non-linearities and other functions are in torch.nn.functional
|
||||
return F.log_softmax(self.linear(bow_vec), dim=1)
|
||||
|
||||
def make_bow_vector(sentence, word_to_ix):
|
||||
vec = torch.zeros(len(word_to_ix))
|
||||
for word in sentence:
|
||||
vec[word_to_ix[word]] += 1
|
||||
return vec.view(1, -1)
|
||||
|
||||
def make_target(label, label_to_ix):
|
||||
return torch.LongTensor([label_to_ix[label]])
|
||||
|
||||
model = BoWClassifier(NUM_LABELS, VOCAB_SIZE)
|
||||
|
||||
# 模型知道它的参数。 下面的第一个输出是A,第二个输出是b。
|
||||
# 无论何时将组件分配给模块的__init__函数中的类变量,都是使用self.linear = nn.Linear(...)行完成的。
|
||||
# 然后通过PyTorch,你的模块(在本例中为BoWClassifier)将存储nn.Linear参数的知识
|
||||
for param in model.parameters():
|
||||
print(param)
|
||||
|
||||
# 要运行模型,请传入BoW矢量
|
||||
# 这里我们不需要训练,所以代码包含在torch.no_grad()中
|
||||
with torch.no_grad():
|
||||
sample = data[0]
|
||||
bow_vector = make_bow_vector(sample[0], word_to_ix)
|
||||
log_probs = model(bow_vector)
|
||||
print(log_probs)
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
{'me': 0, 'gusta': 1, 'comer': 2, 'en': 3, 'la': 4, 'cafeteria': 5, 'Give': 6, 'it': 7, 'to': 8, 'No': 9, 'creo': 10, 'que': 11, 'sea': 12, 'una': 13, 'buena': 14, 'idea': 15, 'is': 16, 'not': 17, 'a': 18, 'good': 19, 'get': 20, 'lost': 21, 'at': 22, 'Yo': 23, 'si': 24, 'on': 25}
|
||||
Parameter containing:
|
||||
tensor([[ 0.1194, 0.0609, -0.1268, 0.1274, 0.1191, 0.1739, -0.1099, -0.0323,
|
||||
-0.0038, 0.0286, -0.1488, -0.1392, 0.1067, -0.0460, 0.0958, 0.0112,
|
||||
0.0644, 0.0431, 0.0713, 0.0972, -0.1816, 0.0987, -0.1379, -0.1480,
|
||||
0.0119, -0.0334],
|
||||
[ 0.1152, -0.1136, -0.1743, 0.1427, -0.0291, 0.1103, 0.0630, -0.1471,
|
||||
0.0394, 0.0471, -0.1313, -0.0931, 0.0669, 0.0351, -0.0834, -0.0594,
|
||||
0.1796, -0.0363, 0.1106, 0.0849, -0.1268, -0.1668, 0.1882, 0.0102,
|
||||
0.1344, 0.0406]], requires_grad=True)
|
||||
Parameter containing:
|
||||
tensor([0.0631, 0.1465], requires_grad=True)
|
||||
tensor([[-0.5378, -0.8771]])
|
||||
```
|
||||
|
||||
上面的哪一个值对应 ENGLISH 的对数概率,哪一个对应 SPANISH 的对数概率?我们还没有定义,但如果想要训练这个模型,就必须先定义它们。
|
||||
|
||||
```buildoutcfg
|
||||
label_to_ix = {"SPANISH": 0, "ENGLISH": 1}
|
||||
```
|
||||
|
||||
让我们来训练吧! 我们将实例传入来获取对数概率,计算损失函数,计算损失函数的梯度,然后使用一个梯度步长来更新参数。在PyTorch的`nn`
|
||||
包里提供了损失函数。`nn.NLLLoss()`是我们想要的负对数似然损失函数。`torch.optim`中也定义了优化方法。这里,我们只使用SGD。
|
||||
|
||||
注意,`NLLLoss`的输入是一个对数概率向量和一个目标标签,它不会为我们计算对数概率。这也是为什么我们网络的最后一层是`log_softmax`
|
||||
的原因。损失函数`nn.CrossEntropyLoss()`除了会先对输入额外做一次 log softmax 之外,和`NLLLoss()`没什么区别。
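
下面用一个最小的对照示例说明这一点(张量的形状和数值只是示意):`nn.CrossEntropyLoss()` 作用在原始得分上,等价于先做 `log_softmax` 再用 `nn.NLLLoss()`。

```python
logits = torch.randn(3, 5)            # 3个样本、5个类别的原始得分
target = torch.tensor([1, 0, 4])      # 每个样本的正确类别

ce = nn.CrossEntropyLoss()(logits, target)
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), target)
print(ce, nll)                        # 两个数值相同
```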
|
||||
|
||||
```buildoutcfg
|
||||
# 在我们训练之前运行测试数据,只是为了看到之前-之后
|
||||
with torch.no_grad():
|
||||
for instance, label in test_data:
|
||||
bow_vec = make_bow_vector(instance, word_to_ix)
|
||||
log_probs = model(bow_vec)
|
||||
print(log_probs)
|
||||
|
||||
# 打印与“creo”对应的矩阵列
|
||||
print(next(model.parameters())[:, word_to_ix["creo"]])
|
||||
|
||||
loss_function = nn.NLLLoss()
|
||||
optimizer = optim.SGD(model.parameters(), lr=0.1)
|
||||
|
||||
# 通常,您希望多次传递训练数据.
|
||||
# 100比实际数据集大得多,但真实数据集有两个以上的实例。
|
||||
# 通常,在5到30个epochs之间是合理的。
|
||||
for epoch in range(100):
|
||||
for instance, label in data:
|
||||
# 步骤1: 请记住,PyTorch会累积梯度。
|
||||
# We need to clear them out before each instance
|
||||
model.zero_grad()
|
||||
|
||||
# 步骤2:制作我们的BOW向量,并且我们必须将目标作为整数包装在Tensor中。
|
||||
# 例如,如果目标是SPANISH,那么我们包装整数0.
|
||||
# 然后,loss函数知道对数概率的第0个元素是对应于SPANISH的对数概率
|
||||
bow_vec = make_bow_vector(instance, word_to_ix)
|
||||
target = make_target(label, label_to_ix)
|
||||
|
||||
# 步骤3:运行我们的前向传递.
|
||||
log_probs = model(bow_vec)
|
||||
|
||||
# 步骤4: 计算损失,反向传播求梯度,并通过调用optimizer.step()来更新参数
|
||||
loss = loss_function(log_probs, target)
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
|
||||
with torch.no_grad():
|
||||
for instance, label in test_data:
|
||||
bow_vec = make_bow_vector(instance, word_to_ix)
|
||||
log_probs = model(bow_vec)
|
||||
print(log_probs)
|
||||
|
||||
# 对应西班牙语的指数上升,英语下降!
|
||||
print(next(model.parameters())[:, word_to_ix["creo"]])
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor([[-0.9297, -0.5020]])
|
||||
tensor([[-0.6388, -0.7506]])
|
||||
tensor([-0.1488, -0.1313], grad_fn=<SelectBackward>)
|
||||
tensor([[-0.2093, -1.6669]])
|
||||
tensor([[-2.5330, -0.0828]])
|
||||
tensor([ 0.2803, -0.5605], grad_fn=<SelectBackward>)
|
||||
```
|
||||
|
||||
我们得到了正确的结果!你可以看到第一个测试例子中 SPANISH 的对数概率高得多,而第二个测试例子中 ENGLISH 的对数概率更高,结果也应该
|
||||
是这样。
|
||||
|
||||
现在你了解了如何创建一个PyTorch组件,将数据传入并进行梯度更新。现在我们已经可以开始进行深度学习上的自然语言处理了。
|
||||
|
||||
|
@ -11,16 +11,16 @@
|
||||
如何变化。如果你想要尝试一些真实数据,您有能力删除本示例中的模型并重新训练他们。
|
||||
|
||||
* PyTorch简介:
|
||||

|
||||
[PyTorch简介](https://github.com/fendouai/PyTorchDocs/blob/master/FifthSection/PyTorch_Introuction.md)
|
||||
|
||||
* 使用PyTorch进行深度学习:
|
||||

|
||||
[使用PyTorch进行深度学习](https://github.com/fendouai/PyTorchDocs/blob/master/FifthSection/DL_with_PyTorch.md)
|
||||
|
||||
* 词嵌入:编码形式的词汇语义:
|
||||

|
||||
[词嵌入:编码形式的词汇语义](https://github.com/fendouai/PyTorchDocs/blob/master/FifthSection/Word_Embedding.md)
|
||||
|
||||
* 序列模型和长短句记忆(LSTM)模型:
|
||||

|
||||
[序列模型和长短句记忆(LSTM)模型](https://github.com/fendouai/PyTorchDocs/blob/master/FifthSection/Sequence_and_LSTM_Network.md)
|
||||
|
||||
* Advanced:制定动态决策和BI-LSTM CRF:
|
||||

|
||||
* 高级:制定动态决策和BI-LSTM CRF:
|
||||
[制定动态决策和BI-LSTM CRF](https://github.com/fendouai/PyTorchDocs/blob/master/FifthSection/Dynamic_Desicion_Bi-LSTM.md)
|
@ -1 +1,304 @@
|
||||
# Advanced:制定动态决策和BI-LSTM CRF
|
||||
# 高级:制定动态决策和BI-LSTM CRF
|
||||
## 1.动态与静态深度学习工具包
|
||||
Pytorch是一种*动态*神经网络套件。另一个动态套件的例子是[Dynet](https://github.com/clab/dynet)(我之所以提到这一点,因为与
|
||||
Pytorch和Dynet一起使用是相似的。如果你在Dynet中看到一个例子,它可能会帮助你在Pytorch中实现它)。相反的是*静态*工具包,其中包
|
||||
括Theano,Keras,TensorFlow等。核心区别如下:
|
||||
* 在静态工具包中,您可以定义一次计算图,对其进行编译,然后将实例流式传输给它。
|
||||
* 在动态工具包中,为每个实例定义计算图。它永远不会被编译并且是即时执行的。
|
||||
|
||||
在没有很多经验的情况下,很难理解其中的差异。一个例子是假设我们想要构建一个深层组成解析器。假设我们的模型大致涉及以下步骤:
|
||||
* 我们自底向上地建造树
|
||||
* 标记根节点(句子的单词)
|
||||
* 从那里,使用神经网络和单词的嵌入来找到形成组成部分的组合。每当你形成一个新的成分时,使用某种技术来嵌入成分。在这种情况下,
|
||||
我们的网络架构将完全取决于输入句子。在“The green cat scratched the wall”一句中,在模型中的某个点上,我们想要结合跨度 (i, j, r) = (1, 3, NP)
|
||||
(即,NP 组成部分跨越单词1到单词3,在这种情况下是“The green cat”)。
|
||||
|
||||
然而,另一句话可能是“Somewhere, the big fat cat scratched the wall”。在这句话中,我们希望在某个时刻形成组成部分 (2, 4, NP)。
|
||||
我们想要形成的成分将取决于实例。如果像静态工具包那样只编译一次计算图,那么编写这个逻辑将非常困难,甚至是不可能的。
|
||||
但是,在动态工具包中,不仅有1个预定义的计算图。每个实例都可以有一个新的计算图,所以这个问题就消失了。
|
||||
|
||||
动态工具包还具有易于调试和代码更接近宿主语言的优点(我的意思是Pytorch和Dynet看起来更像是比Keras或Theano更实际的Python代码)。
|
||||
|
||||
## 2.Bi-LSTM条件随机场讨论
|
||||
对于本节,我们将看到用于命名实体识别的Bi-LSTM条件随机场的完整复杂示例。虽然上面的LSTM标记符通常足以用于词性标注,但是像CRF这样的
|
||||
序列模型对于在NER上获得强大性能非常重要。虽然CRF这个名字听起来很吓人,但这个模型其实就是一个CRF,只不过由LSTM提供特征。CRF是一个高级模型,
|
||||
比本教程中的任何早期模型复杂得多。如果你想跳过它,也可以。要查看您是否准备好,请查看是否可以:
|
||||
|
||||
* 写出第 i 步、标签 k 的维特比变量的递推式。
|
||||
* 修改上述递推式以计算前向变量(forward variables)。
|
||||
* 再次修改上述递推式,改为在对数空间中计算前向变量(提示:log-sum-exp)。
|
||||
|
||||
如果你可以做这三件事,你应该能够理解下面的代码。回想一下,CRF计算的是条件概率。设 y 为标签序列,x
|
||||
为单词的输入序列。然后我们计算 P(y|x):
|
||||
|
||||
P(y|x) = exp(Score(x, y)) / Σ_{y'} exp(Score(x, y'))
|
||||
|
||||
其中得分 Score(x, y) 是通过定义一些对数势函数(log potentials)ψ_i(x, y) 来确定的:
|
||||
|
||||
Score(x, y) = Σ_i log ψ_i(x, y)
|
||||
|
||||
为了使配分函数(partition function)易于计算,势函数必须只依赖局部特征。
|
||||
|
||||
在Bi-LSTM CRF中,我们定义了两种势:发射势(emission)和转移势(transition)。第 i 个单词的发射势来自时间步 i 处
|
||||
的Bi-LSTM的隐藏状态。转移分数存储在一个 |T|×|T| 的矩阵 P 中,其中 T 是标签集。在我们的
|
||||
实现中,P_{j,k} 是从标签 k 转移到标签 j 的分数。所以:
|
||||
|
||||

|
||||
|
||||
在第二个表达式中,我们认为每个标签都被分配了一个唯一的非负索引。
|
||||
|
||||
如果上面的讨论过于简短,你可以查看[这个](http://www.cs.columbia.edu/~mcollins/crf.pdf),是迈克尔柯林斯写的关于CRF的文章。
|
||||
|
||||
## 3.实现说明
|
||||
下面的示例在对数空间中实现了前向算法来计算配分函数,以及用于解码的维特比算法。反向传播将自动为我们计算梯度,我们不需要手工做任何事情。
|
||||
|
||||
这个实现并未优化。如果你理解了其中发生的事情,可能很快就会发现:前向算法中对 next_tag 的迭代其实可以在一个大的张量操作中完成。这里为了可读性而写成了循环。
|
||||
如果您想进行相关更改,可以将此标记器用于实际任务。
|
||||
|
||||
#### 3.1 导包
|
||||
```buildoutcfg
|
||||
# Author: Robert Guthrie
|
||||
|
||||
import torch
|
||||
import torch.autograd as autograd
|
||||
import torch.nn as nn
|
||||
import torch.optim as optim
|
||||
|
||||
torch.manual_seed(1)
|
||||
```
|
||||
|
||||
#### 3.2 辅助函数
|
||||
辅助函数的功能是使代码更具可读性。
|
||||
|
||||
```buildoutcfg
|
||||
def argmax(vec):
|
||||
# 将argmax作为python int返回
|
||||
_, idx = torch.max(vec, 1)
|
||||
return idx.item()
|
||||
|
||||
|
||||
def prepare_sequence(seq, to_ix):
|
||||
idxs = [to_ix[w] for w in seq]
|
||||
return torch.tensor(idxs, dtype=torch.long)
|
||||
|
||||
|
||||
# 以正向算法的数值稳定方式计算log sum exp
|
||||
def log_sum_exp(vec):
|
||||
max_score = vec[0, argmax(vec)]
|
||||
max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
|
||||
return max_score + \
|
||||
torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
|
||||
```
|
||||
|
||||
#### 3.3 创建模型
|
||||
|
||||
```buildoutcfg
|
||||
class BiLSTM_CRF(nn.Module):
|
||||
|
||||
def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
|
||||
super(BiLSTM_CRF, self).__init__()
|
||||
self.embedding_dim = embedding_dim
|
||||
self.hidden_dim = hidden_dim
|
||||
self.vocab_size = vocab_size
|
||||
self.tag_to_ix = tag_to_ix
|
||||
self.tagset_size = len(tag_to_ix)
|
||||
|
||||
self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
|
||||
self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
|
||||
num_layers=1, bidirectional=True)
|
||||
|
||||
# 将LSTM的输出映射到标记空间。
|
||||
self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)
|
||||
|
||||
# 转移参数矩阵。条目 (i, j) 是从标签 j 转移到标签 i 的分数。
|
||||
self.transitions = nn.Parameter(
|
||||
torch.randn(self.tagset_size, self.tagset_size))
|
||||
|
||||
# 这两个语句强制执行我们从不转移到开始标记的约束
|
||||
# 并且我们永远不会从停止标记转移
|
||||
self.transitions.data[tag_to_ix[START_TAG], :] = -10000
|
||||
self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
|
||||
|
||||
self.hidden = self.init_hidden()
|
||||
|
||||
def init_hidden(self):
|
||||
return (torch.randn(2, 1, self.hidden_dim // 2),
|
||||
torch.randn(2, 1, self.hidden_dim // 2))
|
||||
|
||||
def _forward_alg(self, feats):
|
||||
# 使用前向算法来计算分区函数
|
||||
init_alphas = torch.full((1, self.tagset_size), -10000.)
|
||||
# START_TAG包含所有得分.
|
||||
init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
|
||||
|
||||
# 包装成一个变量,以便我们获得自动的反向传播
|
||||
forward_var = init_alphas
|
||||
|
||||
# 通过句子迭代
|
||||
for feat in feats:
|
||||
alphas_t = [] # The forward tensors at this timestep
|
||||
for next_tag in range(self.tagset_size):
|
||||
# 广播发射得分:无论以前的标记是怎样的都是相同的
|
||||
emit_score = feat[next_tag].view(
|
||||
1, -1).expand(1, self.tagset_size)
|
||||
# trans_score的第i个条目是从i转换到next_tag的分数
|
||||
trans_score = self.transitions[next_tag].view(1, -1)
|
||||
# next_tag_var的第i个条目是我们执行log-sum-exp之前的边(i -> next_tag)的值
|
||||
next_tag_var = forward_var + trans_score + emit_score
|
||||
# 此标记的转发变量是所有分数的log-sum-exp。
|
||||
alphas_t.append(log_sum_exp(next_tag_var).view(1))
|
||||
forward_var = torch.cat(alphas_t).view(1, -1)
|
||||
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
|
||||
alpha = log_sum_exp(terminal_var)
|
||||
return alpha
|
||||
|
||||
def _get_lstm_features(self, sentence):
|
||||
self.hidden = self.init_hidden()
|
||||
embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
|
||||
lstm_out, self.hidden = self.lstm(embeds, self.hidden)
|
||||
lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
|
||||
lstm_feats = self.hidden2tag(lstm_out)
|
||||
return lstm_feats
|
||||
|
||||
def _score_sentence(self, feats, tags):
|
||||
# Gives the score of a provided tag sequence
|
||||
score = torch.zeros(1)
|
||||
tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags])
|
||||
for i, feat in enumerate(feats):
|
||||
score = score + \
|
||||
self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]
|
||||
score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
|
||||
return score
|
||||
|
||||
def _viterbi_decode(self, feats):
|
||||
backpointers = []
|
||||
|
||||
# Initialize the viterbi variables in log space
|
||||
init_vvars = torch.full((1, self.tagset_size), -10000.)
|
||||
init_vvars[0][self.tag_to_ix[START_TAG]] = 0
|
||||
|
||||
# forward_var at step i holds the viterbi variables for step i-1
|
||||
forward_var = init_vvars
|
||||
for feat in feats:
|
||||
bptrs_t = [] # holds the backpointers for this step
|
||||
viterbivars_t = [] # holds the viterbi variables for this step
|
||||
|
||||
for next_tag in range(self.tagset_size):
|
||||
# next_tag_var [i]保存上一步的标签i的维特比变量
|
||||
# 加上从标签i转换到next_tag的分数。
|
||||
# 我们这里不包括emission分数,因为最大值不依赖于它们(我们在下面添加它们)
|
||||
next_tag_var = forward_var + self.transitions[next_tag]
|
||||
best_tag_id = argmax(next_tag_var)
|
||||
bptrs_t.append(best_tag_id)
|
||||
viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))
|
||||
# 现在添加emission分数,并将forward_var分配给我们刚刚计算的维特比变量集
|
||||
forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
|
||||
backpointers.append(bptrs_t)
|
||||
|
||||
# 过渡到STOP_TAG
|
||||
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
|
||||
best_tag_id = argmax(terminal_var)
|
||||
path_score = terminal_var[0][best_tag_id]
|
||||
|
||||
# 按照后退指针解码最佳路径。
|
||||
best_path = [best_tag_id]
|
||||
for bptrs_t in reversed(backpointers):
|
||||
best_tag_id = bptrs_t[best_tag_id]
|
||||
best_path.append(best_tag_id)
|
||||
# 弹出开始标记(我们不想将其返回给调用者)
|
||||
start = best_path.pop()
|
||||
assert start == self.tag_to_ix[START_TAG] # Sanity check
|
||||
best_path.reverse()
|
||||
return path_score, best_path
|
||||
|
||||
def neg_log_likelihood(self, sentence, tags):
|
||||
feats = self._get_lstm_features(sentence)
|
||||
forward_score = self._forward_alg(feats)
|
||||
gold_score = self._score_sentence(feats, tags)
|
||||
return forward_score - gold_score
|
||||
|
||||
def forward(self, sentence): # dont confuse this with _forward_alg above.
|
||||
# 获取BiLSTM的emission分数
|
||||
lstm_feats = self._get_lstm_features(sentence)
|
||||
|
||||
# 根据这些特征,找到最佳路径。
|
||||
score, tag_seq = self._viterbi_decode(lstm_feats)
|
||||
return score, tag_seq
|
||||
```
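
正如前面实现说明中提到的,前向算法里对 next_tag 的循环可以在一个大的张量操作中完成。下面是一个最小的向量化草图(仅作示意,假设沿用上面 BiLSTM_CRF 中 self.transitions、self.tagset_size、START_TAG、STOP_TAG 的约定):

```python
def _forward_alg_vectorized(self, feats):
    # 在对数空间中计算配分函数,与上面的 _forward_alg 等价,但去掉了内层循环
    init_alphas = torch.full((1, self.tagset_size), -10000.)
    init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
    forward_var = init_alphas
    for feat in feats:
        # scores[i][j] = forward_var[j] + transitions[i][j] + feat[i]
        # 一次性算出所有 (prev_tag -> next_tag) 组合的得分
        scores = forward_var + self.transitions + feat.view(-1, 1)
        # 对每一行做 log-sum-exp,得到新的前向变量
        forward_var = torch.logsumexp(scores, dim=1).view(1, -1)
    terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
    return torch.logsumexp(terminal_var, dim=1).squeeze()
```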
|
||||
|
||||
#### 3.4 进行训练
|
||||
|
||||
```buildoutcfg
|
||||
START_TAG = "<START>"
|
||||
STOP_TAG = "<STOP>"
|
||||
EMBEDDING_DIM = 5
|
||||
HIDDEN_DIM = 4
|
||||
|
||||
# 构造一些训练数据
|
||||
training_data = [(
|
||||
"the wall street journal reported today that apple corporation made money".split(),
|
||||
"B I I I O O O B I O O".split()
|
||||
), (
|
||||
"georgia tech is a university in georgia".split(),
|
||||
"B I O O O O B".split()
|
||||
)]
|
||||
|
||||
word_to_ix = {}
|
||||
for sentence, tags in training_data:
|
||||
for word in sentence:
|
||||
if word not in word_to_ix:
|
||||
word_to_ix[word] = len(word_to_ix)
|
||||
|
||||
tag_to_ix = {"B": 0, "I": 1, "O": 2, START_TAG: 3, STOP_TAG: 4}
|
||||
|
||||
model = BiLSTM_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)
|
||||
optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)
|
||||
|
||||
# 在训练前检查预测
|
||||
with torch.no_grad():
|
||||
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
|
||||
precheck_tags = torch.tensor([tag_to_ix[t] for t in training_data[0][1]], dtype=torch.long)
|
||||
print(model(precheck_sent))
|
||||
|
||||
# 确保加载LSTM部分中较早的prepare_sequence
|
||||
for epoch in range(
|
||||
300): # again, normally you would NOT do 300 epochs, it is toy data
|
||||
for sentence, tags in training_data:
|
||||
# 步骤1. 请记住,Pytorch积累了梯度
|
||||
# We need to clear them out before each instance
|
||||
model.zero_grad()
|
||||
|
||||
# 步骤2. 为网络准备输入,即将它们转换为单词索引的张量.
|
||||
sentence_in = prepare_sequence(sentence, word_to_ix)
|
||||
targets = torch.tensor([tag_to_ix[t] for t in tags], dtype=torch.long)
|
||||
|
||||
# 步骤3. 运行前向传播
|
||||
loss = model.neg_log_likelihood(sentence_in, targets)
|
||||
|
||||
# 步骤4. 计算损失,反向传播求梯度,并通过调用optimizer.step()来更新参数
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
|
||||
# 训练后检查预测
|
||||
with torch.no_grad():
|
||||
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
|
||||
print(model(precheck_sent))
|
||||
# 得到结果
|
||||
```
|
||||
|
||||
* 输出结果
|
||||
|
||||
```buildoutcfg
|
||||
(tensor(2.6907), [1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1])
|
||||
(tensor(20.4906), [0, 1, 1, 1, 2, 2, 2, 0, 1, 2, 2])
|
||||
```
|
||||
|
||||
## 4.练习:区分标记的新损失函数
|
||||
在解码时我们其实不需要创建计算图,因为我们不会从维特比路径得分反向传播。不过既然已经有了它,可以尝试训练一个标记器,其损失函
|
||||
数是 Viterbi 路径得分和 gold-standard 得分之间的差。应该清楚的是,这个函数是非负的,并且当预测的标签序列就是正确的标签序列时取值为 0。这基
|
||||
本上就是*结构化感知器(structured perceptron)*。
|
||||
|
||||
由于已经实现了 Viterbi 和 score_sentence,因此这个修改应该很简短。这是一个计算图的形状*取决于训练实例*的例子。虽然我没有尝
|
||||
试在静态工具包中实现它,但我想它是可以的但可能没有那么容易。
|
||||
|
||||
拿起一些真实数据并进行比较!
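
作为参考,下面给出这个练习的一个最小草图(仅作示意,假设沿用上面的 BiLSTM_CRF 模型及其内部方法;函数名是本示例自行引入的):

```python
def structured_perceptron_loss(model, sentence_in, targets):
    feats = model._get_lstm_features(sentence_in)
    # 解码本身不需要梯度,只是得到当前模型认为最好的标签序列
    with torch.no_grad():
        _, best_path = model._viterbi_decode(feats)
    pred_tags = torch.tensor(best_path, dtype=torch.long)
    # 损失 = 预测路径得分 - 正确路径得分;预测完全正确时为0,否则为非负值
    return model._score_sentence(feats, pred_tags) - model._score_sentence(feats, targets)
```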
|
||||
|
@ -1 +0,0 @@
|
||||
# PyTorch简介
|
331
FifthSection/PyTorch_Introuction.md
Normal file
@ -0,0 +1,331 @@
|
||||
# PyTorch简介
|
||||
## 1.Torch张量库介绍
|
||||
深度学习的所有计算都是在张量上进行的。张量是矩阵的一般化形式,可以用多于两个维度进行索引。稍后我们将详细讨论这意味着什么。首
|
||||
先,我们先来看一下我们可以用张量来干什么。
|
||||
|
||||
```buildoutcfg
|
||||
# 作者: Robert Guthrie
|
||||
|
||||
import torch
|
||||
import torch.autograd as autograd
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
import torch.optim as optim
|
||||
|
||||
torch.manual_seed(1)
|
||||
```
|
||||
|
||||
#### 1.1 创建张量
|
||||
张量可以在Python list形式下通过`torch.Tensor()`函数创建。
|
||||
|
||||
```buildoutcfg
|
||||
# 利用给定数据创建一个torch.Tensor对象.这是一个一维向量
|
||||
V_data = [1., 2., 3.]
|
||||
V = torch.Tensor(V_data)
|
||||
print(V)
|
||||
|
||||
# 创建一个矩阵
|
||||
M_data = [[1., 2., 3.], [4., 5., 6]]
|
||||
M = torch.Tensor(M_data)
|
||||
print(M)
|
||||
|
||||
# 创建2x2x2形式的三维张量.
|
||||
T_data = [[[1., 2.], [3., 4.]],
|
||||
[[5., 6.], [7., 8.]]]
|
||||
T = torch.Tensor(T_data)
|
||||
print(T)
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor([1., 2., 3.])
|
||||
tensor([[1., 2., 3.],
|
||||
[4., 5., 6.]])
|
||||
tensor([[[1., 2.],
|
||||
[3., 4.]],
|
||||
|
||||
[[5., 6.],
|
||||
[7., 8.]]])
|
||||
```
|
||||
|
||||
什么是三维张量?让我们这样想象。如果你有一个向量,那么对这个向量索引就会得到一个标量。如果你有一个矩阵,对这个矩阵索引那么就会
|
||||
得到一个向量。如果你有一个三维张量,那么对其索引就会得到一个矩阵!
|
||||
|
||||
针对术语的说明:当我在本教程内使用“tensor”,它针对的是所有`torch.Tensor`对象。矩阵和向量是特殊的`torch.Tensors`,他们的维
|
||||
度分别是1和2。当我说到三维张量,我会简洁的使用“3D tensor”。
|
||||
|
||||
```buildoutcfg
|
||||
# 索引V得到一个标量(0维张量)
|
||||
print(V[0])
|
||||
|
||||
# 从向量V中获取一个数字
|
||||
print(V[0].item())
|
||||
|
||||
# 索引M得到一个向量
|
||||
print(M[0])
|
||||
|
||||
# 索引T得到一个矩阵
|
||||
print(T[0])
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor(1.)
|
||||
1.0
|
||||
tensor([1., 2., 3.])
|
||||
tensor([[1., 2.],
|
||||
[3., 4.]])
|
||||
```
|
||||
你也可以创建其他数据类型的tensors。默认的数据类型为浮点型。可以使用`torch.LongTensor()`来创建一个整数类型的张量。你可以在文
|
||||
档中寻找更多的数据类型,但是浮点型和长整型是最常用的。
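
例如(仅作示意):

```python
# 默认是浮点型;用 torch.LongTensor() 得到整数(长整型)张量
L = torch.LongTensor([[1, 2, 3], [4, 5, 6]])
print(L)
print(L.dtype)   # torch.int64
```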
|
||||
|
||||
你可以使用`torch.randn()`创建一个张量。这个张量拥有随机数据和需要指定的维度。
|
||||
|
||||
```buildoutcfg
|
||||
x = torch.randn((3, 4, 5))
|
||||
print(x)
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor([[[-1.5256, -0.7502, -0.6540, -1.6095, -0.1002],
|
||||
[-0.6092, -0.9798, -1.6091, -0.7121, 0.3037],
|
||||
[-0.7773, -0.2515, -0.2223, 1.6871, 0.2284],
|
||||
[ 0.4676, -0.6970, -1.1608, 0.6995, 0.1991]],
|
||||
|
||||
[[ 0.8657, 0.2444, -0.6629, 0.8073, 1.1017],
|
||||
[-0.1759, -2.2456, -1.4465, 0.0612, -0.6177],
|
||||
[-0.7981, -0.1316, 1.8793, -0.0721, 0.1578],
|
||||
[-0.7735, 0.1991, 0.0457, 0.1530, -0.4757]],
|
||||
|
||||
[[-0.1110, 0.2927, -0.1578, -0.0288, 0.4533],
|
||||
[ 1.1422, 0.2486, -1.7754, -0.0255, -1.0233],
|
||||
[-0.5962, -1.0055, 0.4285, 1.4761, -1.7869],
|
||||
[ 1.6103, -0.7040, -0.1853, -0.9962, -0.8313]]])
|
||||
```
|
||||
|
||||
#### 1.2 张量操作
|
||||
你可以以你想要的方式操作张量。
|
||||
|
||||
```buildoutcfg
|
||||
x = torch.Tensor([1., 2., 3.])
|
||||
y = torch.Tensor([4., 5., 6.])
|
||||
z = x + y
|
||||
print(z)
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor([5., 7., 9.])
|
||||
```
|
||||
|
||||
可以查阅[文档](https://pytorch.org/docs/torch.html)获取大量可用操作的完整列表,这些操作不仅局限于数学操作范围。
|
||||
|
||||
接下来一个很有帮助的操作就是连接。
|
||||
|
||||
```buildoutcfg
|
||||
# 默认情况下, 它沿着第一个轴连接 (即按行拼接)
|
||||
x_1 = torch.randn(2, 5)
|
||||
y_1 = torch.randn(3, 5)
|
||||
z_1 = torch.cat([x_1, y_1])
|
||||
print(z_1)
|
||||
|
||||
# 连接列:
|
||||
x_2 = torch.randn(2, 3)
|
||||
y_2 = torch.randn(2, 5)
|
||||
# 第二个参数指定了沿着哪条轴连接
|
||||
z_2 = torch.cat([x_2, y_2], 1)
|
||||
print(z_2)
|
||||
|
||||
# 如果你的tensors是不兼容的,torch会报错。取消注释来查看错误。
|
||||
# torch.cat([x_1, x_2])
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor([[-0.8029, 0.2366, 0.2857, 0.6898, -0.6331],
|
||||
[ 0.8795, -0.6842, 0.4533, 0.2912, -0.8317],
|
||||
[-0.5525, 0.6355, -0.3968, -0.6571, -1.6428],
|
||||
[ 0.9803, -0.0421, -0.8206, 0.3133, -1.1352],
|
||||
[ 0.3773, -0.2824, -2.5667, -1.4303, 0.5009]])
|
||||
tensor([[ 0.5438, -0.4057, 1.1341, -0.1473, 0.6272, 1.0935, 0.0939, 1.2381],
|
||||
[-1.1115, 0.3501, -0.7703, -1.3459, 0.5119, -0.6933, -0.1668, -0.9999]])
|
||||
```
|
||||
|
||||
#### 1.3 重构张量
|
||||
使用`.view()`方法来重构张量。这是一个使用频率很高的方法,因为许多神经网络组件对输入的形状有明确要求。你通常需要先将数据重构,再把它输入到神经
|
||||
元中。
|
||||
|
||||
```buildoutcfg
|
||||
x = torch.randn(2, 3, 4)
|
||||
print(x)
|
||||
print(x.view(2, 12)) # 重构为2行12列
|
||||
# 同上。如果维度为-1,那么它的大小可以根据数据推断出来
|
||||
print(x.view(2, -1))
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```
|
||||
tensor([[[ 0.4175, -0.2127, -0.8400, -0.4200],
|
||||
[-0.6240, -0.9773, 0.8748, 0.9873],
|
||||
[-0.0594, -2.4919, 0.2423, 0.2883]],
|
||||
|
||||
[[-0.1095, 0.3126, 1.5038, 0.5038],
|
||||
[ 0.6223, -0.4481, -0.2856, 0.3880],
|
||||
[-1.1435, -0.6512, -0.1032, 0.6937]]])
|
||||
tensor([[ 0.4175, -0.2127, -0.8400, -0.4200, -0.6240, -0.9773, 0.8748, 0.9873,
|
||||
-0.0594, -2.4919, 0.2423, 0.2883],
|
||||
[-0.1095, 0.3126, 1.5038, 0.5038, 0.6223, -0.4481, -0.2856, 0.3880,
|
||||
-1.1435, -0.6512, -0.1032, 0.6937]])
|
||||
tensor([[ 0.4175, -0.2127, -0.8400, -0.4200, -0.6240, -0.9773, 0.8748, 0.9873,
|
||||
-0.0594, -2.4919, 0.2423, 0.2883],
|
||||
[-0.1095, 0.3126, 1.5038, 0.5038, 0.6223, -0.4481, -0.2856, 0.3880,
|
||||
-1.1435, -0.6512, -0.1032, 0.6937]])
|
||||
```
|
||||
|
||||
## 2.计算图和自动求导
|
||||
计算图的思想对于有效率的深度学习编程是很重要的,因为它可以使你不必去自己写反向梯度传播。计算图只是简单地说明了如何将数据组合
|
||||
在一起以输出结果。因为图完全指定了操作所包含的参数,因此它包含了足够的信息去求导。这可能听起来很模糊,所以让我们看看使用Pytorch
|
||||
的基本标记(属性):`requires_grad`。
|
||||
|
||||
首先,从程序员的角度来思考。我们在上面刚刚创建的`torch.Tensor`对象中存储了什么?显然,是数据和结构,也很可能是其他的东西。
|
||||
但是当我们将两个张量相加,我们得到了一个输出张量。这个输出所能体现出的只有数据和结构,并不能体现出是由两个张量加之和得到的
|
||||
(因为它可能是从一个文件中读取的, 也可能是其他操作的结果等)。
|
||||
|
||||
如果`requires_grad=True`,张量对象可以一直跟踪它是如何创建的。让我们在实际中来看。
|
||||
```
|
||||
# 张量对象带有“requires_grad”标记
|
||||
x = torch.tensor([1., 2., 3.], requires_grad=True)
|
||||
|
||||
# 通过requires_grad=True,您也可以做之前所有的操作。
|
||||
y = torch.tensor([4., 5., 6.], requires_grad=True)
|
||||
z = x + y
|
||||
print(z)
|
||||
|
||||
# 但是z还有一些额外的东西.
|
||||
print(z.grad_fn)
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor([5., 7., 9.], grad_fn=<AddBackward0>)
|
||||
<AddBackward0 object at 0x7fd66e2d9cf8>
|
||||
```
|
||||
|
||||
所以张量知道是什么创建了它们。z 知道它并非从文件读取,也不是乘法、指数或其他运算的结果。如果你继续跟踪`z.grad_fn`,你会从中
|
||||
找到x和y的痕迹。
|
||||
|
||||
但是它如何帮助我们计算梯度?
|
||||
|
||||
```buildoutcfg
|
||||
# 我们来将z中所有项作和运算
|
||||
s = z.sum()
|
||||
print(s)
|
||||
print(s.grad_fn)
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor(21., grad_fn=<SumBackward0>)
|
||||
<SumBackward0 object at 0x7fd73f1c7e48>
|
||||
```
|
||||
|
||||
那么,这个和 s 对 x 的第一个分量的导数等于多少?在数学上,我们求的是
|
||||
|
||||

|
||||
|
||||
s是被作为张量z的和创建的。张量z是x+y的和,因此
|
||||
|
||||

|
||||
|
||||
于是 s 包含了足够的信息,可以确定我们想要的导数是 1!
|
||||
|
||||
当然,这里掩盖了如何真正计算导数的挑战。关键在于 s 携带了足够多的信息,使得导数可以被计算。实际上,PyTorch 的开发人员为 `sum()` 和
|
||||
`+` 等操作编写了程序,使它们知道如何计算自己的梯度并运行反向传播算法。对该算法的深入讨论超出了本教程的范围。
|
||||
|
||||
让我们用 PyTorch 来计算梯度,验证我们是对的(注意:如果你把这个代码块运行很多次,梯度会不断增大,这是因为 PyTorch 会把梯度累积到`.grad`属
|
||||
性,而且对于很多模型它是很方便的.)
|
||||
|
||||
```buildoutcfg
|
||||
# 在任意变量上调用 .backward() 将从它开始运行反向传播.
|
||||
s.backward()
|
||||
print(x.grad)
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor([1., 1., 1.])
|
||||
```
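
下面是一个最小的演示(接着上面的 x 和 y 继续),说明梯度确实会累积,以及如何在需要时清零:

```python
s2 = (x + y).sum()   # 再构建一次同样的计算并反向传播
s2.backward()
print(x.grad)        # tensor([2., 2., 2.]):在原来的 1 上又累加了 1

x.grad.zero_()       # 训练循环中通常在每次反向传播前把梯度清零
print(x.grad)        # tensor([0., 0., 0.])
```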
|
||||
|
||||
作为一个成功的深度学习程序员了解下面的模块如何运行是至关重要的。
|
||||
|
||||
```buildoutcfg
|
||||
x = torch.randn((2, 2))
|
||||
y = torch.randn((2, 2))
|
||||
#用户创建的张量在默认情况下“requires_grad=False”
|
||||
print(x.requires_grad, y.requires_grad)
|
||||
z = x + y
|
||||
# 你不能通过z反向传播。
|
||||
print(z.grad_fn)
|
||||
|
||||
|
||||
# “.requires_grad_( ... )”改变了“requires_grad”属性
|
||||
# 如果没有指定,标记默认为True
|
||||
x = x.requires_grad_()
|
||||
y = y.requires_grad_()
|
||||
#正如我们在上面看到的一样,z包含足够的信息计算梯度。
|
||||
z = x + y
|
||||
print(z.grad_fn)
|
||||
# 如果任何操作的输入部分带有“requires_grad=True”那么输出就会变为:
|
||||
print(z.requires_grad)
|
||||
|
||||
# 现在z有关于x,y的历史信息
|
||||
# 我们可以获取它的值,将其从历史中分离出来吗?
|
||||
new_z = z.detach()
|
||||
|
||||
# new_z有足够的信息反向传播至x和y吗?
|
||||
# 答案是没有
|
||||
print(new_z.grad_fn)
|
||||
# 怎么会这样? “z.detach()”函数返回了一个与“z”相同存储的张量
|
||||
#但是没有携带历史的计算信息。
|
||||
# 它对于自己是如何计算得来的不知道任何事情。
|
||||
# 从本质上讲,我们已经把这个变量从过去的历史中分离出来了。
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
False False
|
||||
None
|
||||
<AddBackward0 object at 0x7fd66c736470>
|
||||
True
|
||||
None
|
||||
```
|
||||
|
||||
你也可以把代码块包裹在 `with torch.no_grad():` 中,来让 autograd 停止跟踪 `requires_grad=True` 的张量的历史记录,从而停止自动
|
||||
求导。
|
||||
|
||||
```buildoutcfg
|
||||
print(x.requires_grad)
|
||||
print((x ** 2).requires_grad)
|
||||
|
||||
with torch.no_grad():
|
||||
print((x ** 2).requires_grad)
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
True
|
||||
True
|
||||
False
|
||||
```
|
@ -1 +1,240 @@
|
||||
# 序列模型和长短句记忆(LSTM)模型
|
||||
# 序列模型和长短句记忆(LSTM)模型
|
||||
|
||||
* 前馈网络
|
||||
|
||||
之前我们已经学过了许多的前馈网络。所谓前馈网络, 就是网络中不会保存状态。然而有时这并不是我们想要的效果。在自然语言处理
|
||||
(NLP, Natural Language Processing) 中, 序列模型是一个核心的概念。
|
||||
|
||||
* 序列模型
|
||||
|
||||
所谓序列模型, 即输入依赖于时间信息的模型。一个典型的序列模型是隐马尔科夫模型 (HMM, Hidden Markov Model)。另一个序列模型的例
|
||||
子是条件随机场 (CRF, Conditional Random Field)。
|
||||
|
||||
* 循环神经网络
|
||||
|
||||
循环神经网络是指可以保存某种状态的神经网络。比如说, 神经网络中上个时刻的输出可以作为下个 时刻的输入的一部分, 以此信息就可以
|
||||
通过序列在网络中一直往后传递。对于LSTM (Long-Short Term Memory) 来说, 序列中的每个元素都有一个相应的隐状态,
|
||||
该隐状态原则上可以包含序列当前结点之前的任一节点的信息。我们可以使用隐藏状态来预测语言模型中的单词, 词性标签以及其他。
|
||||
|
||||
## 1.Pytorch中的LSTM
|
||||
在正式学习之前,有几个点要说明一下,Pytorch中 LSTM 的输入形式是一个 3D 的Tensor,每一个维度都有重要的意义,第一个维度就是序列本身,
|
||||
第二个维度是`mini-batch`中实例的索引,第三个维度是输入元素的索引,我们之前没有接触过`mini-batch`,所以我们就先忽略它并假设第
|
||||
二维的维度是1。如果要用"The cow jumped"这个句子来运行一个序列模型,那么就应该把它整理成如下的形式:
|
||||
|
||||

|
||||
|
||||
只是要记住, 还有一个额外的大小为1的第二维度。
|
||||
|
||||
此外, 你还可以向网络逐个输入序列, 在这种情况下, 第一个轴的大小也是1。
|
||||
|
||||
来看一个简单的例子。
|
||||
|
||||
```buildoutcfg
|
||||
# Author: Robert Guthrie
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
import torch.optim as optim
|
||||
|
||||
torch.manual_seed(1)
|
||||
```
|
||||
```buildoutcfg
|
||||
lstm = nn.LSTM(3, 3) # 输入维度为3维,输出维度为3维
|
||||
inputs = [torch.randn(1, 3) for _ in range(5)] # 生成一个长度为5的序列
|
||||
|
||||
# 初始化隐藏状态.
|
||||
hidden = (torch.randn(1, 1, 3),
|
||||
torch.randn(1, 1, 3))
|
||||
for i in inputs:
|
||||
# 将序列中的元素逐个输入到LSTM.
|
||||
# 经过每步操作,hidden 的值包含了隐藏状态的信息.
|
||||
out, hidden = lstm(i.view(1, 1, -1), hidden)
|
||||
|
||||
# 另外我们可以对一整个序列进行训练.
|
||||
# LSTM返回的第一个值是所有时刻的隐藏状态
|
||||
# 第二个返回值是最后一个时刻的隐藏状态
|
||||
#(所以"out"的最后一个和"hidden"是一样的)
|
||||
# 之所以这样设计:
|
||||
# 通过"out"你能取得任何一个时刻的隐藏状态,而"hidden"的值是用来进行序列的反向传播运算, 具体方式就是将它作为参数传入后面的 LSTM 网络.
|
||||
|
||||
# 增加额外的第二个维度.
|
||||
inputs = torch.cat(inputs).view(len(inputs), 1, -1)
|
||||
hidden = (torch.randn(1, 1, 3), torch.randn(1, 1, 3)) # 清空隐藏状态.
|
||||
out, hidden = lstm(inputs, hidden)
|
||||
print(out)
|
||||
print(hidden)
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor([[[-0.0187, 0.1713, -0.2944]],
|
||||
|
||||
[[-0.3521, 0.1026, -0.2971]],
|
||||
|
||||
[[-0.3191, 0.0781, -0.1957]],
|
||||
|
||||
[[-0.1634, 0.0941, -0.1637]],
|
||||
|
||||
[[-0.3368, 0.0959, -0.0538]]], grad_fn=<StackBackward>)
|
||||
(tensor([[[-0.3368, 0.0959, -0.0538]]], grad_fn=<StackBackward>), tensor([[[-0.9825, 0.4715, -0.0633]]], grad_fn=<StackBackward>))
|
||||
```
|
||||
|
||||
## 2.例子:用LSTM来进行词性标注
|
||||
在这部分, 我们将会使用一个 LSTM 网络来进行词性标注。在这里我们不会用到维特比算法, 前向-后向算法或者任何类似的算法,而是将这部
|
||||
分内容作为一个 (有挑战) 的练习留给读者, 希望读者在了解了这部分的内容后能够实现如何将维特比算法应用到 LSTM 网络中来。
|
||||
|
||||
该模型如下:输入的句子是 w_1, …, w_M,其中 w_i ∈ V(词汇表)。标签的集合定义为 T,
|
||||
y_i 为单词 w_i 的标签,用 ŷ_i 表示对单词 w_i 词性的预测。
|
||||
|
||||
这是一个结构预测模型, 我们的输出是一个序列 ŷ_1, …, ŷ_M, 其中 ŷ_i ∈ T。
|
||||
|
||||
在进行预测时, 需将句子的每个词输入到一个 LSTM 网络中。将时刻 i 的隐藏状态标记为 h_i。同样地, 对每个标签赋一个
|
||||
独一无二的索引 (类似 word embeddings 部分 word_to_ix 的设置). 然后就得到了 ŷ_i 的预测规则:
|
||||
|
||||

|
||||
|
||||
即先对隐状态进行一个仿射变换, 然后计算一个对数 softmax, 最后得到的预测标签即为对数 softmax 中最大的值对应的标签. 注意, 这也
|
||||
意味着 A 的目标空间的维度是 |T|。
|
||||
|
||||
#### 2.1 准备数据
|
||||
```buildoutcfg
|
||||
def prepare_sequence(seq, to_ix):
|
||||
idxs = [to_ix[w] for w in seq]
|
||||
return torch.tensor(idxs, dtype=torch.long)
|
||||
|
||||
training_data = [
|
||||
("The dog ate the apple".split(), ["DET", "NN", "V", "DET", "NN"]),
|
||||
("Everybody read that book".split(), ["NN", "V", "DET", "NN"])
|
||||
]
|
||||
word_to_ix = {}
|
||||
for sent, tags in training_data:
|
||||
for word in sent:
|
||||
if word not in word_to_ix:
|
||||
word_to_ix[word] = len(word_to_ix)
|
||||
print(word_to_ix)
|
||||
tag_to_ix = {"DET": 0, "NN": 1, "V": 2}
|
||||
|
||||
# 实际中通常使用更大的维度如32维, 64维.
|
||||
# 这里我们使用小的维度, 为了方便查看训练过程中权重的变化.
|
||||
EMBEDDING_DIM = 6
|
||||
HIDDEN_DIM = 6
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
{'The': 0, 'dog': 1, 'ate': 2, 'the': 3, 'apple': 4, 'Everybody': 5, 'read': 6, 'that': 7, 'book': 8}
|
||||
```
|
||||
|
||||
#### 2.2 创建模型
|
||||
```buildoutcfg
|
||||
class LSTMTagger(nn.Module):
|
||||
|
||||
def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
|
||||
super(LSTMTagger, self).__init__()
|
||||
self.hidden_dim = hidden_dim
|
||||
|
||||
self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
|
||||
|
||||
# LSTM以word_embeddings作为输入, 输出维度为 hidden_dim 的隐藏状态值
|
||||
self.lstm = nn.LSTM(embedding_dim, hidden_dim)
|
||||
|
||||
# 线性层将隐藏状态空间映射到标注空间
|
||||
self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
|
||||
self.hidden = self.init_hidden()
|
||||
|
||||
def init_hidden(self):
|
||||
# 一开始并没有隐藏状态所以我们要先初始化一个
|
||||
# 关于维度为什么这么设计请参考Pytorch相关文档
|
||||
# 各个维度的含义是 (num_layers, minibatch_size, hidden_dim)
|
||||
return (torch.zeros(1, 1, self.hidden_dim),
|
||||
torch.zeros(1, 1, self.hidden_dim))
|
||||
|
||||
def forward(self, sentence):
|
||||
embeds = self.word_embeddings(sentence)
|
||||
lstm_out, self.hidden = self.lstm(
|
||||
embeds.view(len(sentence), 1, -1), self.hidden)
|
||||
tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
|
||||
tag_scores = F.log_softmax(tag_space, dim=1)
|
||||
return tag_scores
|
||||
```
|
||||
|
||||
#### 2.3 训练模型
|
||||
```buildoutcfg
|
||||
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix))
|
||||
loss_function = nn.NLLLoss()
|
||||
optimizer = optim.SGD(model.parameters(), lr=0.1)
|
||||
|
||||
# 查看训练前的分数
|
||||
# 注意: 输出的 i,j 元素的值表示单词 i 的 j 标签的得分
|
||||
# 这里我们不需要训练不需要求导,所以使用torch.no_grad()
|
||||
with torch.no_grad():
|
||||
inputs = prepare_sequence(training_data[0][0], word_to_ix)
|
||||
tag_scores = model(inputs)
|
||||
print(tag_scores)
|
||||
|
||||
for epoch in range(300): # 实际情况下你不会训练300个周期, 此例中我们只是随便设了一个值
|
||||
for sentence, tags in training_data:
|
||||
# 第一步: 请记住Pytorch会累加梯度.
|
||||
# 我们需要在训练每个实例前清空梯度
|
||||
model.zero_grad()
|
||||
|
||||
# 此外还需要清空 LSTM 的隐状态,
|
||||
# 将其从上个实例的历史中分离出来.
|
||||
model.hidden = model.init_hidden()
|
||||
|
||||
# 准备网络输入, 将其变为词索引的 Tensor 类型数据
|
||||
sentence_in = prepare_sequence(sentence, word_to_ix)
|
||||
targets = prepare_sequence(tags, tag_to_ix)
|
||||
|
||||
# 第三步: 前向传播.
|
||||
tag_scores = model(sentence_in)
|
||||
|
||||
# 第四步: 计算损失和梯度值, 通过调用 optimizer.step() 来更新梯度
|
||||
loss = loss_function(tag_scores, targets)
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
|
||||
# 查看训练后的得分
|
||||
with torch.no_grad():
|
||||
inputs = prepare_sequence(training_data[0][0], word_to_ix)
|
||||
tag_scores = model(inputs)
|
||||
|
||||
# 句子是 "the dog ate the apple", i,j 表示对于单词 i, 标签 j 的得分.
|
||||
# 我们采用得分最高的标签作为预测的标签. 从下面的输出我们可以看到, 预测得
|
||||
# 到的结果是0 1 2 0 1. 因为 索引是从0开始的, 因此第一个值0表示第一行的
|
||||
# 最大值, 第二个值1表示第二行的最大值, 以此类推. 所以最后的结果是 DET
|
||||
# NOUN VERB DET NOUN, 整个序列都是正确的!
|
||||
print(tag_scores)
|
||||
```
|
||||
|
||||
* 输出结果:
|
||||
|
||||
```buildoutcfg
|
||||
tensor([[-1.1389, -1.2024, -0.9693],
|
||||
[-1.1065, -1.2200, -0.9834],
|
||||
[-1.1286, -1.2093, -0.9726],
|
||||
[-1.1190, -1.1960, -0.9916],
|
||||
[-1.0137, -1.2642, -1.0366]])
|
||||
tensor([[-0.0858, -2.9355, -3.5374],
|
||||
[-5.2313, -0.0234, -4.0314],
|
||||
[-3.9098, -4.1279, -0.0368],
|
||||
[-0.0187, -4.7809, -4.5960],
|
||||
[-5.8170, -0.0183, -4.1879]])
|
||||
```
|
||||
|
||||
## 3.练习:使用字符级特征来增强 LSTM 词性标注器
|
||||
在上面的例子中, 每个词都有一个词嵌入, 作为序列模型的输入. 接下来让我们使用每个单词的字符级别表达来增强词嵌入。我们期望这
|
||||
个操作对结果能有显著提升, 因为像词缀这样的字符级信息对于词性有很大的影响。比如说, 像包含词缀 -ly 的单词基本上都是被标注为副词。
|
||||
|
||||
具体操作如下:用 c_w 表示单词 w 的字符级表达, 同之前一样, 用 x_w 表示词嵌入。序列模型的输入就
|
||||
变成了 x_w 和 c_w 的拼接。因此, 如果 x_w 的维度是5, c_w
|
||||
的维度是3,那么我们的 LSTM 网络的输入维度大小就是8。
|
||||
|
||||
为了得到字符级别的表达, 将单词的每个字符输入一个 LSTM 网络, 而 c_w 则为这个 LSTM 网络最后的隐状态。一些提示:
|
||||
|
||||
* 新模型中需要两个 LSTM, 一个跟之前一样, 用来输出词性标注的得分, 另外一个新增加的用来获取每个单词的字符级别表达。
|
||||
|
||||
* 为了在字符级别上运行序列模型,你需要用嵌入的字符来作为字符 LSTM 的输入(下面给出一个最小的草图供参考)。
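
下面是这个练习的一个最小草图(仅作示意;类名和各个维度参数都是本示例自行引入的,并非教程规定的写法):

```python
class CharAugmentedTagger(nn.Module):
    def __init__(self, word_vocab_size, char_vocab_size, tagset_size,
                 word_emb_dim=5, char_emb_dim=3, char_hidden_dim=3, hidden_dim=8):
        super(CharAugmentedTagger, self).__init__()
        self.word_embeddings = nn.Embedding(word_vocab_size, word_emb_dim)
        self.char_embeddings = nn.Embedding(char_vocab_size, char_emb_dim)
        # 第一个LSTM:在单词的字符序列上运行,取最后的隐状态作为 c_w
        self.char_lstm = nn.LSTM(char_emb_dim, char_hidden_dim)
        # 第二个LSTM:输入是词嵌入 x_w 和字符级表达 c_w 的拼接
        self.lstm = nn.LSTM(word_emb_dim + char_hidden_dim, hidden_dim)
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)

    def forward(self, word_idxs, char_idxs_per_word):
        word_reprs = []
        for word_idx, char_idxs in zip(word_idxs, char_idxs_per_word):
            x_w = self.word_embeddings(word_idx)                       # (word_emb_dim,)
            char_embeds = self.char_embeddings(char_idxs).view(len(char_idxs), 1, -1)
            _, (h_n, _) = self.char_lstm(char_embeds)
            c_w = h_n.view(-1)                                         # 字符级LSTM最后的隐状态
            word_reprs.append(torch.cat([x_w, c_w]))
        lstm_in = torch.stack(word_reprs).view(len(word_idxs), 1, -1)
        lstm_out, _ = self.lstm(lstm_in)
        tag_space = self.hidden2tag(lstm_out.view(len(word_idxs), -1))
        return F.log_softmax(tag_space, dim=1)
```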
|

@ -1 +1,251 @@

# Word Embeddings: Encoding Lexical Semantics

Word embeddings are dense vectors of real numbers, one per word in your vocabulary. In NLP, it is almost always the case that your features are words! But how should you represent a word in a computer? You could store its ASCII character representation, but that only tells you how the word is spelled, not what it means (you might be able to derive its part of speech from its affixes, or some properties from its capitalization, but not much more). Even more, in what sense could you combine these representations? We often want dense outputs from our neural networks, where the inputs are |V|-dimensional (V being the vocabulary), but the outputs have only a few dimensions (for example, if we are only predicting a handful of labels). How do we get from a massive dimensional space to a smaller dimensional space?

How about using one-hot encodings instead of ASCII representations? That is, we represent the word w by

$$ \overline{w} = [\,0, 0, \dots, 1, \dots, 0, 0\,] $$

where the 1 is in a location unique to w. Any other word will have a 1 in some other location, and a 0 everywhere else.
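
As a quick illustration (not part of the original text), building such a one-hot vector for a tiny made-up vocabulary looks like this:

```python
import torch

word_to_ix = {"hello": 0, "world": 1, "physicist": 2, "mathematician": 3}

# One-hot vector for "physicist": all zeros except a 1 at its unique index.
one_hot = torch.zeros(len(word_to_ix))
one_hot[word_to_ix["physicist"]] = 1
print(one_hot)  # tensor([0., 0., 1., 0.])
```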

Besides being huge, this representation has an enormous drawback: it treats all words as independent entities with no relation to each other. What we really want is some notion of *similarity* between words. Why? Let's look at an example.

Suppose we are building a language model, and we have seen the following sentences in our training data:

* The mathematician ran to the store.

* The physicist ran to the store.

* The mathematician solved the open problem.

Now suppose we get a new sentence never seen in our training data:

* The physicist solved the open problem.

Our language model might do reasonably well on this sentence, but it would do much better if it could use the following two facts:

* We have seen mathematician and physicist in the same role in a sentence, so in some sense they are semantically related.

* We have seen mathematician in the same role in this new unseen sentence as we are now seeing physicist.

Then we could infer that physicist is actually a good fit in the new sentence, couldn't we? This is what we mean by a notion of similarity: we mean *semantic similarity*, not simply similar spelling. It is a technique for combating the sparsity of linguistic data by connecting the dots between what we have seen and what we have not. This example, of course, relies on a fundamental linguistic assumption: that words appearing in similar contexts are semantically related to each other. This is called the [distributional hypothesis](https://en.wikipedia.org/wiki/Distributional_semantics).

## 1. Getting Dense Word Embeddings

How can we solve this problem? That is, how can we encode semantic similarity between words? Maybe we think up some semantic attributes. For example, we see that both mathematicians and physicists can run, so maybe we give these words a high score for the "is able to run" attribute. Think of some other attributes, and imagine what score you would give some common words on those attributes.

If each attribute is a dimension, then we might give each word a vector, like this:

$$ q_\text{mathematician} = \left[\, \overbrace{2.3}^{\text{can run}},\ \overbrace{9.4}^{\text{likes coffee}},\ \overbrace{-5.5}^{\text{majored in Physics}},\ \dots \,\right] $$

$$ q_\text{physicist} = \left[\, \overbrace{2.5}^{\text{can run}},\ \overbrace{9.1}^{\text{likes coffee}},\ \overbrace{6.4}^{\text{majored in Physics}},\ \dots \,\right] $$

Then we can get a measure of similarity between these words by doing:

$$ \text{Similarity}(\text{physicist}, \text{mathematician}) = q_\text{physicist} \cdot q_\text{mathematician} $$

although it is more common to normalize by the lengths:

$$ \text{Similarity}(\text{physicist}, \text{mathematician}) = \frac{q_\text{physicist} \cdot q_\text{mathematician}}{\lVert q_\text{physicist} \rVert \, \lVert q_\text{mathematician} \rVert} = \cos(\phi) $$

where φ is the angle between the two vectors. That way, extremely similar words have similarity 1, and extremely dissimilar words have similarity -1.
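
As a small, optional illustration (not part of the original tutorial), the normalized similarity above can be computed directly in PyTorch using the toy attribute vectors from the example:

```python
import torch
import torch.nn.functional as F

# Hypothetical hand-made attribute vectors: [can run, likes coffee, majored in Physics]
q_mathematician = torch.tensor([2.3, 9.4, -5.5])
q_physicist = torch.tensor([2.5, 9.1, 6.4])

# Unnormalized similarity: a plain dot product.
print(torch.dot(q_physicist, q_mathematician))

# Length-normalized similarity: the cosine of the angle between the vectors.
cos_sim = torch.dot(q_physicist, q_mathematician) / (q_physicist.norm() * q_mathematician.norm())
print(cos_sim)  # same value as F.cosine_similarity(q_physicist, q_mathematician, dim=0)
```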

You can think of the sparse one-hot vectors from the beginning of this section as a special case of these new vectors, where every pair of words has similarity 0 and each word is given a single unique semantic attribute. These new vectors are *dense*, which is to say their entries are typically non-zero.

But these new vectors are a big pain: you could think of thousands of different semantic attributes that might be relevant to similarity, and how on earth would you set the values of all those attributes? Central to the idea of deep learning is that the neural network learns representations of the features, rather than requiring the programmer to design them. So why not just make the word embeddings parameters of our model, and update them during training? This is exactly what we will do. The network can, in principle, learn some *latent semantic attributes*. Note, though, that the learned embeddings will probably not be interpretable. That is, although with our hand-crafted vectors above we can see that mathematicians and physicists are similar in that they both like coffee, if we let a neural network learn the embeddings and find that both mathematician and physicist have a large value in the second dimension, it is not clear what that dimension means. They are similar in some latent semantic dimension, but that dimension probably has no interpretation for us.

## 2. Word Embeddings in PyTorch

Before we get to a worked example and an exercise, a few quick notes about how to use embeddings in PyTorch and in deep learning programming in general. Similar to how we defined a unique index for each word when making one-hot vectors, we also need to define an index for each word when using embeddings. These indices will be the keys into a lookup table. That is, the embeddings are stored as a |V| × D matrix, where D is the dimensionality of the embeddings, such that the word assigned index i has its embedding stored in the i-th row of the matrix. In all of the code below, the mapping from words to indices is a dictionary named word_to_ix.

The module that lets you use embeddings is `torch.nn.Embedding`, which takes two arguments: the size of the vocabulary and the dimensionality of the embeddings.

To index into this table, you must use a `torch.LongTensor` (since the indices are integers, not floats).

```python
# Author: Robert Guthrie

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

torch.manual_seed(1)
```

```python
word_to_ix = {"hello": 0, "world": 1}
embeds = nn.Embedding(2, 5)  # 2 words in vocab, 5 dimensional embeddings
lookup_tensor = torch.tensor([word_to_ix["hello"]], dtype=torch.long)
hello_embed = embeds(lookup_tensor)
print(hello_embed)
```

* Output:

```
tensor([[ 0.6614,  0.2669,  0.0617,  0.6213, -0.4519]],
       grad_fn=<EmbeddingBackward>)
```

## 3. Example: N-Gram Language Modeling

Recall that in an n-gram language model, given a sequence of words w, we want to compute

$$ P(w_i \mid w_{i-1}, w_{i-2}, \dots, w_{i-n+1}) $$

where w_i is the i-th word of the sequence. In this example, we will compute the loss function on some training examples and update the parameters with backpropagation.

```python
CONTEXT_SIZE = 2
EMBEDDING_DIM = 10
# We will use Shakespeare's Sonnet 2
test_sentence = """When forty winters shall besiege thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery so gazed on now,
Will be a totter'd weed of small worth held:
Then being asked, where all thy beauty lies,
Where all the treasure of thy lusty days;
To say, within thine own deep sunken eyes,
Were an all-eating shame, and thriftless praise.
How much more praise deserv'd thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count, and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold.""".split()
# We should tokenize the input, but we will ignore that for now.
# Build a list of tuples. Each tuple is ([ word_i-2, word_i-1 ], target word).
trigrams = [([test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2])
            for i in range(len(test_sentence) - 2)]
# Print the first 3, just to see what they look like.
print(trigrams[:3])

vocab = set(test_sentence)
word_to_ix = {word: i for i, word in enumerate(vocab)}


class NGramLanguageModeler(nn.Module):

    def __init__(self, vocab_size, embedding_dim, context_size):
        super(NGramLanguageModeler, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.linear1 = nn.Linear(context_size * embedding_dim, 128)
        self.linear2 = nn.Linear(128, vocab_size)

    def forward(self, inputs):
        embeds = self.embeddings(inputs).view((1, -1))
        out = F.relu(self.linear1(embeds))
        out = self.linear2(out)
        log_probs = F.log_softmax(out, dim=1)
        return log_probs


losses = []
loss_function = nn.NLLLoss()
model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
optimizer = optim.SGD(model.parameters(), lr=0.001)

for epoch in range(10):
    total_loss = 0
    for context, target in trigrams:

        # Step 1. Prepare the inputs to be passed to the model (i.e., turn the
        # words into integer indices and wrap them in tensors).
        context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)

        # Step 2. Recall that torch accumulates gradients. Before passing in a
        # new instance, you need to zero out the gradients from the old instance.
        model.zero_grad()

        # Step 3. Run the forward pass, getting log probabilities over next words.
        log_probs = model(context_idxs)

        # Step 4. Compute your loss function. (Again, Torch wants the target
        # word wrapped in a tensor.)
        loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))

        # Step 5. Do the backward pass and update the parameters.
        loss.backward()
        optimizer.step()

        # Get the Python number from a 1-element Tensor by calling tensor.item().
        total_loss += loss.item()
    losses.append(total_loss)
print(losses)  # The loss decreases on every iteration over the training data!
```

* Output:

```
[(['When', 'forty'], 'winters'), (['forty', 'winters'], 'shall'), (['winters', 'shall'], 'besiege')]
[523.1487259864807, 520.6150465011597, 518.0996162891388, 515.6003141403198, 513.1156675815582, 510.645352602005, 508.1888840198517, 505.74565410614014, 503.314866065979, 500.8949146270752]
```
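
As an optional follow-up (not part of the original text), once the model has been trained you can read the learned embedding of any word directly out of the `nn.Embedding` weight matrix, for example:

```python
# The row of the embedding matrix corresponding to a word is its learned vector.
print(model.embeddings.weight[word_to_ix["beauty"]])
```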

## 4. Exercise: Computing Word Embeddings with the Continuous Bag-of-Words Model

The Continuous Bag-of-Words model (CBOW) is used frequently in deep learning for NLP. It is a model that tries to predict a word given the context of a few words before and a few words after the target word. This is distinct from language modeling, since CBOW is not sequential and does not have to be probabilistic. Typically, CBOW is used to quickly train word embeddings, and those embeddings are used to initialize the embeddings of some more complicated model. Usually, this is referred to as *pretraining embeddings*. It almost always helps performance by a couple of percentage points.

The CBOW model is as follows: given a target word w_i and an N-word context window on each side, w_{i-1}, …, w_{i-N} and w_{i+1}, …, w_{i+N}, referring to all of the context words collectively as C, CBOW tries to minimize

$$ -\log p(w_i \mid C) = -\log \operatorname{Softmax}\Big( A \big( \textstyle\sum_{w \in C} q_w \big) + b \Big) $$

where q_w is the embedding of word w.

Implement this model in PyTorch by filling in the class below. Two tips (a rough sketch of one possible solution appears after the example output below):

* Think about which parameters you need to define.

* Make sure you know what shape each operation expects; use `.view()` if you need to reshape.

```python
CONTEXT_SIZE = 2  # 2 words to the left, 2 to the right
raw_text = """We are about to study the idea of a computational process.
Computational processes are abstract beings that inhabit computers.
As they evolve, processes manipulate other abstract things called data.
The evolution of a process is directed by a pattern of rules
called a program. People create programs to direct processes. In effect,
we conjure the spirits of the computer with our spells.""".split()

# By deriving a set from `raw_text`, we deduplicate the array.
vocab = set(raw_text)
vocab_size = len(vocab)

word_to_ix = {word: i for i, word in enumerate(vocab)}
data = []
for i in range(2, len(raw_text) - 2):
    context = [raw_text[i - 2], raw_text[i - 1],
               raw_text[i + 1], raw_text[i + 2]]
    target = raw_text[i]
    data.append((context, target))
print(data[:5])


class CBOW(nn.Module):

    def __init__(self):
        pass

    def forward(self, inputs):
        pass


# Create your model and train. Here are some functions to help you make
# the data ready for use by your module.


def make_context_vector(context, word_to_ix):
    idxs = [word_to_ix[w] for w in context]
    return torch.tensor(idxs, dtype=torch.long)


make_context_vector(data[0][0], word_to_ix)  # example
```

* Output:

```
[(['We', 'are', 'to', 'study'], 'about'), (['are', 'about', 'study', 'the'], 'to'), (['about', 'to', 'the', 'idea'], 'study'), (['to', 'study', 'idea', 'of'], 'the'), (['study', 'the', 'of', 'a'], 'idea')]
```
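
Below is a rough sketch of one way to fill in the CBOW class; it is an illustration, not the tutorial's reference solution, and EMBEDDING_DIM is a hypothetical hyperparameter chosen for the sketch. It sums the context embeddings, applies a linear layer, and returns log probabilities, matching the loss defined above; it reuses vocab_size, data, word_to_ix, and make_context_vector from the skeleton:

```python
EMBEDDING_DIM = 10  # hypothetical choice for this sketch


class CBOW(nn.Module):

    def __init__(self, vocab_size, embedding_dim):
        super(CBOW, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.linear = nn.Linear(embedding_dim, vocab_size)

    def forward(self, inputs):
        # inputs: LongTensor of context word indices, shape (2 * CONTEXT_SIZE,)
        embeds = self.embeddings(inputs).sum(dim=0).view(1, -1)  # sum of q_w over the context
        out = self.linear(embeds)                                 # A(sum q_w) + b
        return F.log_softmax(out, dim=1)                          # log Softmax(...)


# Training then mirrors the N-gram example above:
model = CBOW(vocab_size, EMBEDDING_DIM)
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)

for epoch in range(10):
    total_loss = 0
    for context, target in data:
        context_vector = make_context_vector(context, word_to_ix)
        model.zero_grad()
        log_probs = model(context_vector)
        loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(total_loss)
```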

BIN: new binary files added under FifthSection/notation/ (notation and formula images such as c_w.gif, x_w.gif, q_w.gif, w_i.gif, and 公式*.PNG/gif referenced by the word-embedding notes).