import tensorflow as tf

from helpers import lazy_property


class EmbeddingModel:

    def __init__(self, data, target, params):
        self.data = data
        self.target = target
        self.params = params
        # Touch the lazy properties once so the whole graph is
        # constructed up front.
        self.embeddings
        self.cost
        self.optimize

    @lazy_property
    def embeddings(self):
        # One trainable vector per vocabulary word, initialized uniformly.
        initial = tf.random_uniform(
            [self.params.vocabulary_size, self.params.embedding_size],
            -1.0, 1.0)
        return tf.Variable(initial)

    @lazy_property
    def optimize(self):
        optimizer = tf.train.MomentumOptimizer(
            self.params.learning_rate, self.params.momentum)
        return optimizer.minimize(self.cost)

    @lazy_property
    def cost(self):
        # Look up the embedding vector of each word in the batch.
        embedded = tf.nn.embedding_lookup(self.embeddings, self.data)
        # Output layer for noise-contrastive estimation (NCE).
        weight = tf.Variable(tf.truncated_normal(
            [self.params.vocabulary_size, self.params.embedding_size],
            stddev=1.0 / self.params.embedding_size ** 0.5))
        bias = tf.Variable(tf.zeros([self.params.vocabulary_size]))
        # nce_loss expects labels of shape [batch_size, num_true].
        target = tf.expand_dims(self.target, 1)
        # Keyword arguments guard against the labels/inputs argument-order
        # change introduced in TensorFlow 1.0.
        return tf.reduce_mean(tf.nn.nce_loss(
            weights=weight, biases=bias, labels=target, inputs=embedded,
            num_sampled=self.params.contrastive_examples,
            num_classes=self.params.vocabulary_size))
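The model relies on a lazy_property decorator imported from helpers, whose implementation is not listed here. A minimal sketch, assuming it follows the common caching pattern, would store the wrapped function's result on first access so that each TensorFlow node is constructed exactly once:

import functools

def lazy_property(function):
    # Cache the result under a private attribute so the decorated
    # graph-building code runs only on first access.
    attribute = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        if not hasattr(self, attribute):
            setattr(self, attribute, function(self))
        return getattr(self, attribute)

    return wrapper

The training script below wires this model to a Wikipedia corpus: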
import collections

import tensorflow as tf
import numpy as np

from batched import batched
from EmbeddingModel import EmbeddingModel
from skipgrams import skipgrams
from Wikipedia import Wikipedia
from helpers import AttrDict

WIKI_DOWNLOAD_DIR = './wikipedia'

params = AttrDict(
    vocabulary_size=10000,
    max_context=10,
    embedding_size=200,
    contrastive_examples=100,
    learning_rate=0.5,
    momentum=0.5,
    batch_size=1000,
)

# The placeholders take flat batches of word indices.
data = tf.placeholder(tf.int32, [None])
target = tf.placeholder(tf.int32, [None])
model = EmbeddingModel(data, target, params)

corpus = Wikipedia(
    'https://dumps.wikimedia.org/enwiki/20160501/'
    'enwiki-20160501-pages-meta-current1.xml-p000000010p000030303.bz2',
    WIKI_DOWNLOAD_DIR,
    params.vocabulary_size)
examples = skipgrams(corpus, params.max_context)
batches = batched(examples, params.batch_size)
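# Note: skipgrams.py and batched.py are project helpers that are not
# listed here. Presumably, skipgrams() walks the encoded corpus and
# yields pairs of word indices (a word and one word from its
# surrounding context of up to max_context words), and batched()
# groups those pairs into numpy arrays of batch_size examples each.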
sess = tf.Session()
# tf.initialize_all_variables() was renamed in TensorFlow 1.0.
sess.run(tf.global_variables_initializer())

# Running average of the cost over the last 100 batches.
average = collections.deque(maxlen=100)
for index, batch in enumerate(batches):
    feed_dict = {data: batch[0], target: batch[1]}
    cost, _ = sess.run([model.cost, model.optimize], feed_dict)
    average.append(cost)
    print('{}: {:5.1f}'.format(index + 1, sum(average) / len(average)))
    if index > 100000:
        break

embeddings = sess.run(model.embeddings)
np.save(WIKI_DOWNLOAD_DIR + '/embeddings.npy', embeddings)
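After training, the embeddings are just a numpy array on disk. As a quick sanity check, rows that are close under cosine similarity should correspond to related words. A minimal sketch (mapping indices back to words depends on the vocabulary built by the Wikipedia class, which is not shown here):

import numpy as np

embeddings = np.load('./wikipedia/embeddings.npy')
# Normalize the rows so that dot products equal cosine similarities.
normed = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)

def nearest(index, count=5):
    # Indices of the most similar vocabulary entries, excluding the word itself.
    similarity = normed @ normed[index]
    return np.argsort(-similarity)[1:count + 1]

print(nearest(42))  # Prints vocabulary indices of the nearest neighbors.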
This concludes the detailed walkthrough of the word-embedding example.